diff --git a/.azure-pipelines/bazel.yml b/.azure-pipelines/bazel.yml index d68973ab0ee45..eef2e51dabd46 100644 --- a/.azure-pipelines/bazel.yml +++ b/.azure-pipelines/bazel.yml @@ -25,10 +25,6 @@ steps: path: $(Build.StagingDirectory)/repository_cache continueOnError: true -- bash: .azure-pipelines/cleanup.sh - displayName: "Removing tools from agent" - condition: ${{ parameters.managedAgent }} - - bash: | echo "disk space at beginning of build:" df -h diff --git a/.azure-pipelines/cleanup.sh b/.azure-pipelines/cleanup.sh deleted file mode 100755 index 3714f24cac1e3..0000000000000 --- a/.azure-pipelines/cleanup.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -set -e - -# Temporary script to remove tools from Azure pipelines agent to create more disk space room. -sudo apt-get update -y || true -sudo apt-get purge -y --no-upgrade 'ghc-*' 'zulu-*-azure-jdk' 'libllvm*' 'mysql-*' 'dotnet-*' 'libgl1' \ - 'adoptopenjdk-*' 'azure-cli' 'google-chrome-stable' 'firefox' 'hhvm' - -dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -rn diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index e52b2f7f94061..e3ef1a164a850 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -287,8 +287,6 @@ stages: pool: vmImage: "ubuntu-18.04" steps: - - bash: .azure-pipelines/cleanup.sh - displayName: "Removing tools from agent" - bash: | echo "disk space at beginning of build:" df -h diff --git a/.bazelrc b/.bazelrc index 225246fbea40f..7d5c4ce1e86bf 100644 --- a/.bazelrc +++ b/.bazelrc @@ -10,6 +10,9 @@ # Startup options cannot be selected via config. 
startup --host_jvm_args=-Xmx2g +run --color=yes + +build --color=yes build --workspace_status_command="bash bazel/get_workspace_status" build --experimental_strict_action_env=true build --host_force_python=PY3 diff --git a/.github/actions/pr_notifier/pr_notifier.py b/.github/actions/pr_notifier/pr_notifier.py index dd9f9b33796a9..066f172c0cecf 100644 --- a/.github/actions/pr_notifier/pr_notifier.py +++ b/.github/actions/pr_notifier/pr_notifier.py @@ -13,6 +13,7 @@ MAINTAINERS = { 'alyssawilk': 'U78RP48V9', + 'dio': 'U79S2DFV1', 'mattklein123': 'U5CALEVSL', 'lizan': 'U79E51EQ6', 'snowp': 'U93KTPQP6', @@ -28,6 +29,13 @@ 'asraa': 'UKZKCFRTP', } +# Only notify API reviewers who aren't maintainers. +# Maintainers are already notified of pending PRs. +API_REVIEWERS = { + 'markdroth': 'UMN8K55A6', + 'adisuissa': 'UT17EMMTP', +} + def get_slo_hours(): # on Monday, allow for 24h + 48h @@ -44,6 +52,14 @@ def is_waiting(labels): return False +# Return true if the PR has an API tag, false otherwise. +def is_api(labels): + for label in labels: + if label.name == 'api': + return True + return False + + # Generate a pr message, bolding the time if it's out-SLO def pr_message(pr_age, pr_url, pr_title, delta_days, delta_hours): if pr_age < datetime.timedelta(hours=get_slo_hours()): @@ -54,19 +70,38 @@ def pr_message(pr_age, pr_url, pr_title, delta_days, delta_hours): pr_url, pr_title, delta_days, delta_hours) -# Adds reminder lines to the appropriate maintainer to review the assigned PRs -def add_reminders(assignees, maintainers_and_prs, message): - has_maintainer_assignee = False +# Adds reminder lines to the appropriate assignee to review the assigned PRs +# Returns true if one of the assignees is in the known_assignee_map, false otherwise. 
+def add_reminders(assignees, assignees_and_prs, message, known_assignee_map): + has_known_assignee = False for assignee_info in assignees: assignee = assignee_info.login - if assignee not in MAINTAINERS: + if assignee not in known_assignee_map: continue - has_maintainer_assignee = True - if assignee not in maintainers_and_prs.keys(): - maintainers_and_prs[ + has_known_assignee = True + if assignee not in assignees_and_prs.keys(): + assignees_and_prs[ assignee] = "Hello, %s, here are your PR reminders for the day \n" % assignee - maintainers_and_prs[assignee] = maintainers_and_prs[assignee] + message - return has_maintainer_assignee + assignees_and_prs[assignee] = assignees_and_prs[assignee] + message + return has_known_assignee + + +# Returns true if the PR needs an LGTM from an API shepherd. +def needs_api_review(labels, repo, pr_info): + # API reviews should always have the label, so don't bother doing an RPC if + # it's not tagged (this helps avoid github rate limiting) + if not (is_api(labels)): + return False + # repokitten tags each commit as pending unless there has been an API LGTM + # since the latest API changes. If this PR is tagged pending it needs an + # API review, otherwise it's set. + headers, data = repo._requester.requestJsonAndCheck( + "GET", + ("https://api.github.com/repos/envoyproxy/envoy/statuses/" + pr_info.head.sha), + ) + if (data and data[0]["state"] == 'pending'): + return True + return False def track_prs(): @@ -79,13 +114,19 @@ def track_prs(): maintainers_and_prs = {} # A placeholder for unassigned PRs, to be sent to #maintainers eventually maintainers_and_prs['unassigned'] = "" + # A dict of shepherd : outstanding_pr_string to be sent to slack + api_review_and_prs = {} # Out-SLO PRs to be sent to #envoy-maintainer-oncall stalled_prs = "" # Snag all PRs, including drafts for pr_info in repo.get_pulls("open", "updated", "desc"): + labels = pr_info.labels + assignees = pr_info.assignees # If the PR is waiting, continue. 
- if is_waiting(pr_info.labels): + if is_waiting(labels): + continue + if pr_info.draft: continue # Update the time based on the time zone delta from github's @@ -98,37 +139,40 @@ def track_prs(): # SLO, nudge in bold if not. message = pr_message(delta, pr_info.html_url, pr_info.title, delta_days, delta_hours) + if (needs_api_review(labels, repo, pr_info)): + add_reminders(pr_info.assignees, api_review_and_prs, message, API_REVIEWERS) + # If the PR has been out-SLO for over a day, inform on-call if delta > datetime.timedelta(hours=get_slo_hours() + 36): stalled_prs = stalled_prs + message # Add a reminder to each maintainer-assigner on the PR. - has_maintainer_assignee = add_reminders(pr_info.assignees, maintainers_and_prs, message) + has_maintainer_assignee = add_reminders( + pr_info.assignees, maintainers_and_prs, message, MAINTAINERS) # If there was no maintainer, track it as unassigned. if not has_maintainer_assignee: - # don't bother assigning maintainer WIPs. - if pr_info.draft and pr_info.user.login in maintainers_and_prs.keys(): - continue maintainers_and_prs['unassigned'] = maintainers_and_prs['unassigned'] + message - # Return the dict of {maintainers : PR notifications}, and stalled PRs - return maintainers_and_prs, stalled_prs + # Return the dict of {maintainers : PR notifications}, + # the dict of {api-shepherds-who-are-not-maintainers: PR notifications}, + # and stalled PRs + return maintainers_and_prs, api_review_and_prs, stalled_prs -def post_to_maintainers(client, maintainers_and_messages): - # Post updates to individual maintainers - for key in maintainers_and_messages: - message = maintainers_and_messages[key] +def post_to_assignee(client, assignees_and_messages, assignees_map): + # Post updates to individual assignees + for key in assignees_and_messages: + message = assignees_and_messages[key] - # Only send messages if we have the maintainer UID - if key not in MAINTAINERS: + # Only send messages if we have the slack UID + if key not in 
assignees_map: continue - uid = MAINTAINERS[key] + uid = assignees_map[key] # Ship messages off to slack. try: - print(maintainers_and_messages[key]) + print(assignees_and_messages[key]) response = client.conversations_open(users=uid, text="hello") channel_id = response["channel"]["id"] response = client.chat_postMessage(channel=channel_id, text=message) @@ -151,6 +195,8 @@ def post_to_oncall(client, unassigned_prs, out_slo_prs): if __name__ == '__main__': + maintainers_and_messages, shephards_and_messages, stalled_prs = track_prs() + SLACK_BOT_TOKEN = os.getenv('SLACK_BOT_TOKEN') if not SLACK_BOT_TOKEN: print( @@ -158,8 +204,7 @@ def post_to_oncall(client, unassigned_prs, out_slo_prs): ) sys.exit(1) - maintainers_and_messages, stalled_prs = track_prs() - client = WebClient(token=SLACK_BOT_TOKEN) - post_to_maintainers(client, maintainers_and_messages) post_to_oncall(client, maintainers_and_messages['unassigned'], stalled_prs) + post_to_assignee(client, shephards_and_messages, API_REVIEWERS) + post_to_assignee(client, maintainers_and_messages, MAINTAINERS) diff --git a/.github/actions/pr_notifier/requirements.txt b/.github/actions/pr_notifier/requirements.txt index 253c687e5fe77..e9d1ec1845062 100644 --- a/.github/actions/pr_notifier/requirements.txt +++ b/.github/actions/pr_notifier/requirements.txt @@ -111,9 +111,9 @@ six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 # via pynacl -slack-sdk==3.6.0 \ - --hash=sha256:195f044e02a2844579a7a26818ce323e85dde8de224730c859644918d793399e \ - --hash=sha256:e1b257923a1ef88b8620dd3abff94dc5b3eee16ef37975d101ba9e60123ac3af +slack-sdk==3.7.0 \ + --hash=sha256:50b9fd6d8f83af7e8ad6d8e76882d04931842241f85ccfd30da09b4a7b9b1516 \ + --hash=sha256:f0bf3e38ac393eba7fe1a99191b0e72f710860c6d2edc1271606fcfc08bea2e1 # via -r .github/actions/pr_notifier/requirements.txt urllib3==1.26.5 \ 
--hash=sha256:753a0374df26658f99d826cfe40394a686d05985786d946fbe4165b5148f5a7c \ diff --git a/.github/dependabot.yml b/.github/dependabot.yml index d79fcd80045d0..ee4c89391face 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -21,11 +21,6 @@ updates: schedule: interval: "daily" -- package-ecosystem: "pip" - directory: "/docs" - schedule: - interval: "daily" - - package-ecosystem: "pip" directory: "/tools/docs" schedule: diff --git a/.github/workflows/codeql-daily.yml b/.github/workflows/codeql-daily.yml index ad64f8c1f3474..358695c84f61c 100644 --- a/.github/workflows/codeql-daily.yml +++ b/.github/workflows/codeql-daily.yml @@ -34,7 +34,8 @@ jobs: - name: Install deps shell: bash run: | - sudo apt-get update && sudo apt-get install libtool cmake automake autoconf make ninja-build curl unzip virtualenv openjdk-11-jdk build-essential libc++1 + sudo apt-get update --error-on=any + sudo apt-get install --yes libtool cmake automake autoconf make ninja-build curl unzip virtualenv openjdk-11-jdk build-essential libc++1 mkdir -p bin/clang11 cd bin/clang11 wget https://github.com/llvm/llvm-project/releases/download/llvmorg-11.0.1/clang+llvm-11.0.1-x86_64-linux-gnu-ubuntu-16.04.tar.xz diff --git a/.github/workflows/codeql-push.yml b/.github/workflows/codeql-push.yml index c4569102c82c6..d79e7a011d347 100644 --- a/.github/workflows/codeql-push.yml +++ b/.github/workflows/codeql-push.yml @@ -42,7 +42,8 @@ jobs: - name: Install deps shell: bash run: | - sudo apt-get update && sudo apt-get install libtool cmake automake autoconf make ninja-build curl unzip virtualenv openjdk-11-jdk build-essential libc++1 + sudo apt-get update --error-on=any + sudo apt-get install --yes libtool cmake automake autoconf make ninja-build curl unzip virtualenv openjdk-11-jdk build-essential libc++1 mkdir -p bin/clang11 cd bin/clang11 wget https://github.com/llvm/llvm-project/releases/download/llvmorg-11.0.1/clang+llvm-11.0.1-x86_64-linux-gnu-ubuntu-16.04.tar.xz diff --git 
a/CODEOWNERS b/CODEOWNERS index 3209be2307338..1f4bc2abbfde6 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -196,3 +196,5 @@ extensions/filters/http/oauth2 @rgs1 @derekargueta @snowp /*/extensions/filters/http/set_metadata @aguinet @snowp # Formatters /*/extensions/formatter/req_without_query @dio @tsaarni +# IP address input matcher +/*/extensions/matching/input_matchers/ip @aguinet @snowp diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4f73d3353d7af..38cded6eaf8b5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -102,7 +102,7 @@ versioning guidelines: Please see [support/README.md](support/README.md) for more information on these hooks. -* Create your PR. If your PR adds new code, it should include tests [covering](source/docs/coverage.md) the new code. +* Create your PR. If your PR adds new code, it should include tests [covering](source/docs/coverage.md) the new code. Please note that draft PRs may not be reviewed and will likely not be triaged, so do not create your PR as a draft if you want prompt reviews! * Tests will automatically run for you. * We will **not** merge any PR that is not passing tests. * PRs are expected to have 100% test coverage for added code. This can be verified with a coverage diff --git a/OWNERS.md b/OWNERS.md index f88de100dbc72..551f5d7412a97 100644 --- a/OWNERS.md +++ b/OWNERS.md @@ -44,6 +44,15 @@ routing PRs, questions, etc. to the right place. * Antonio Vicente ([antoniovicente](https://github.com/antoniovicente)) (avd@google.com) * Event management, security, performance, data plane. +# Senior extension maintainers + +The following extension maintainers have final say over the extensions mentioned below. Once they +approve an extension PR, it will be merged by the maintainer on-call (or any other maintainer) +without further review. 
+ +* Piotr Sikora ([PiotrSikora](https://github.com/PiotrSikora)) (piotrsikora@google.com) + * Wasm + # Envoy security team * All maintainers diff --git a/api/BUILD b/api/BUILD index cb40c29c8e407..4b11cc147633a 100644 --- a/api/BUILD +++ b/api/BUILD @@ -195,6 +195,7 @@ proto_library( "//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg", "//envoy/extensions/matching/common_inputs/environment_variable/v3:pkg", "//envoy/extensions/matching/input_matchers/consistent_hashing/v3:pkg", + "//envoy/extensions/matching/input_matchers/ip/v3:pkg", "//envoy/extensions/network/socket_interface/v3:pkg", "//envoy/extensions/quic/crypto_stream/v3:pkg", "//envoy/extensions/quic/proof_source/v3:pkg", diff --git a/api/bazel/external_proto_deps.bzl b/api/bazel/external_proto_deps.bzl index 010eeb145785f..6b11495d3c0dc 100644 --- a/api/bazel/external_proto_deps.bzl +++ b/api/bazel/external_proto_deps.bzl @@ -11,7 +11,7 @@ EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP = { "google/api/expr/v1alpha1/checked.proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", "google/api/expr/v1alpha1/syntax.proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", - "metrics.proto": "@prometheus_metrics_model//:client_model", + "io/prometheus/client/metrics.proto": "@prometheus_metrics_model//:client_model", "opencensus/proto/trace/v1/trace.proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", "opencensus/proto/trace/v1/trace_config.proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", "opentelemetry/proto/common/v1/common.proto": "@opentelemetry_proto//:common", diff --git a/api/bazel/repositories.bzl b/api/bazel/repositories.bzl index 7183613a47831..74e19f831179f 100644 --- a/api/bazel/repositories.bzl +++ b/api/bazel/repositories.bzl @@ -55,7 +55,7 @@ load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") api_cc_py_proto_library( name = "client_model", srcs = [ - "metrics.proto", + "io/prometheus/client/metrics.proto", ], 
visibility = ["//visibility:public"], ) diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index f2685aaeb014e..968c6a9ffa286 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -89,9 +89,9 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Prometheus client model", project_desc = "Data model artifacts for Prometheus", project_url = "https://github.com/prometheus/client_model", - version = "0255a22d35ad5661ef7aa89c95fdf5dfd685283f", - sha256 = "a83fd26a80c5f9b82d1231448141a148c1d7a0c8f581ddf49fdbd8c1545e5661", - release_date = "2021-01-16", + version = "147c58e9608a4f9628b53b6cc863325ca746f63a", + sha256 = "f7da30879dcdfae367fa65af1969945c3148cfbfc462b30b7d36f17134675047", + release_date = "2021-06-07", strip_prefix = "client_model-{version}", urls = ["https://github.com/prometheus/client_model/archive/{version}.tar.gz"], use_category = ["api"], diff --git a/api/envoy/config/accesslog/v3/accesslog.proto b/api/envoy/config/accesslog/v3/accesslog.proto index ad129a3ed64be..bb53286380c98 100644 --- a/api/envoy/config/accesslog/v3/accesslog.proto +++ b/api/envoy/config/accesslog/v3/accesslog.proto @@ -246,6 +246,7 @@ message ResponseFlagFilter { in: "DT" in: "UPE" in: "NC" + in: "OM" } } }]; diff --git a/api/envoy/config/accesslog/v4alpha/accesslog.proto b/api/envoy/config/accesslog/v4alpha/accesslog.proto index 7559a3b82c79f..3e0c7f53598cc 100644 --- a/api/envoy/config/accesslog/v4alpha/accesslog.proto +++ b/api/envoy/config/accesslog/v4alpha/accesslog.proto @@ -245,6 +245,7 @@ message ResponseFlagFilter { in: "DT" in: "UPE" in: "NC" + in: "OM" } } }]; diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index 5470b1807d435..f902deb92c980 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -43,7 +43,7 @@ message ClusterCollection { } // Configuration for a single upstream cluster. 
-// [#next-free-field: 54] +// [#next-free-field: 55] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster"; @@ -413,8 +413,8 @@ message Cluster { // The table size for Maglev hashing. The Maglev aims for ‘minimal disruption’ rather than an absolute guarantee. // Minimal disruption means that when the set of upstreams changes, a connection will likely be sent to the same // upstream as it was before. Increasing the table size reduces the amount of disruption. - // The table size must be prime number. If it is not specified, the default is 65537. - google.protobuf.UInt64Value table_size = 1; + // The table size must be prime number limited to 5000011. If it is not specified, the default is 65537. + google.protobuf.UInt64Value table_size = 1 [(validate.rules).uint64 = {lte: 5000011}]; } // Specific configuration for the @@ -876,6 +876,13 @@ message Cluster { // DNS resolution configuration which includes the underlying dns resolver addresses and options. core.v3.DnsResolutionConfig dns_resolution_config = 53; + // Optional configuration for having cluster readiness block on warm-up. Currently, only applicable for + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`. + // If true, cluster readiness blocks on warm-up. If false, the cluster will complete + // initialization whether or not warm-up has completed. Defaults to true. + google.protobuf.BoolValue wait_for_warm_on_init = 54; + // If specified, outlier detection will be enabled for this upstream cluster. // Each of the configuration values can be overridden via // :ref:`runtime values `. @@ -930,7 +937,7 @@ message Cluster { CommonLbConfig common_lb_config = 27; // Optional custom transport socket implementation to use for upstream connections. - // To setup TLS, set a transport socket with name `tls` and + // To setup TLS, set a transport socket with name `envoy.transport_sockets.tls` and // :ref:`UpstreamTlsContexts ` in the `typed_config`. 
// If no transport socket configuration is specified, new connections // will be set up with plaintext. diff --git a/api/envoy/config/cluster/v4alpha/cluster.proto b/api/envoy/config/cluster/v4alpha/cluster.proto index bfea44955bf58..a218a6931a8f0 100644 --- a/api/envoy/config/cluster/v4alpha/cluster.proto +++ b/api/envoy/config/cluster/v4alpha/cluster.proto @@ -43,7 +43,7 @@ message ClusterCollection { } // Configuration for a single upstream cluster. -// [#next-free-field: 54] +// [#next-free-field: 55] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Cluster"; @@ -416,8 +416,8 @@ message Cluster { // The table size for Maglev hashing. The Maglev aims for ‘minimal disruption’ rather than an absolute guarantee. // Minimal disruption means that when the set of upstreams changes, a connection will likely be sent to the same // upstream as it was before. Increasing the table size reduces the amount of disruption. - // The table size must be prime number. If it is not specified, the default is 65537. - google.protobuf.UInt64Value table_size = 1; + // The table size must be prime number limited to 5000011. If it is not specified, the default is 65537. + google.protobuf.UInt64Value table_size = 1 [(validate.rules).uint64 = {lte: 5000011}]; } // Specific configuration for the @@ -802,6 +802,13 @@ message Cluster { // DNS resolution configuration which includes the underlying dns resolver addresses and options. core.v4alpha.DnsResolutionConfig dns_resolution_config = 53; + // Optional configuration for having cluster readiness block on warm-up. Currently, only applicable for + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`. + // If true, cluster readiness blocks on warm-up. If false, the cluster will complete + // initialization whether or not warm-up has completed. Defaults to true. 
+ google.protobuf.BoolValue wait_for_warm_on_init = 54; + // If specified, outlier detection will be enabled for this upstream cluster. // Each of the configuration values can be overridden via // :ref:`runtime values `. @@ -856,7 +863,7 @@ message Cluster { CommonLbConfig common_lb_config = 27; // Optional custom transport socket implementation to use for upstream connections. - // To setup TLS, set a transport socket with name `tls` and + // To setup TLS, set a transport socket with name `envoy.transport_sockets.tls` and // :ref:`UpstreamTlsContexts ` in the `typed_config`. // If no transport socket configuration is specified, new connections // will be set up with plaintext. diff --git a/api/envoy/config/listener/v3/listener_components.proto b/api/envoy/config/listener/v3/listener_components.proto index e6d73b791c216..93acc4c94a666 100644 --- a/api/envoy/config/listener/v3/listener_components.proto +++ b/api/envoy/config/listener/v3/listener_components.proto @@ -64,9 +64,12 @@ message Filter { // 3. Server name (e.g. SNI for TLS protocol), // 4. Transport protocol. // 5. Application protocols (e.g. ALPN for TLS protocol). -// 6. Source type (e.g. any, local or external network). -// 7. Source IP address. -// 8. Source port. +// 6. Directly connected source IP address (this will only be different from the source IP address +// when using a listener filter that overrides the source address, such as the :ref:`Proxy Protocol +// listener filter `). +// 7. Source type (e.g. any, local or external network). +// 8. Source IP address. +// 9. Source port. // // For criteria that allow ranges or wildcards, the most specific value in any // of the configured filter chains that matches the incoming connection is going @@ -90,7 +93,7 @@ message Filter { // listed at the end, because that's how we want to list them in the docs. 
// // [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] -// [#next-free-field: 13] +// [#next-free-field: 14] message FilterChainMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.FilterChainMatch"; @@ -124,6 +127,11 @@ message FilterChainMatch { // [#not-implemented-hide:] google.protobuf.UInt32Value suffix_len = 5; + // The criteria is satisfied if the directly connected source IP address of the downstream + // connection is contained in at least one of the specified subnets. If the parameter is not + // specified or the list is empty, the directly connected source IP address is ignored. + repeated core.v3.CidrRange direct_source_prefix_ranges = 13; + // Specifies the connection source IP match type. Can be any, local or external network. ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; @@ -238,7 +246,7 @@ message FilterChain { core.v3.Metadata metadata = 5; // Optional custom transport socket implementation to use for downstream connections. - // To setup TLS, set a transport socket with name `tls` and + // To setup TLS, set a transport socket with name `envoy.transport_sockets.tls` and // :ref:`DownstreamTlsContext ` in the `typed_config`. // If no transport socket configuration is specified, new connections // will be set up with plaintext. diff --git a/api/envoy/config/listener/v4alpha/listener_components.proto b/api/envoy/config/listener/v4alpha/listener_components.proto index 3a13391c0b78f..103fa23484f81 100644 --- a/api/envoy/config/listener/v4alpha/listener_components.proto +++ b/api/envoy/config/listener/v4alpha/listener_components.proto @@ -63,9 +63,12 @@ message Filter { // 3. Server name (e.g. SNI for TLS protocol), // 4. Transport protocol. // 5. Application protocols (e.g. ALPN for TLS protocol). -// 6. Source type (e.g. any, local or external network). -// 7. Source IP address. -// 8. Source port. +// 6. 
Directly connected source IP address (this will only be different from the source IP address +// when using a listener filter that overrides the source address, such as the :ref:`Proxy Protocol +// listener filter `). +// 7. Source type (e.g. any, local or external network). +// 8. Source IP address. +// 9. Source port. // // For criteria that allow ranges or wildcards, the most specific value in any // of the configured filter chains that matches the incoming connection is going @@ -89,7 +92,7 @@ message Filter { // listed at the end, because that's how we want to list them in the docs. // // [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] -// [#next-free-field: 13] +// [#next-free-field: 14] message FilterChainMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.FilterChainMatch"; @@ -123,6 +126,11 @@ message FilterChainMatch { // [#not-implemented-hide:] google.protobuf.UInt32Value suffix_len = 5; + // The criteria is satisfied if the directly connected source IP address of the downstream + // connection is contained in at least one of the specified subnets. If the parameter is not + // specified or the list is empty, the directly connected source IP address is ignored. + repeated core.v4alpha.CidrRange direct_source_prefix_ranges = 13; + // Specifies the connection source IP match type. Can be any, local or external network. ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; @@ -228,7 +236,7 @@ message FilterChain { core.v4alpha.Metadata metadata = 5; // Optional custom transport socket implementation to use for downstream connections. - // To setup TLS, set a transport socket with name `tls` and + // To setup TLS, set a transport socket with name `envoy.transport_sockets.tls` and // :ref:`DownstreamTlsContext ` in the `typed_config`. // If no transport socket configuration is specified, new connections // will be set up with plaintext. 
diff --git a/api/envoy/config/route/v3/route.proto b/api/envoy/config/route/v3/route.proto index 80956fdeb4e23..e2bf52165be92 100644 --- a/api/envoy/config/route/v3/route.proto +++ b/api/envoy/config/route/v3/route.proto @@ -4,6 +4,7 @@ package envoy.config.route.v3; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/route/v3/route_components.proto"; import "google/protobuf/wrappers.proto"; @@ -21,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // * Routing :ref:`architecture overview ` // * HTTP :ref:`router filter ` -// [#next-free-field: 12] +// [#next-free-field: 13] message RouteConfiguration { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.RouteConfiguration"; @@ -119,6 +120,18 @@ message RouteConfiguration { // is not subject to data plane buffering controls. // google.protobuf.UInt32Value max_direct_response_body_size_bytes = 11; + + // [#not-implemented-hide:] + // A list of plugins and their configurations which may be used by a + // :ref:`envoy_v3_api_field_config.route.v3.RouteAction.cluster_specifier_plugin` + // within the route. All *extension.name* fields in this list must be unique. + repeated ClusterSpecifierPlugin cluster_specifier_plugins = 12; +} + +// Configuration for a cluster specifier plugin. +message ClusterSpecifierPlugin { + // The name of the plugin and its opaque configuration. 
+ core.v3.TypedExtensionConfig extension = 1; } message Vhds { diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index ee82e8f732261..06a71c7858b1a 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -311,7 +311,7 @@ message Route { message WeightedCluster { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.WeightedCluster"; - // [#next-free-field: 11] + // [#next-free-field: 12] message ClusterWeight { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.WeightedCluster.ClusterWeight"; @@ -378,6 +378,13 @@ message WeightedCluster { // :ref:`FilterConfig` // message to specify additional options.] map typed_per_filter_config = 10; + + oneof host_rewrite_specifier { + // Indicates that during forwarding, the host header will be swapped with + // this value. + string host_rewrite_literal = 11 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + } } // Specifies one or more upstream clusters associated with the route. @@ -466,7 +473,7 @@ message RouteMatch { } // Indicates that prefix/path matching should be case sensitive. The default - // is true. + // is true. Ignored for safe_regex matching. google.protobuf.BoolValue case_sensitive = 4; // Indicates that the route should additionally match on a runtime key. Every time the route @@ -563,7 +570,7 @@ message CorsPolicy { core.v3.RuntimeFractionalPercent shadow_enabled = 10; } -// [#next-free-field: 37] +// [#next-free-field: 38] message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction"; @@ -839,6 +846,14 @@ message RouteAction { // :ref:`traffic splitting ` // for additional documentation. 
WeightedCluster weighted_clusters = 3; + + // [#not-implemented-hide:] + // Name of the cluster specifier plugin to use to determine the cluster for + // requests on this route. The plugin name must be defined in the associated + // :ref:`envoy_v3_api_field_config.route.v3.RouteConfiguration.cluster_specifier_plugins` + // in the + // :ref:`envoy_v3_api_field_config.core.v3.TypedExtensionConfig.name` field. + string cluster_specifier_plugin = 37; } // The HTTP status code to use when configured cluster is not found. diff --git a/api/envoy/config/route/v4alpha/route.proto b/api/envoy/config/route/v4alpha/route.proto index 912fc8051556e..4a19386824821 100644 --- a/api/envoy/config/route/v4alpha/route.proto +++ b/api/envoy/config/route/v4alpha/route.proto @@ -4,6 +4,7 @@ package envoy.config.route.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/route/v4alpha/route_components.proto"; import "google/protobuf/wrappers.proto"; @@ -21,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // * Routing :ref:`architecture overview ` // * HTTP :ref:`router filter ` -// [#next-free-field: 12] +// [#next-free-field: 13] message RouteConfiguration { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteConfiguration"; @@ -120,6 +121,21 @@ message RouteConfiguration { // is not subject to data plane buffering controls. // google.protobuf.UInt32Value max_direct_response_body_size_bytes = 11; + + // [#not-implemented-hide:] + // A list of plugins and their configurations which may be used by a + // :ref:`envoy_v3_api_field_config.route.v3.RouteAction.cluster_specifier_plugin` + // within the route. All *extension.name* fields in this list must be unique. + repeated ClusterSpecifierPlugin cluster_specifier_plugins = 12; +} + +// Configuration for a cluster specifier plugin. 
+message ClusterSpecifierPlugin { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.ClusterSpecifierPlugin"; + + // The name of the plugin and its opaque configuration. + core.v4alpha.TypedExtensionConfig extension = 1; } message Vhds { diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index 256a3c742ff3a..eab4b76fd6c0c 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -310,7 +310,7 @@ message WeightedCluster { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.WeightedCluster"; - // [#next-free-field: 11] + // [#next-free-field: 12] message ClusterWeight { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.WeightedCluster.ClusterWeight"; @@ -377,6 +377,13 @@ message WeightedCluster { // :ref:`FilterConfig` // message to specify additional options.] map typed_per_filter_config = 10; + + oneof host_rewrite_specifier { + // Indicates that during forwarding, the host header will be swapped with + // this value. + string host_rewrite_literal = 11 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + } } // Specifies one or more upstream clusters associated with the route. @@ -467,7 +474,7 @@ message RouteMatch { } // Indicates that prefix/path matching should be case sensitive. The default - // is true. + // is true. Ignored for safe_regex matching. google.protobuf.BoolValue case_sensitive = 4; // Indicates that the route should additionally match on a runtime key. 
Every time the route @@ -564,7 +571,7 @@ message CorsPolicy { core.v4alpha.RuntimeFractionalPercent shadow_enabled = 10; } -// [#next-free-field: 37] +// [#next-free-field: 38] message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction"; @@ -838,6 +845,14 @@ message RouteAction { // :ref:`traffic splitting ` // for additional documentation. WeightedCluster weighted_clusters = 3; + + // [#not-implemented-hide:] + // Name of the cluster specifier plugin to use to determine the cluster for + // requests on this route. The plugin name must be defined in the associated + // :ref:`envoy_v3_api_field_config.route.v3.RouteConfiguration.cluster_specifier_plugins` + // in the + // :ref:`envoy_v3_api_field_config.core.v3.TypedExtensionConfig.name` field. + string cluster_specifier_plugin = 37; } // The HTTP status code to use when configured cluster is not found. diff --git a/api/envoy/data/accesslog/v3/accesslog.proto b/api/envoy/data/accesslog/v3/accesslog.proto index 98bdd1d6e8322..c53ae0d6ab852 100644 --- a/api/envoy/data/accesslog/v3/accesslog.proto +++ b/api/envoy/data/accesslog/v3/accesslog.proto @@ -186,7 +186,7 @@ message AccessLogCommon { } // Flags indicating occurrences during request/response processing. -// [#next-free-field: 26] +// [#next-free-field: 27] message ResponseFlags { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.ResponseFlags"; @@ -281,6 +281,9 @@ message ResponseFlags { // Indicates no cluster was found for the request. bool no_cluster_found = 25; + + // Indicates overload manager terminated the request. + bool overload_manager = 26; } // Properties of a negotiated TLS connection. 
diff --git a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto index 79d6752502094..5c35e80d591fd 100644 --- a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto +++ b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.extensions.common.dynamic_forward_proxy.v3; import "envoy/config/cluster/v3/cluster.proto"; +import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/resolver.proto"; import "google/protobuf/duration.proto"; @@ -29,7 +30,7 @@ message DnsCacheCircuitBreakers { // Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview // ` for more information. -// [#next-free-field: 10] +// [#next-free-field: 11] message DnsCacheConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.common.dynamic_forward_proxy.v2alpha.DnsCacheConfig"; @@ -108,4 +109,9 @@ message DnsCacheConfig { // DNS resolution configuration which includes the underlying dns resolver addresses and options. config.core.v3.DnsResolutionConfig dns_resolution_config = 9; + + // Hostnames that should be preresolved into the cache upon creation. This might provide a + // performance improvement, in the form of cache hits, for hostnames that are going to be + // resolved during steady state and are known at config load time. 
+ repeated config.core.v3.SocketAddress preresolve_hostnames = 10; } diff --git a/api/envoy/extensions/common/dynamic_forward_proxy/v4alpha/dns_cache.proto b/api/envoy/extensions/common/dynamic_forward_proxy/v4alpha/dns_cache.proto index 89b2fb12a0923..08b78d3fa45ca 100644 --- a/api/envoy/extensions/common/dynamic_forward_proxy/v4alpha/dns_cache.proto +++ b/api/envoy/extensions/common/dynamic_forward_proxy/v4alpha/dns_cache.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.extensions.common.dynamic_forward_proxy.v4alpha; import "envoy/config/cluster/v4alpha/cluster.proto"; +import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/resolver.proto"; import "google/protobuf/duration.proto"; @@ -31,7 +32,7 @@ message DnsCacheCircuitBreakers { // Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview // ` for more information. -// [#next-free-field: 10] +// [#next-free-field: 11] message DnsCacheConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.common.dynamic_forward_proxy.v3.DnsCacheConfig"; @@ -105,4 +106,9 @@ message DnsCacheConfig { // DNS resolution configuration which includes the underlying dns resolver addresses and options. config.core.v4alpha.DnsResolutionConfig dns_resolution_config = 9; + + // Hostnames that should be preresolved into the cache upon creation. This might provide a + // performance improvement, in the form of cache hits, for hostnames that are going to be + // resolved during steady state and are known at config load time. 
+ repeated config.core.v4alpha.SocketAddress preresolve_hostnames = 10; } diff --git a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index 1effca694d005..a4de844ac4f0d 100644 --- a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -215,18 +215,21 @@ message AuthorizationRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.AuthorizationRequest"; - // Authorization request will include the client request headers that have a correspondent match - // in the :ref:`list `. Note that in addition to the - // user's supplied matchers: + // Authorization request includes the client request headers that have a correspondent match + // in the :ref:`list `. // - // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically included to the list. + // .. note:: + // + // In addition to the the user's supplied matchers, ``Host``, ``Method``, ``Path``, + // ``Content-Length``, and ``Authorization`` are **automatically included** to the list. + // + // .. note:: // - // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have - // a message body. However, the authorization request can include the buffered client request body - // (controlled by :ref:`with_request_body - // ` setting), - // consequently the value of *Content-Length* of the authorization request reflects the size of - // its payload size. + // By default, ``Content-Length`` header is set to ``0`` and the request to the authorization + // service has no message body. However, the authorization request *may* include the buffered + // client request body (controlled by :ref:`with_request_body + // ` + // setting) hence the value of its ``Content-Length`` reflects the size of its payload size. 
// type.matcher.v3.ListStringMatcher allowed_headers = 1; diff --git a/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto index 90f003b0a137c..07114e041ff04 100644 --- a/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto +++ b/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto @@ -215,18 +215,21 @@ message AuthorizationRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.ext_authz.v3.AuthorizationRequest"; - // Authorization request will include the client request headers that have a correspondent match - // in the :ref:`list `. Note that in addition to the - // user's supplied matchers: + // Authorization request includes the client request headers that have a correspondent match + // in the :ref:`list `. // - // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically included to the list. + // .. note:: + // + // In addition to the the user's supplied matchers, ``Host``, ``Method``, ``Path``, + // ``Content-Length``, and ``Authorization`` are **automatically included** to the list. + // + // .. note:: // - // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have - // a message body. However, the authorization request can include the buffered client request body - // (controlled by :ref:`with_request_body - // ` setting), - // consequently the value of *Content-Length* of the authorization request reflects the size of - // its payload size. + // By default, ``Content-Length`` header is set to ``0`` and the request to the authorization + // service has no message body. However, the authorization request *may* include the buffered + // client request body (controlled by :ref:`with_request_body + // ` + // setting) hence the value of its ``Content-Length`` reflects the size of its payload size. 
// type.matcher.v4alpha.ListStringMatcher allowed_headers = 1; diff --git a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto index afc761c07c7e1..a79e3382d6334 100644 --- a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto +++ b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto @@ -52,7 +52,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // cache_duration: // seconds: 300 // -// [#next-free-field: 11] +// [#next-free-field: 12] message JwtProvider { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.jwt_authn.v2alpha.JwtProvider"; @@ -190,6 +190,15 @@ message JwtProvider { string forward_payload_header = 8 [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + // When :ref:`forward_payload_header ` + // is specified, the base64 encoded payload will be added to the headers. + // Normally JWT based64 encode doesn't add padding. If this field is true, + // the header will be padded. + // + // This field is only relevant if :ref:`forward_payload_header ` + // is specified. + bool pad_forward_payload_header = 11; + // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** // The value is the *protobuf::Struct*. 
The value of this field will be the key for its *fields* diff --git a/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto b/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto index 442ba7df061ee..82f6bef04eae4 100644 --- a/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto +++ b/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto @@ -52,7 +52,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // cache_duration: // seconds: 300 // -// [#next-free-field: 11] +// [#next-free-field: 12] message JwtProvider { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.jwt_authn.v3.JwtProvider"; @@ -190,6 +190,15 @@ message JwtProvider { string forward_payload_header = 8 [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + // When :ref:`forward_payload_header ` + // is specified, the base64 encoded payload will be added to the headers. + // Normally JWT based64 encode doesn't add padding. If this field is true, + // the header will be padded. + // + // This field is only relevant if :ref:`forward_payload_header ` + // is specified. + bool pad_forward_payload_header = 11; + // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields* diff --git a/api/envoy/extensions/matching/input_matchers/ip/v3/BUILD b/api/envoy/extensions/matching/input_matchers/ip/v3/BUILD new file mode 100644 index 0000000000000..1c1a6f6b44235 --- /dev/null +++ b/api/envoy/extensions/matching/input_matchers/ip/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/matching/input_matchers/ip/v3/ip.proto b/api/envoy/extensions/matching/input_matchers/ip/v3/ip.proto new file mode 100644 index 0000000000000..3c7cb4eb5f19a --- /dev/null +++ b/api/envoy/extensions/matching/input_matchers/ip/v3/ip.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; + +package envoy.extensions.matching.input_matchers.ip.v3; + +import "envoy/config/core/v3/address.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.matching.input_matchers.ip.v3"; +option java_outer_classname = "IpProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: IP matcher] +// [#extension: envoy.matching.input_matchers.ip] + +// This input matcher matches IPv4 or IPv6 addresses against a list of CIDR +// ranges. It returns true if and only if the input IP belongs to at least one +// of these CIDR ranges. Internally, it uses a Level-Compressed trie, as +// described in the paper `IP-address lookup using LC-tries +// `_ +// by S. Nilsson and G. Karlsson. For "big" lists of IPs, this matcher is more +// efficient than multiple single IP matcher, that would have a linear cost. +message Ip { + // Match if the IP belongs to any of these CIDR ranges. + repeated config.core.v3.CidrRange cidr_ranges = 1 [(validate.rules).repeated = {min_items: 1}]; + + // The human readable prefix to use when emitting statistics for the IP input + // matcher. Names in the table below are concatenated to this prefix. + // + // .. 
csv-table:: + // :header: Name, Type, Description + // :widths: 1, 1, 2 + // + // ip_parsing_failed, Counter, Total number of IP addresses the matcher was unable to parse + string stat_prefix = 2 [(validate.rules).string = {min_len: 1}]; +} diff --git a/api/envoy/extensions/wasm/v3/wasm.proto b/api/envoy/extensions/wasm/v3/wasm.proto index 35af0cf690c20..b4566c826ed08 100644 --- a/api/envoy/extensions/wasm/v3/wasm.proto +++ b/api/envoy/extensions/wasm/v3/wasm.proto @@ -77,6 +77,7 @@ message VmConfig { // **envoy.wasm.runtime.wasmtime**: `Wasmtime `_-based WebAssembly runtime. // This runtime is not enabled in the official build. // + // [#extension-category: envoy.wasm.runtime] string runtime = 2 [(validate.rules).string = {min_len: 1}]; // The Wasm code that Envoy will execute. @@ -86,7 +87,6 @@ message VmConfig { // (proxy_on_start). `google.protobuf.Struct` is serialized as JSON before // passing it to the plugin. `google.protobuf.BytesValue` and // `google.protobuf.StringValue` are passed directly without the wrapper. - // [#extension-category: envoy.wasm.runtime] google.protobuf.Any configuration = 4; // Allow the wasm file to include pre-compiled code on VMs which support it. 
diff --git a/api/envoy/service/metrics/v2/metrics_service.proto b/api/envoy/service/metrics/v2/metrics_service.proto index aa5e703850155..78d6e47e20ab1 100644 --- a/api/envoy/service/metrics/v2/metrics_service.proto +++ b/api/envoy/service/metrics/v2/metrics_service.proto @@ -4,7 +4,7 @@ package envoy.service.metrics.v2; import "envoy/api/v2/core/base.proto"; -import "metrics.proto"; +import "io/prometheus/client/metrics.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; diff --git a/api/envoy/service/metrics/v3/metrics_service.proto b/api/envoy/service/metrics/v3/metrics_service.proto index 033c168c32ba1..e86bda356f7d2 100644 --- a/api/envoy/service/metrics/v3/metrics_service.proto +++ b/api/envoy/service/metrics/v3/metrics_service.proto @@ -4,7 +4,7 @@ package envoy.service.metrics.v3; import "envoy/config/core/v3/base.proto"; -import "metrics.proto"; +import "io/prometheus/client/metrics.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; diff --git a/api/envoy/service/metrics/v4alpha/metrics_service.proto b/api/envoy/service/metrics/v4alpha/metrics_service.proto index d4f2378d35f32..5e1412f103e93 100644 --- a/api/envoy/service/metrics/v4alpha/metrics_service.proto +++ b/api/envoy/service/metrics/v4alpha/metrics_service.proto @@ -4,7 +4,7 @@ package envoy.service.metrics.v4alpha; import "envoy/config/core/v4alpha/base.proto"; -import "metrics.proto"; +import "io/prometheus/client/metrics.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; diff --git a/api/versioning/BUILD b/api/versioning/BUILD index eb4267263f3f2..51bc63183a1c4 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -140,6 +140,7 @@ proto_library( "//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg", "//envoy/extensions/matching/common_inputs/environment_variable/v3:pkg", "//envoy/extensions/matching/input_matchers/consistent_hashing/v3:pkg", + 
"//envoy/extensions/matching/input_matchers/ip/v3:pkg", "//envoy/extensions/network/socket_interface/v3:pkg", "//envoy/extensions/quic/crypto_stream/v3:pkg", "//envoy/extensions/quic/proof_source/v3:pkg", diff --git a/bazel/DEVELOPER.md b/bazel/DEVELOPER.md index 6830f220a9246..2dca90559c063 100644 --- a/bazel/DEVELOPER.md +++ b/bazel/DEVELOPER.md @@ -165,10 +165,10 @@ envoy_cc_binary( ## Filter linking Filters are registered via static initializers at early runtime by modules in -[`source/server/config`](../source/server/config). These require the `alwayslink +[`source/extensions/filters`](../source/extensions/filters). These require the `alwayslink = 1` attribute to be set in the corresponding `envoy_cc_library` target to ensure they are correctly linked. See -[`source/server/config/http/BUILD`](../source/server/config/http/BUILD) for +[`source/extensions/filters/http/BUILD`](../source/extensions/filters/http/BUILD) for examples. ## Tests with environment dependencies diff --git a/bazel/README.md b/bazel/README.md index 081550b4c60f2..5276c629951b6 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -887,9 +887,9 @@ Once this is set up, you can run clang-format without docker: ```shell ./tools/code_format/check_format.py check -./tools/spelling/check_spelling.sh check +./tools/spelling/check_spelling_pedantic.py check ./tools/code_format/check_format.py fix -./tools/spelling/check_spelling.sh fix +./tools/spelling/check_spelling_pedantic.py fix ``` # Advanced caching setup diff --git a/bazel/pch.bzl b/bazel/pch.bzl index da5fce9144084..f65229b2cf873 100644 --- a/bazel/pch.bzl +++ b/bazel/pch.bzl @@ -33,8 +33,7 @@ def _pch(ctx): "\n".join(["#include \"{}\"".format(include) for include in ctx.attr.includes]) + "\n", ) - # TODO: -fno-pch-timestamp / invalidation in that case doesn't work - pch_flags = ["-x", "c++-header"] + pch_flags = ["-x", "c++-header", "-Xclang", "-fno-pch-timestamp"] pch_file = ctx.actions.declare_file(ctx.label.name + ".pch") deps_ctx = 
deps_cc_info.compilation_context diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 7d6734c90df8d..9967b86504382 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -175,6 +175,7 @@ def envoy_dependencies(skip_targets = []): external_http_archive("bazel_compdb") external_http_archive("envoy_build_tools") external_http_archive("rules_cc") + external_http_archive("rules_pkg") # Unconditional, since we use this only for compiler-agnostic fuzzing utils. _org_llvm_releases_compiler_rt() diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 84bbecc323033..357c387c6ec98 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -322,26 +322,26 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_desc = "Data Collect Protocols of Apache SkyWalking", project_url = "https://github.com/apache/skywalking-data-collect-protocol", name = "skywalking_data_collect_protocol", - sha256 = "edfa970394511213eacc8055b4c13e4e9773e9196122a49e0db68f6162f67dff", + sha256 = "d967c1b6e78db017e0c28291211baf4a58c02ff4d4437560285165963dd4a9d0", urls = ["https://github.com/apache/skywalking-data-collect-protocol/archive/v{version}.tar.gz"], strip_prefix = "skywalking-data-collect-protocol-{version}", - version = "8.4.0", + version = "8.6.0", use_category = ["observability_ext"], extensions = ["envoy.tracers.skywalking"], - release_date = "2021-02-01", + release_date = "2021-06-07", cpe = "N/A", ), com_github_skyapm_cpp2sky = dict( project_name = "cpp2sky", project_desc = "C++ SDK for Apache SkyWalking", project_url = "https://github.com/SkyAPM/cpp2sky", - sha256 = "76117a63cf29355c28a75bc83bd1d7e5bc004039445e7c854ee752dfe66094e6", - version = "0.2.1", + sha256 = "f65b1054bd6eadadff0618f272f6d645a1ec933fa14af922a8e3c39603e45eaf", + version = "0.3.1", strip_prefix = "cpp2sky-{version}", urls = ["https://github.com/SkyAPM/cpp2sky/archive/v{version}.tar.gz"], use_category = ["observability_ext"], extensions = 
["envoy.tracers.skywalking"], - release_date = "2021-03-24", + release_date = "2021-06-17", cpe = "N/A", ), com_github_datadog_dd_opentracing_cpp = dict( @@ -668,6 +668,16 @@ REPOSITORY_LOCATIONS_SPEC = dict( urls = ["https://github.com/bazelbuild/rules_python/releases/download/{version}/rules_python-{version}.tar.gz"], use_category = ["build"], ), + rules_pkg = dict( + project_name = "Packaging rules for Bazel", + project_desc = "Bazel rules for the packaging distributions", + project_url = "https://github.com/bazelbuild/rules_pkg", + version = "0.4.0", + sha256 = "038f1caa773a7e35b3663865ffb003169c6a71dc995e39bf4815792f385d837d", + urls = ["https://github.com/bazelbuild/rules_pkg/releases/download/{version}/rules_pkg-{version}.tar.gz"], + use_category = ["build"], + release_date = "2021-03-03", + ), six = dict( project_name = "Six", project_desc = "Python 2 and 3 compatibility library", @@ -784,14 +794,14 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "V8", project_desc = "Google’s open source high-performance JavaScript and WebAssembly engine, written in C++", project_url = "https://v8.dev", - version = "9.2.230.2", + version = "9.2.230.13", # This archive was created using https://storage.googleapis.com/envoyproxy-wee8/wee8-archive.sh # and contains complete checkout of V8 with all dependencies necessary to build wee8. 
- sha256 = "237b9816ee56ad9b86e12e082132d88c543be494385f9bf9797af2a415c05f56", + sha256 = "77b4d6aaabe1dc60bf6bd2523a187d82292c27a2073ec48610dd098e3d4f80ce", urls = ["https://storage.googleapis.com/envoyproxy-wee8/wee8-{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = ["envoy.wasm.runtime.v8"], - release_date = "2021-05-20", + release_date = "2021-06-25", cpe = "cpe:2.3:a:google:v8:*", ), com_googlesource_quiche = dict( @@ -823,10 +833,10 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Common Expression Language (CEL) C++ library", project_desc = "Common Expression Language (CEL) C++ library", project_url = "https://opensource.google/projects/cel", - version = "9841e3ee251f3cc4cd5b6dd9deee6818bc9f2854", - sha256 = "7e42cbad7d1068d6e7891ad101e2863e727692136d6b3a817c487b3cc7bcfdcc", + version = "0.6.1", + sha256 = "d001494f1aa7d88172af944233fac3d7f83d9183d66590aa787aa2a35aab0440", strip_prefix = "cel-cpp-{version}", - urls = ["https://github.com/google/cel-cpp/archive/{version}.tar.gz"], + urls = ["https://github.com/google/cel-cpp/archive/v{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = [ "envoy.access_loggers.wasm", @@ -838,7 +848,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], - release_date = "2020-12-17", + release_date = "2021-06-28", cpe = "N/A", ), com_github_google_flatbuffers = dict( @@ -941,8 +951,8 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "WebAssembly for Proxies (C++ SDK)", project_desc = "WebAssembly for Proxies (C++ SDK)", project_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-sdk", - version = "d9baeb21d46ab07d4eb9295a5d53a1803b7b80af", - sha256 = "b517ac487e0ac4b5d4f951ec805f2e54d5aecece34159b053c5fb781fac5e0f5", + version = "fd0be8405db25de0264bdb78fae3a82668c03782", + sha256 = "c57de2425b5c61d7f630c5061e319b4557ae1f1c7526e5a51c33dc1299471b08", strip_prefix = "proxy-wasm-cpp-sdk-{version}", urls = 
["https://github.com/proxy-wasm/proxy-wasm-cpp-sdk/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], @@ -958,15 +968,15 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.wasm.runtime.wavm", "envoy.wasm.runtime.wasmtime", ], - release_date = "2021-05-15", + release_date = "2021-06-24", cpe = "N/A", ), proxy_wasm_cpp_host = dict( project_name = "WebAssembly for Proxies (C++ host implementation)", project_desc = "WebAssembly for Proxies (C++ host implementation)", project_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-host", - version = "605ee8a0eb78127e81f53bcc3f0b7ec983fb65c2", - sha256 = "7996d1d34ca0cae4079dc2dbe9b8d51ec6db5cb78ab93533bb8cd31ef970b84a", + version = "7b64da1176b42a51b6be2d03f12d578a6028a39b", + sha256 = "6009ce7918abe83326db9b95cafe99d6333edddde7cea3ac054688fe793d0fea", strip_prefix = "proxy-wasm-cpp-host-{version}", urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-host/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], @@ -982,7 +992,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.wasm.runtime.wavm", "envoy.wasm.runtime.wasmtime", ], - release_date = "2021-06-09", + release_date = "2021-06-24", cpe = "N/A", ), proxy_wasm_rust_sdk = dict( diff --git a/ci/Dockerfile-envoy-windows b/ci/Dockerfile-envoy-windows index e2bdfd6819ef1..159bbe7ec7466 100644 --- a/ci/Dockerfile-envoy-windows +++ b/ci/Dockerfile-envoy-windows @@ -3,11 +3,12 @@ ARG BUILD_TAG=ltsc2019 FROM $BUILD_OS:$BUILD_TAG +USER ContainerAdministrator RUN net user /add "EnvoyUser" RUN net localgroup "Network Configuration Operators" "EnvoyUser" /add RUN mkdir "C:\\Program\ Files\\envoy" -RUN setx path "%path%;c:\Program Files\envoy" +RUN setx /M path "%path%;c:\Program Files\envoy" ADD ["windows/amd64/envoy.exe", "C:/Program Files/envoy/"] RUN mkdir "C:\\ProgramData\\envoy" diff --git a/ci/README.md b/ci/README.md index 6841eecf2ea5d..0561facedf73c 100644 --- a/ci/README.md +++ b/ci/README.md @@ -135,8 +135,6 @@ The `./ci/run_envoy_docker.sh './ci/do_ci.sh '` 
targets are: * `bazel.clang_tidy ` — build and run clang-tidy specified source files, if no files specified, runs against the diff with the last GitHub commit. * `check_format`— run `clang-format` and `buildifier` on entire source tree. * `fix_format`— run and enforce `clang-format` and `buildifier` on entire source tree. -* `check_spelling`— run `misspell` on entire project. -* `fix_spelling`— run and enforce `misspell` on entire project. * `check_spelling_pedantic`— run `aspell` on C++ and proto comments. * `docs`— build documentation tree in `generated/docs`. diff --git a/ci/build_setup.sh b/ci/build_setup.sh index a44d0c14d4c37..cedbda30a9e86 100755 --- a/ci/build_setup.sh +++ b/ci/build_setup.sh @@ -64,8 +64,9 @@ then fi # Environment setup. -export TEST_TMPDIR=${BUILD_DIR}/tmp -export PATH=/opt/llvm/bin:${PATH} +export TEST_TMPDIR="${TEST_TMPDIR:-$BUILD_DIR/tmp}" +export LLVM_ROOT="${LLVM_ROOT:-/opt/llvm}" +export PATH=${LLVM_ROOT}/bin:${PATH} export CLANG_FORMAT="${CLANG_FORMAT:-clang-format}" if [[ -f "/etc/redhat-release" ]]; then @@ -81,7 +82,6 @@ function cleanup() { cleanup trap cleanup EXIT -export LLVM_ROOT="${LLVM_ROOT:-/opt/llvm}" "$(dirname "$0")"/../bazel/setup_clang.sh "${LLVM_ROOT}" [[ "${BUILD_REASON}" != "PullRequest" ]] && BAZEL_EXTRA_TEST_OPTIONS+=("--nocache_test_results") diff --git a/ci/check_and_fix_format.sh b/ci/check_and_fix_format.sh index 7d5fe0a54d188..04f4db457461f 100755 --- a/ci/check_and_fix_format.sh +++ b/ci/check_and_fix_format.sh @@ -13,8 +13,6 @@ export FORCE_PYTHON_FORMAT=yes function fix { set +e ci/do_ci.sh fix_format - ci/do_ci.sh fix_spelling - ci/do_ci.sh fix_spelling_pedantic echo "Format check failed, try apply following patch to fix:" git add api git diff HEAD | tee "${DIFF_OUTPUT}" @@ -26,6 +24,3 @@ function fix { trap fix ERR ci/do_ci.sh check_format -ci/do_ci.sh check_repositories -ci/do_ci.sh check_spelling -ci/do_ci.sh check_spelling_pedantic diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 
690dbd511dd8f..32c4bc2b17b7e 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -6,9 +6,9 @@ set -e build_setup_args="" -if [[ "$1" == "format_pre" || "$1" == "fix_format" || "$1" == "check_format" || "$1" == "check_repositories" || \ - "$1" == "check_spelling" || "$1" == "fix_spelling" || "$1" == "bazel.clang_tidy" || "$1" == "tooling" || \ - "$1" == "check_spelling_pedantic" || "$1" == "fix_spelling_pedantic" ]]; then +if [[ "$1" == "format_pre" || "$1" == "fix_format" || "$1" == "check_format" || "$1" == "docs" || \ + "$1" == "bazel.clang_tidy" || "$1" == "tooling" || "$1" == "deps" || "$1" == "verify_examples" || \ + "$1" == "verify_build_examples" ]]; then build_setup_args="-nofetch" fi @@ -144,8 +144,12 @@ function bazel_binary_build() { } function run_process_test_result() { - echo "running flaky test reporting script" - "${ENVOY_SRCDIR}"/ci/flaky_test/run_process_xml.sh "$CI_TARGET" + if [[ $(find "$TEST_TMPDIR" -name "*_attempt.xml" 2> /dev/null) ]]; then + echo "running flaky test reporting script" + "${ENVOY_SRCDIR}"/ci/flaky_test/run_process_xml.sh "$CI_TARGET" + else + echo "no flaky test results found" + fi } function run_ci_verify () { @@ -407,37 +411,15 @@ elif [[ "$CI_TARGET" == "fix_format" ]]; then echo "fix_format..." "${ENVOY_SRCDIR}"/tools/code_format/check_format.py fix - BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" "${ENVOY_SRCDIR}"/tools/proto_format/proto_format.sh fix --test + BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" "${ENVOY_SRCDIR}"/tools/proto_format/proto_format.sh fix exit 0 elif [[ "$CI_TARGET" == "check_format" ]]; then # proto_format.sh needs to build protobuf. setup_clang_toolchain - echo "check_format_test..." - "${ENVOY_SRCDIR}"/tools/code_format/check_format_test_helper.sh --log=WARN echo "check_format..." 
"${ENVOY_SRCDIR}"/tools/code_format/check_format.py check - BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" "${ENVOY_SRCDIR}"/tools/proto_format/proto_format.sh check --test - exit 0 -elif [[ "$CI_TARGET" == "check_repositories" ]]; then - echo "check_repositories..." - "${ENVOY_SRCDIR}"/tools/check_repositories.sh - exit 0 -elif [[ "$CI_TARGET" == "check_spelling" ]]; then - echo "check_spelling..." - "${ENVOY_SRCDIR}"/tools/spelling/check_spelling.sh check - exit 0 -elif [[ "$CI_TARGET" == "fix_spelling" ]];then - echo "fix_spell..." - "${ENVOY_SRCDIR}"/tools/spelling/check_spelling.sh fix - exit 0 -elif [[ "$CI_TARGET" == "check_spelling_pedantic" ]]; then - echo "check_spelling_pedantic..." - "${ENVOY_SRCDIR}"/tools/spelling/check_spelling_pedantic.py --mark check - exit 0 -elif [[ "$CI_TARGET" == "fix_spelling_pedantic" ]]; then - echo "fix_spelling_pedantic..." - "${ENVOY_SRCDIR}"/tools/spelling/check_spelling_pedantic.py fix + BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" "${ENVOY_SRCDIR}"/tools/proto_format/proto_format.sh check exit 0 elif [[ "$CI_TARGET" == "docs" ]]; then echo "generating docs..." @@ -445,16 +427,14 @@ elif [[ "$CI_TARGET" == "docs" ]]; then BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" "${ENVOY_SRCDIR}"/docs/build.sh exit 0 elif [[ "$CI_TARGET" == "deps" ]]; then + echo "verifying dependencies..." # Validate dependency relationships between core/extensions and external deps. - "${ENVOY_SRCDIR}"/tools/dependency/validate_test.py "${ENVOY_SRCDIR}"/tools/dependency/validate.py - # Validate the CVE scanner works. We do it here as well as in cve_scan, since this blocks - # presubmits, but cve_scan only runs async. - bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/dependency:cve_scan_test - # Validate repository metadata. + echo "check repositories..." 
+ "${ENVOY_SRCDIR}"/tools/check_repositories.sh "${ENVOY_SRCDIR}"/ci/check_repository_locations.sh # Run pip requirements tests @@ -467,8 +447,30 @@ elif [[ "$CI_TARGET" == "cve_scan" ]]; then bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/dependency:cve_scan exit 0 elif [[ "$CI_TARGET" == "tooling" ]]; then + setup_clang_toolchain + + # TODO(phlax): move this to a bazel rule + echo "Run pytest tooling tests..." bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/testing:all_pytests -- --cov-html /source/generated/tooling "${ENVOY_SRCDIR}" + + echo "Run protoxform test" + BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" ./tools/protoxform/protoxform_test.sh + + echo "Run merge active shadow test" + bazel test "${BAZEL_BUILD_OPTIONS[@]}" //tools/protoxform:merge_active_shadow_test + + echo "check_format_test..." + "${ENVOY_SRCDIR}"/tools/code_format/check_format_test_helper.sh --log=WARN + + echo "dependency validate_test..." + "${ENVOY_SRCDIR}"/tools/dependency/validate_test.py + + # Validate the CVE scanner works. We do it here as well as in cve_scan, since this blocks + # presubmits, but cve_scan only runs async. + echo "cve_scan_test..." + bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/dependency:cve_scan_test + exit 0 elif [[ "$CI_TARGET" == "verify_examples" ]]; then run_ci_verify "*" "wasm-cc|win32-front-proxy" diff --git a/ci/docker_ci.sh b/ci/docker_ci.sh index ff75b56518a24..5ccf2dbd0c614 100755 --- a/ci/docker_ci.sh +++ b/ci/docker_ci.sh @@ -78,7 +78,6 @@ build_images() { else IMAGE_TAG="${BUILD_TAG}-${ARCH/linux\//}" fi - IMAGES_TO_SAVE+=("${IMAGE_TAG}") # docker buildx load cannot have multiple platform, load individually if ! is_windows; then @@ -138,13 +137,25 @@ fi # Test the docker build in all cases, but use a local tag that we will overwrite before push in the # cases where we do push. 
for BUILD_TYPE in "${BUILD_TYPES[@]}"; do - build_images "${BUILD_TYPE}" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}" + image_tag="${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}" + build_images "${BUILD_TYPE}" "$image_tag" + + if ! is_windows; then + if [[ "$BUILD_TYPE" == "" || "$BUILD_TYPE" == "-alpine" ]]; then + # verify_examples expects the base and alpine images, and for them to be named `-dev` + dev_image="envoyproxy/envoy${BUILD_TYPE}-dev:latest" + docker tag "$image_tag" "$dev_image" + IMAGES_TO_SAVE+=("$dev_image") + fi + fi done mkdir -p "${ENVOY_DOCKER_IMAGE_DIRECTORY}" -ENVOY_DOCKER_TAR="${ENVOY_DOCKER_IMAGE_DIRECTORY}/envoy-docker-images.tar.xz" -echo "Saving built images to ${ENVOY_DOCKER_TAR}." -docker save "${IMAGES_TO_SAVE[@]}" | xz -T0 -2 >"${ENVOY_DOCKER_TAR}" +if [[ ${#IMAGES_TO_SAVE[@]} -ne 0 ]]; then + ENVOY_DOCKER_TAR="${ENVOY_DOCKER_IMAGE_DIRECTORY}/envoy-docker-images.tar.xz" + echo "Saving built images to ${ENVOY_DOCKER_TAR}: ${IMAGES_TO_SAVE[*]}" + docker save "${IMAGES_TO_SAVE[@]}" | xz -T0 -2 >"${ENVOY_DOCKER_TAR}" +fi # Only push images for main builds, release branch builds, and tag builds. if [[ "${AZP_BRANCH}" != "${MAIN_BRANCH}" ]] && diff --git a/ci/flaky_test/process_xml.py b/ci/flaky_test/process_xml.py index 9cbfa56bfbb7e..9eae5129275c8 100755 --- a/ci/flaky_test/process_xml.py +++ b/ci/flaky_test/process_xml.py @@ -4,6 +4,7 @@ import os import xml.etree.ElementTree as ET import slack +from slack.errors import SlackApiError import sys import ssl @@ -273,8 +274,12 @@ def get_git_info(CI_TARGET): ssl_context.verify_mode = ssl.CERT_NONE # Due to a weird interaction between `websocket-client` and Slack client # we need to set the ssl context. 
See `slackapi/python-slack-sdk/issues/334` - client = slack.WebClient(token=SLACKTOKEN, ssl=ssl_context) - client.chat_postMessage(channel='test-flaky', text=output_msg, as_user="true") + try: + client = slack.WebClient(token=SLACKTOKEN, ssl=ssl_context) + client.chat_postMessage(channel='test-flaky', text=output_msg, as_user="true") + except SlackApiError as e: + print("Call to SlackApi failed:", e.response["error"]) + print(output_msg) else: print(output_msg) else: diff --git a/ci/format_pre.sh b/ci/format_pre.sh index e5e928d499115..831e57ca4a298 100755 --- a/ci/format_pre.sh +++ b/ci/format_pre.sh @@ -58,6 +58,13 @@ bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/code_format:python_check -- --diff CURRENT=extensions bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/extensions:extensions_check +CURRENT=spelling +"${ENVOY_SRCDIR}"/tools/spelling/check_spelling_pedantic.py --mark check + +CURRENT=rst +# TODO(phlax): Move this to general docs checking of all rst files +bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/docs:rst_check + if [[ "${#FAILED[@]}" -ne "0" ]]; then echo "${BASH_ERR_PREFIX}TESTS FAILED:" >&2 for failed in "${FAILED[@]}"; do diff --git a/ci/run_envoy_docker.sh b/ci/run_envoy_docker.sh index 9a06ff233acb4..30905c21ac98e 100755 --- a/ci/run_envoy_docker.sh +++ b/ci/run_envoy_docker.sh @@ -61,6 +61,8 @@ mkdir -p "${ENVOY_DOCKER_BUILD_DIR}" export ENVOY_BUILD_IMAGE="${IMAGE_NAME}:${IMAGE_ID}" +time docker pull "${ENVOY_BUILD_IMAGE}" + # Since we specify an explicit hash, docker-run will pull from the remote repo if missing. docker run --rm \ "${ENVOY_DOCKER_OPTIONS[@]}" \ diff --git a/ci/verify_examples.sh b/ci/verify_examples.sh index 8ec2b0701d3e4..8b6d6f719a662 100755 --- a/ci/verify_examples.sh +++ b/ci/verify_examples.sh @@ -30,7 +30,10 @@ run_examples () { local examples example cd "${SRCDIR}/examples" || exit 1 - examples=$(find . -mindepth 1 -maxdepth 1 -type d -name "$TESTFILTER" ! -iname "_*" | grep -Ev "$TESTEXCLUDES" | sort) + examples=$(find . 
-mindepth 1 -maxdepth 1 -type d -name "$TESTFILTER" ! -iname "_*" | sort) + if [[ -n "$TESTEXCLUDES" ]]; then + examples=$(echo "$examples" | grep -Ev "$TESTEXCLUDES") + fi for example in $examples; do pushd "$example" > /dev/null || return 1 ./verify.sh diff --git a/configs/envoy_double_proxy.template.yaml b/configs/envoy_double_proxy.template.yaml index 223a0d5ccf86b..7e922bfd7542a 100644 --- a/configs/envoy_double_proxy.template.yaml +++ b/configs/envoy_double_proxy.template.yaml @@ -60,7 +60,6 @@ "@type": type.googleapis.com/envoy.extensions.filters.http.buffer.v3.Buffer max_request_bytes: 5242880 - name: envoy.filters.http.router - typed_config: {} {% if tracing %} tracing: provider: diff --git a/configs/envoy_front_proxy.template.yaml b/configs/envoy_front_proxy.template.yaml index 48fcec3c0695a..22f6fc2de3b30 100644 --- a/configs/envoy_front_proxy.template.yaml +++ b/configs/envoy_front_proxy.template.yaml @@ -70,7 +70,6 @@ envoy_grpc: cluster_name: ratelimit - name: envoy.filters.http.router - typed_config: {} add_user_agent: true {% if tracing %} tracing: diff --git a/configs/envoy_service_to_service.template.yaml b/configs/envoy_service_to_service.template.yaml index 8d18c862debfc..f55fe4dd0b556 100644 --- a/configs/envoy_service_to_service.template.yaml +++ b/configs/envoy_service_to_service.template.yaml @@ -46,7 +46,6 @@ "@type": type.googleapis.com/envoy.extensions.filters.http.buffer.v3.Buffer max_request_bytes: 5242880 - name: envoy.filters.http.router - typed_config: {} access_log: - name: envoy.access_loggers.file filter: @@ -164,10 +163,7 @@ static_resources: envoy_grpc: cluster_name: ratelimit - name: envoy.filters.http.grpc_http1_bridge - typed_config: {} - name: envoy.filters.http.router - typed_config: {} - - address: socket_address: protocol: TCP @@ -228,9 +224,7 @@ static_resources: envoy_grpc: cluster_name: ratelimit - name: envoy.filters.http.grpc_http1_bridge - typed_config: {} - name: envoy.filters.http.router - typed_config: {} {% if 
external_virtual_hosts|length > 0 or mongos_servers|length > 0 %}{% endif -%} {% for mapping in external_virtual_hosts -%} - name: "{{ mapping['address']}}" @@ -269,10 +263,8 @@ static_resources: http_filters: {% if mapping['name'] in ['dynamodb_iad', 'dynamodb_legacy'] -%} - name: envoy.filters.http.dynamo - typed_config: {} {% endif -%} - name: envoy.filters.http.router - typed_config: {} access_log: - name: envoy.access_loggers.file filter: diff --git a/configs/original-dst-cluster/proxy_config.yaml b/configs/original-dst-cluster/proxy_config.yaml index 1f2ec2128240f..ab57428744ef2 100644 --- a/configs/original-dst-cluster/proxy_config.yaml +++ b/configs/original-dst-cluster/proxy_config.yaml @@ -24,11 +24,9 @@ static_resources: cluster: cluster1 http_filters: - name: envoy.filters.http.router - typed_config: {} codec_type: AUTO listener_filters: - name: envoy.filters.listener.original_dst - typed_config: {} clusters: - name: cluster1 type: ORIGINAL_DST diff --git a/docs/BUILD b/docs/BUILD index e4530086e02db..e2a0c33e0114f 100644 --- a/docs/BUILD +++ b/docs/BUILD @@ -2,6 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_package", ) +load("@rules_pkg//:pkg.bzl", "pkg_tar") licenses(["notice"]) # Apache 2 @@ -10,6 +11,7 @@ exports_files([ "v2_mapping.json", "empty_extensions.json", "redirects.txt", + "VERSION", ]) envoy_package() @@ -41,21 +43,6 @@ filegroup( srcs = glob(["root/**/*.pb"]), ) -filegroup( - name = "base_rst_files", - srcs = glob( - [ - "conf.py", - "_ext/*", - ], - ), -) - -filegroup( - name = "root_rst_files", - srcs = glob(["root/**/*"]), -) - genrule( name = "v2_redirects", outs = ["v2_redirects.txt"], @@ -82,39 +69,48 @@ genrule( ], ) -genrule( - name = "google_vrp_config", - outs = ["google_vrp_config.tar"], - cmd = """ - tar cfh $@ --transform='s/configs/configuration\\/best_practices/' \\ - $(location //configs:google-vrp/envoy-edge.yaml) - """, - tools = ["//configs:google-vrp/envoy-edge.yaml"], +pkg_tar( + name = "sphinx_base", + srcs 
= glob( + [ + "conf.py", + "_ext/*", + ], + ) + [":redirects"], + extension = "tar", + strip_prefix = "/docs/", ) -genrule( +pkg_tar( + name = "sphinx_root", + srcs = glob(["root/**/*"]), + extension = "tar", + strip_prefix = "/docs/root", +) + +pkg_tar( name = "base_rst", - outs = ["base_rst.tar"], - cmd = """ - tar cfh $@ --transform='s/docs\\///' $(locations base_rst_files) \\ - && tar rhf $@ --transform='s/docs\\/root\\///' $(locations root_rst_files) \\ - && tar rf $@ -C $$(dirname $(location redirects)) $$(basename $(location redirects)) - """, - tools = [ - ":base_rst_files", - ":redirects", - ":root_rst_files", + extension = "tar", + deps = [ + ":sphinx_base.tar", + ":sphinx_root.tar", ], ) -genrule( +pkg_tar( + name = "google_vrp_config", + srcs = ["//configs:google-vrp/envoy-edge.yaml"], + extension = "tar", + package_dir = "/best_practices", + strip_prefix = "/configs/configuration", +) + +pkg_tar( name = "examples_rst", - outs = ["examples_rst.tar"], - cmd = """ - tar cfh $@ --transform='s/examples/start\\/sandboxes\\/_include/' \\ - $(locations //examples:files) - """, - tools = ["//examples:files"], + srcs = ["//examples:files"], + extension = "tar", + package_dir = "/start/sandboxes/_include", + strip_prefix = "/examples", ) genrule( @@ -171,23 +167,37 @@ genrule( tools = ["//tools/docs:generate_api_rst"], ) -genrule( +pkg_tar( name = "rst", - outs = ["rst.tar"], - cmd = """ - cat $(location base_rst) > $@ \\ - && tar -Af $@ $(location api_rst) \\ - && tar -Af $@ $(location examples_rst) \\ - && tar -Af $@ $(location extensions_security_rst) \\ - && tar -Af $@ $(location external_deps_rst) \\ - && tar -Af $@ $(location empty_protos_rst) - """, - tools = [ + extension = "tar", + deps = [ ":api_rst", - ":base_rst", + ":base_rst.tar", ":empty_protos_rst", - ":examples_rst", + ":examples_rst.tar", ":extensions_security_rst", ":external_deps_rst", ], ) + +genrule( + name = "html", + outs = ["html.tar"], + cmd = """ + $(location //tools/docs:sphinx_runner) 
\\ + --build_sha="$${BUILD_SHA:-}" \\ + --docs_tag="$${DOCS_TAG:-}" \\ + --version_file=$(location //:VERSION) \\ + --validator_path=$(location //tools/config_validation:validate_fragment) \\ + --descriptor_path=$(location //tools/type_whisperer:all_protos_with_ext_pb_text.pb_text) \\ + $(location rst) \\ + $@ + """, + exec_tools = [ + "//tools/docs:sphinx_runner", + ":rst", + "//tools/config_validation:validate_fragment", + "//tools/type_whisperer:all_protos_with_ext_pb_text.pb_text", + "//:VERSION", + ], +) diff --git a/docs/build.sh b/docs/build.sh index 76c7e9923bd19..e9ccb53a0d1cd 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -3,8 +3,6 @@ # set SPHINX_SKIP_CONFIG_VALIDATION environment variable to true to skip # validation of configuration examples -. tools/shell_utils.sh - set -e if [[ ! $(command -v bazel) ]]; then @@ -21,83 +19,25 @@ fi RELEASE_TAG_REGEX="^refs/tags/v.*" if [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then - DOCS_TAG="${AZP_BRANCH/refs\/tags\//}" -fi - -# We need to set ENVOY_DOCS_VERSION_STRING and ENVOY_DOCS_RELEASE_LEVEL for Sphinx. -# We also validate that the tag and version match at this point if needed. -VERSION_NUMBER=$(cat VERSION) -export DOCKER_IMAGE_TAG_NAME -DOCKER_IMAGE_TAG_NAME=$(echo "$VERSION_NUMBER" | sed -E 's/([0-9]+\.[0-9]+)\.[0-9]+.*/v\1-latest/') -if [[ -n "${DOCS_TAG}" ]]; then - # Check the git tag matches the version number in the VERSION file. - if [[ "v${VERSION_NUMBER}" != "${DOCS_TAG}" ]]; then - echo "Given git tag does not match the VERSION file content:" - echo "${DOCS_TAG} vs $(cat VERSION)" - exit 1 - fi - # Check the version_history.rst contains current release version. - grep --fixed-strings "$VERSION_NUMBER" docs/root/version_history/current.rst \ - || (echo "Git tag not found in version_history/current.rst" && exit 1) - - # Now that we know there is a match, we can use the tag. 
- export ENVOY_DOCS_VERSION_STRING="tag-${DOCS_TAG}" - export ENVOY_DOCS_RELEASE_LEVEL=tagged - export ENVOY_BLOB_SHA="${DOCS_TAG}" + DOCS_TAG="${AZP_BRANCH/refs\/tags\//}" + export DOCS_TAG else - BUILD_SHA=$(git rev-parse HEAD) - export ENVOY_DOCS_VERSION_STRING="${VERSION_NUMBER}"-"${BUILD_SHA:0:6}" - export ENVOY_DOCS_RELEASE_LEVEL=pre-release - export ENVOY_BLOB_SHA="$BUILD_SHA" + BUILD_SHA=$(git rev-parse HEAD) + export BUILD_SHA fi -SCRIPT_DIR="$(dirname "$0")" -BUILD_DIR=build_docs -[[ -z "${DOCS_OUTPUT_DIR}" ]] && DOCS_OUTPUT_DIR=generated/docs -[[ -z "${GENERATED_RST_DIR}" ]] && GENERATED_RST_DIR=generated/rst - -rm -rf "${DOCS_OUTPUT_DIR}" -mkdir -p "${DOCS_OUTPUT_DIR}" - -rm -rf "${GENERATED_RST_DIR}" -mkdir -p "${GENERATED_RST_DIR}" - -source_venv "$BUILD_DIR" -pip3 install --require-hashes -r "${SCRIPT_DIR}"/requirements.txt - -# Clean up any stale files in the API tree output. Bazel remembers valid cached -# files still. -rm -rf bazel-bin/external/envoy_api_canonical - -GENERATED_RST_DIR="$(realpath "${GENERATED_RST_DIR}")" -export GENERATED_RST_DIR - # This is for local RBE setup, should be no-op for builds without RBE setting in bazelrc files. 
IFS=" " read -ra BAZEL_BUILD_OPTIONS <<< "${BAZEL_BUILD_OPTIONS:-}" BAZEL_BUILD_OPTIONS+=( "--remote_download_outputs=all" "--strategy=protodoc=sandboxed,local" - "--action_env=ENVOY_BLOB_SHA") - -bazel build "${BAZEL_BUILD_OPTIONS[@]}" //docs:rst - -# TODO(phlax): once all of above jobs are moved to bazel build genrules these can be done as part of the sphinx build -tar -xf bazel-bin/docs/rst.tar -C "${GENERATED_RST_DIR}" + "--action_env=DOCS_TAG" + "--action_env=BUILD_SHA" + "--action_env=SPHINX_SKIP_CONFIG_VALIDATION") -# TODO(phlax): these will move to python -ENVOY_DOCS_BUILD_CONFIG="${GENERATED_RST_DIR}/build.yaml" -{ - echo "blob_sha: ${ENVOY_BLOB_SHA}" - echo "release_level: ${ENVOY_DOCS_RELEASE_LEVEL}" - echo "version_string: ${ENVOY_DOCS_VERSION_STRING}" - echo "docker_image_tag_name: ${DOCKER_IMAGE_TAG_NAME}" -} > "$ENVOY_DOCS_BUILD_CONFIG" -if [[ -n "$SPHINX_SKIP_CONFIG_VALIDATION" ]]; then - echo "skip_validation: true" >> "$ENVOY_DOCS_BUILD_CONFIG" -fi -export ENVOY_DOCS_BUILD_CONFIG - -# To speed up validate_fragment invocations in validating_code_block -bazel build "${BAZEL_BUILD_OPTIONS[@]}" //tools/config_validation:validate_fragment +bazel build "${BAZEL_BUILD_OPTIONS[@]}" //docs:html -sphinx-build -W --keep-going -b html "${GENERATED_RST_DIR}" "${DOCS_OUTPUT_DIR}" +[[ -z "${DOCS_OUTPUT_DIR}" ]] && DOCS_OUTPUT_DIR=generated/docs +rm -rf "${DOCS_OUTPUT_DIR}" +mkdir -p "${DOCS_OUTPUT_DIR}" +tar -xf bazel-bin/docs/html.tar -C "$DOCS_OUTPUT_DIR" diff --git a/docs/requirements.txt b/docs/requirements.txt deleted file mode 100644 index 50edcc50a932d..0000000000000 --- a/docs/requirements.txt +++ /dev/null @@ -1,261 +0,0 @@ -# -# This file is autogenerated by pip-compile -# To update, run: -# -# pip-compile --generate-hashes docs/requirements.txt -# -alabaster==0.7.12 \ - --hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \ - --hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02 - # via - # -r 
docs/requirements.txt - # sphinx -babel==2.9.1 \ - --hash=sha256:ab49e12b91d937cd11f0b67cb259a57ab4ad2b59ac7a3b41d6c06c0ac5b0def9 \ - --hash=sha256:bc0c176f9f6a994582230df350aa6e05ba2ebe4b3ac317eab29d9be5d2768da0 - # via - # -r docs/requirements.txt - # sphinx -certifi==2021.5.30 \ - --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \ - --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee - # via - # -r docs/requirements.txt - # requests -chardet==4.0.0 \ - --hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa \ - --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 - # via - # -r docs/requirements.txt - # requests -docutils==0.16 \ - --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \ - --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc - # via - # -r docs/requirements.txt - # sphinx - # sphinx-rtd-theme - # sphinx-tabs -gitdb==4.0.7 \ - --hash=sha256:6c4cc71933456991da20917998acbe6cf4fb41eeaab7d6d67fbc05ecd4c865b0 \ - --hash=sha256:96bf5c08b157a666fec41129e6d327235284cca4c81e92109260f353ba138005 - # via - # -r docs/requirements.txt - # gitpython -gitpython==3.1.17 \ - --hash=sha256:29fe82050709760081f588dd50ce83504feddbebdc4da6956d02351552b1c135 \ - --hash=sha256:ee24bdc93dce357630764db659edaf6b8d664d4ff5447ccfeedd2dc5c253f41e - # via -r docs/requirements.txt -idna==2.10 \ - --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \ - --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0 - # via - # -r docs/requirements.txt - # requests -imagesize==1.2.0 \ - --hash=sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1 \ - --hash=sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1 - # via - # -r docs/requirements.txt - # sphinx -jinja2==3.0.1 \ - 
--hash=sha256:1f06f2da51e7b56b8f238affdd6b4e2c61e39598a378cc49345bc1bd42a978a4 \ - --hash=sha256:703f484b47a6af502e743c9122595cc812b0271f661722403114f71a79d0f5a4 - # via - # -r docs/requirements.txt - # sphinx -markupsafe==2.0.1 \ - --hash=sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51 \ - --hash=sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff \ - --hash=sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b \ - --hash=sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94 \ - --hash=sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872 \ - --hash=sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f \ - --hash=sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d \ - --hash=sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9 \ - --hash=sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567 \ - --hash=sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18 \ - --hash=sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f \ - --hash=sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f \ - --hash=sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2 \ - --hash=sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d \ - --hash=sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415 \ - --hash=sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914 \ - --hash=sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066 \ - --hash=sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35 \ - --hash=sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b \ - --hash=sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298 \ - --hash=sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75 \ - 
--hash=sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb \ - --hash=sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64 \ - --hash=sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833 \ - --hash=sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26 \ - --hash=sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7 \ - --hash=sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8 \ - --hash=sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5 \ - --hash=sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135 \ - --hash=sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902 \ - --hash=sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509 \ - --hash=sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74 \ - --hash=sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8 \ - --hash=sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a - # via - # -r docs/requirements.txt - # jinja2 - # sphinx -packaging==20.9 \ - --hash=sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5 \ - --hash=sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a - # via - # -r docs/requirements.txt - # sphinx -pygments==2.9.0 \ - --hash=sha256:a18f47b506a429f6f4b9df81bb02beab9ca21d0a5fee38ed15aef65f0545519f \ - --hash=sha256:d66e804411278594d764fc69ec36ec13d9ae9147193a1740cd34d272ca383b8e - # via - # -r docs/requirements.txt - # sphinx - # sphinx-tabs -pyparsing==2.4.7 \ - --hash=sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1 \ - --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b - # via - # -r docs/requirements.txt - # packaging -pytz==2021.1 \ - --hash=sha256:83a4a90894bf38e243cf052c8b58f381bfe9a7a483f6a9cab140bc7f702ac4da \ - 
--hash=sha256:eb10ce3e7736052ed3623d49975ce333bcd712c7bb19a58b9e2089d4057d0798 - # via - # -r docs/requirements.txt - # babel -pyyaml==5.4.1 \ - --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \ - --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \ - --hash=sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393 \ - --hash=sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77 \ - --hash=sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922 \ - --hash=sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5 \ - --hash=sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8 \ - --hash=sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10 \ - --hash=sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc \ - --hash=sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018 \ - --hash=sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e \ - --hash=sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253 \ - --hash=sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347 \ - --hash=sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183 \ - --hash=sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541 \ - --hash=sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb \ - --hash=sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185 \ - --hash=sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc \ - --hash=sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db \ - --hash=sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa \ - --hash=sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46 \ - --hash=sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122 \ - 
--hash=sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b \ - --hash=sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63 \ - --hash=sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df \ - --hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \ - --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \ - --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \ - --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 - # via -r docs/requirements.txt -requests==2.25.1 \ - --hash=sha256:27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804 \ - --hash=sha256:c210084e36a42ae6b9219e00e48287def368a26d03a048ddad7bfee44f75871e - # via - # -r docs/requirements.txt - # sphinx -six==1.16.0 \ - --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ - --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 - # via - # -r docs/requirements.txt - # sphinxcontrib-httpdomain -smmap==4.0.0 \ - --hash=sha256:7e65386bd122d45405ddf795637b7f7d2b532e7e401d46bbe3fb49b9986d5182 \ - --hash=sha256:a9a7479e4c572e2e775c404dcd3080c8dc49f39918c2cf74913d30c4c478e3c2 - # via - # -r docs/requirements.txt - # gitdb -snowballstemmer==2.1.0 \ - --hash=sha256:b51b447bea85f9968c13b650126a888aabd4cb4463fca868ec596826325dedc2 \ - --hash=sha256:e997baa4f2e9139951b6f4c631bad912dfd3c792467e2f03d7239464af90e914 - # via - # -r docs/requirements.txt - # sphinx -sphinx-copybutton==0.3.1 \ - --hash=sha256:0e0461df394515284e3907e3f418a0c60ef6ab6c9a27a800c8552772d0a402a2 \ - --hash=sha256:5125c718e763596e6e52d92e15ee0d6f4800ad3817939be6dee51218870b3e3d - # via -r docs/requirements.txt -sphinx-rtd-theme==0.5.2 \ - --hash=sha256:32bd3b5d13dc8186d7a42fc816a23d32e83a4827d7d9882948e7b837c232da5a \ - --hash=sha256:4a05bdbe8b1446d77a01e20a23ebc6777c74f43237035e76be89699308987d6f - # via -r 
docs/requirements.txt -sphinx-tabs==3.0.0 \ - --hash=sha256:2abbcaaa3b8a857de06f3db31762a7bdd17aba1b8979d000f193debe6f917c2c \ - --hash=sha256:3f766762fffacc99828cb877a9e4cb8ac0ba3582f2a054ea68248e5e026e5612 - # via -r docs/requirements.txt -sphinx==4.0.2 \ - --hash=sha256:d1cb10bee9c4231f1700ec2e24a91be3f3a3aba066ea4ca9f3bbe47e59d5a1d4 \ - --hash=sha256:b5c2ae4120bf00c799ba9b3699bc895816d272d120080fbc967292f29b52b48c - # via - # -r docs/requirements.txt - # sphinx-copybutton - # sphinx-rtd-theme - # sphinx-tabs - # sphinxcontrib-httpdomain - # sphinxext-rediraffe -sphinxcontrib-applehelp==1.0.2 \ - --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \ - --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58 - # via - # -r docs/requirements.txt - # sphinx -sphinxcontrib-devhelp==1.0.2 \ - --hash=sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e \ - --hash=sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4 - # via - # -r docs/requirements.txt - # sphinx -sphinxcontrib-htmlhelp==2.0.0 \ - --hash=sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07 \ - --hash=sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2 - # via - # -r docs/requirements.txt - # sphinx -sphinxcontrib-httpdomain==1.7.0 \ - --hash=sha256:1fb5375007d70bf180cdd1c79e741082be7aa2d37ba99efe561e1c2e3f38191e \ - --hash=sha256:ac40b4fba58c76b073b03931c7b8ead611066a6aebccafb34dc19694f4eb6335 - # via -r docs/requirements.txt -sphinxcontrib-jsmath==1.0.1 \ - --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \ - --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8 - # via - # -r docs/requirements.txt - # sphinx -sphinxcontrib-qthelp==1.0.3 \ - --hash=sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72 \ - --hash=sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6 - # via - # -r 
docs/requirements.txt - # sphinx -sphinxcontrib-serializinghtml==1.1.5 \ - --hash=sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd \ - --hash=sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952 - # via - # -r docs/requirements.txt - # sphinx -sphinxext-rediraffe==0.2.7 \ - --hash=sha256:651dcbfae5ffda9ffd534dfb8025f36120e5efb6ea1a33f5420023862b9f725d \ - --hash=sha256:9e430a52d4403847f4ffb3a8dd6dfc34a9fe43525305131f52ed899743a5fd8c - # via -r docs/requirements.txt -urllib3==1.26.5 \ - --hash=sha256:753a0374df26658f99d826cfe40394a686d05985786d946fbe4165b5148f5a7c \ - --hash=sha256:a7acd0977125325f516bda9735fa7142b909a8d01e8b2e4c8108d0984e6e0098 - # via - # -r docs/requirements.txt - # requests - -# WARNING: The following packages were not pinned, but pip requires them to be -# pinned when the requirements file includes hashes. Consider using the --allow-unsafe flag. -# setuptools diff --git a/docs/root/api-v3/common_messages/common_messages.rst b/docs/root/api-v3/common_messages/common_messages.rst index 2826b6c67ecdb..ea123c074ca1e 100644 --- a/docs/root/api-v3/common_messages/common_messages.rst +++ b/docs/root/api-v3/common_messages/common_messages.rst @@ -27,4 +27,5 @@ Common messages ../extensions/filters/common/dependency/v3/dependency.proto ../extensions/filters/common/matcher/action/v3/skip_action.proto ../extensions/matching/input_matchers/consistent_hashing/v3/consistent_hashing.proto + ../extensions/matching/input_matchers/ip/v3/ip.proto ../extensions/matching/common_inputs/environment_variable/v3/input.proto diff --git a/docs/root/configuration/advanced/well_known_dynamic_metadata.rst b/docs/root/configuration/advanced/well_known_dynamic_metadata.rst index f6b596441eab1..b051545ea9253 100644 --- a/docs/root/configuration/advanced/well_known_dynamic_metadata.rst +++ b/docs/root/configuration/advanced/well_known_dynamic_metadata.rst @@ -4,8 +4,8 @@ Well Known Dynamic Metadata =========================== Filters 
can emit dynamic metadata via the *setDynamicMetadata* routine in the -:repo:`StreamInfo ` interface on a -:repo:`Connection `. This metadata emitted by a filter can be +:repo:`StreamInfo ` interface on a +:repo:`Connection `. This metadata emitted by a filter can be consumed by other filters and useful features can be built by stacking such filters. For example, a logging filter can consume dynamic metadata from an RBAC filter to log details about runtime shadow rule behavior. Another example is where an RBAC filter permits/restricts MySQL/MongoDB operations @@ -15,6 +15,8 @@ The following Envoy filters emit dynamic metadata that other filters can leverag * :ref:`External Authorization Filter ` * :ref:`External Authorization Network Filter ` +* :ref:`Header-To-Metadata Filter ` +* :ref:`JWT Authentication Filter ` * :ref:`Mongo Proxy Filter ` * :ref:`MySQL Proxy Filter ` * :ref:`Postgres Proxy Filter ` diff --git a/docs/root/configuration/best_practices/_include/edge.yaml b/docs/root/configuration/best_practices/_include/edge.yaml index 29f56fbac089c..7bb8280965356 100644 --- a/docs/root/configuration/best_practices/_include/edge.yaml +++ b/docs/root/configuration/best_practices/_include/edge.yaml @@ -32,7 +32,6 @@ static_resources: port_value: 443 listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} # Uncomment if Envoy is behind a load balancer that exposes client IP address using the PROXY protocol. 
# - name: envoy.filters.listener.proxy_protocol # typed_config: diff --git a/docs/root/configuration/http/http_conn_man/stats.rst b/docs/root/configuration/http/http_conn_man/stats.rst index d3c4b049d17bc..8df5877557b62 100644 --- a/docs/root/configuration/http/http_conn_man/stats.rst +++ b/docs/root/configuration/http/http_conn_man/stats.rst @@ -195,6 +195,12 @@ On the upstream side all http3 statistics are rooted at *cluster..http3.* rx_reset, Counter, Total number of reset stream frames received by Envoy tx_reset, Counter, Total number of reset stream frames transmitted by Envoy metadata_not_supported_error, Counter, Total number of metadata dropped during HTTP/3 encoding + quic_version_43, Counter, Total number of quic connections that use transport version 43. This is expected to be removed when this version is deprecated. + quic_version_46, Counter, Total number of quic connections that use transport version 46. This is expected to be removed when this version is deprecated. + quic_version_50, Counter, Total number of quic connections that use transport version 50. This is expected to be removed when this version is deprecated. + quic_version_51, Counter, Total number of quic connections that use transport version 51. This is expected to be removed when this version is deprecated. + quic_version_h3_29, Counter, Total number of quic connections that use transport version h3-29. This is expected to be removed when this version is deprecated. + quic_version_rfc_v1, Counter, Total number of quic connections that use transport version rfc-v1. 
Tracing statistics diff --git a/docs/root/configuration/http/http_filters/_include/bandwidth-limit-filter.yaml b/docs/root/configuration/http/http_filters/_include/bandwidth-limit-filter.yaml index 76ab6d35ad922..00d3415149f52 100644 --- a/docs/root/configuration/http/http_filters/_include/bandwidth-limit-filter.yaml +++ b/docs/root/configuration/http/http_filters/_include/bandwidth-limit-filter.yaml @@ -34,8 +34,6 @@ static_resources: "@type": type.googleapis.com/envoy.extensions.filters.http.bandwidth_limit.v3alpha.BandwidthLimit stat_prefix: bandwidth_limiter_default - name: envoy.filters.http.router - typed_config: {} - clusters: - name: service_protected_by_bandwidth_limit type: STRICT_DNS diff --git a/docs/root/configuration/http/http_filters/_include/grpc-reverse-bridge-filter.yaml b/docs/root/configuration/http/http_filters/_include/grpc-reverse-bridge-filter.yaml index 9d62826782fbc..5ba17b1bf17e5 100644 --- a/docs/root/configuration/http/http_filters/_include/grpc-reverse-bridge-filter.yaml +++ b/docs/root/configuration/http/http_filters/_include/grpc-reverse-bridge-filter.yaml @@ -50,7 +50,6 @@ static_resources: content_type: application/grpc+proto withhold_grpc_frames: true - name: envoy.filters.http.router - typed_config: {} clusters: - name: other type: LOGICAL_DNS diff --git a/docs/root/configuration/http/http_filters/lua_filter.rst b/docs/root/configuration/http/http_filters/lua_filter.rst index 05cd149fc6ba8..72ec360587ca3 100644 --- a/docs/root/configuration/http/http_filters/lua_filter.rst +++ b/docs/root/configuration/http/http_filters/lua_filter.rst @@ -459,7 +459,7 @@ streamInfo() local streamInfo = handle:streamInfo() -Returns :repo:`information ` related to the current request. +Returns :repo:`information ` related to the current request. Returns a :ref:`stream info object `. @@ -470,7 +470,7 @@ connection() local connection = handle:connection() -Returns the current request's underlying :repo:`connection `. 
+Returns the current request's underlying :repo:`connection `. Returns a :ref:`connection object `. @@ -658,7 +658,7 @@ protocol() streamInfo:protocol() -Returns the string representation of :repo:`HTTP protocol ` +Returns the string representation of :repo:`HTTP protocol ` used by the current request. The possible values are: ``HTTP/1.0``, ``HTTP/1.1``, ``HTTP/2`` and ``HTTP/3*``. downstreamLocalAddress() @@ -668,7 +668,7 @@ downstreamLocalAddress() streamInfo:downstreamLocalAddress() -Returns the string representation of :repo:`downstream remote address ` +Returns the string representation of :repo:`downstream remote address ` used by the current request. downstreamDirectRemoteAddress() @@ -678,7 +678,7 @@ downstreamDirectRemoteAddress() streamInfo:downstreamDirectRemoteAddress() -Returns the string representation of :repo:`downstream directly connected address ` +Returns the string representation of :repo:`downstream directly connected address ` used by the current request. This is equivalent to the address of the physical connection. dynamicMetadata() @@ -697,7 +697,7 @@ downstreamSslConnection() streamInfo:downstreamSslConnection() -Returns :repo:`information ` related to the current SSL connection. +Returns :repo:`information ` related to the current SSL connection. Returns a downstream :ref:`SSL connection info object `. @@ -710,7 +710,7 @@ requestedServerName() streamInfo:requestedServerName() -Returns the string representation of :repo:`requested server name ` +Returns the string representation of :repo:`requested server name ` (e.g. SNI in TLS) for the current request if present. Dynamic metadata object API @@ -784,7 +784,7 @@ ssl() print("secure") end -Returns :repo:`SSL connection ` object when the connection is +Returns :repo:`SSL connection ` object when the connection is secured and *nil* when it is not. Returns an :ref:`SSL connection info object `. 
diff --git a/docs/root/configuration/http/http_filters/original_src_filter.rst b/docs/root/configuration/http/http_filters/original_src_filter.rst index ee6b1106aac93..189cbaa352043 100644 --- a/docs/root/configuration/http/http_filters/original_src_filter.rst +++ b/docs/root/configuration/http/http_filters/original_src_filter.rst @@ -73,4 +73,3 @@ The following example configures Envoy to use the original source for all connec "@type": type.googleapis.com/envoy.extensions.filters.listener.original_src.v3.OriginalSrc mark: 123 - name: envoy.filters.http.router - typed_config: {} diff --git a/docs/root/configuration/listeners/listener_filters/http_inspector.rst b/docs/root/configuration/listeners/listener_filters/http_inspector.rst index a7a6b83180a9d..8f6217bfcffce 100644 --- a/docs/root/configuration/listeners/listener_filters/http_inspector.rst +++ b/docs/root/configuration/listeners/listener_filters/http_inspector.rst @@ -20,7 +20,6 @@ A sample filter configuration could be: listener_filters: - name: "envoy.filters.listener.http_inspector" - typed_config: {} Statistics ---------- diff --git a/docs/root/configuration/listeners/listener_filters/tls_inspector.rst b/docs/root/configuration/listeners/listener_filters/tls_inspector.rst index 75081b8b32909..e863d895946c1 100644 --- a/docs/root/configuration/listeners/listener_filters/tls_inspector.rst +++ b/docs/root/configuration/listeners/listener_filters/tls_inspector.rst @@ -29,7 +29,6 @@ A sample filter configuration could be: listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} Or by specifying the `type_url `_ of the *typed_config*: diff --git a/docs/root/configuration/listeners/network_filters/rocketmq_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/rocketmq_proxy_filter.rst index 111a56446cf75..727b74b8eef35 100644 --- a/docs/root/configuration/listeners/network_filters/rocketmq_proxy_filter.rst +++ 
b/docs/root/configuration/listeners/network_filters/rocketmq_proxy_filter.rst @@ -3,6 +3,9 @@ RocketMQ proxy ============== +* :ref:`v3 API reference ` +* This filter should be configured with the name ``envoy.filters.network.rocketmq_proxy``. + Apache RocketMQ is a distributed messaging system, which is composed of four types of roles: producer, consumer, name server and broker server. The former two are embedded into user application in form of SDK; whilst the latter are standalone servers. diff --git a/docs/root/configuration/listeners/network_filters/wasm_filter.rst b/docs/root/configuration/listeners/network_filters/wasm_filter.rst index de8baaaf4e6e9..fe14e1c586830 100644 --- a/docs/root/configuration/listeners/network_filters/wasm_filter.rst +++ b/docs/root/configuration/listeners/network_filters/wasm_filter.rst @@ -3,7 +3,7 @@ Wasm Network Filter =================== -* :ref:`v3 API reference ` +* :ref:`v3 API reference ` .. attention:: diff --git a/docs/root/configuration/observability/access_log/usage.rst b/docs/root/configuration/observability/access_log/usage.rst index ef4e0ec9b0103..deccf0abb0a9a 100644 --- a/docs/root/configuration/observability/access_log/usage.rst +++ b/docs/root/configuration/observability/access_log/usage.rst @@ -323,6 +323,7 @@ The following command operators are supported: * **DPE**: The downstream request had an HTTP protocol error. * **UPE**: The upstream response had an HTTP protocol error. * **UMSDR**: The upstream request reached to max stream duration. + * **OM**: Overload Manager terminated the request. %ROUTE_NAME% Name of the route. @@ -449,7 +450,7 @@ The following command operators are supported: where NAMESPACE is the filter namespace used when setting the metadata, KEY is an optional lookup up key in the namespace with the option of specifying nested keys separated by ':', and Z is an optional parameter denoting string truncation up to Z characters long. 
Dynamic Metadata - can be set by filters using the :repo:`StreamInfo ` API: + can be set by filters using the :repo:`StreamInfo ` API: *setDynamicMetadata*. The data will be logged as a JSON string. For example, for the following dynamic metadata: ``com.test.my_filter: {"test_key": "foo", "test_object": {"inner_key": "bar"}}`` diff --git a/docs/root/configuration/overview/mgmt_server.rst b/docs/root/configuration/overview/mgmt_server.rst index 4d1d8406789ec..33e10ee404b8f 100644 --- a/docs/root/configuration/overview/mgmt_server.rst +++ b/docs/root/configuration/overview/mgmt_server.rst @@ -53,6 +53,7 @@ The following statistics are generated for all subscriptions. :widths: 1, 1, 2 config_reload, Counter, Total API fetches that resulted in a config reload due to a different config + config_reload_time_ms, Gauge, Timestamp of the last config reload as milliseconds since the epoch init_fetch_timeout, Counter, Total :ref:`initial fetch timeouts ` update_attempt, Counter, Total API fetches attempted update_success, Counter, Total API fetches completed successfully diff --git a/docs/root/faq/configuration/sni.rst b/docs/root/faq/configuration/sni.rst index e35cf141df02f..9b33302c595ec 100644 --- a/docs/root/faq/configuration/sni.rst +++ b/docs/root/faq/configuration/sni.rst @@ -19,7 +19,6 @@ The following is a YAML example of the above requirement. socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - filter_chain_match: server_names: ["example.com", "www.example.com"] diff --git a/docs/root/intro/_include/life-of-a-request.yaml b/docs/root/intro/_include/life-of-a-request.yaml index f1517c3807cde..f85b9e3b0d48f 100644 --- a/docs/root/intro/_include/life-of-a-request.yaml +++ b/docs/root/intro/_include/life-of-a-request.yaml @@ -11,7 +11,6 @@ static_resources: # A single listener filter exists for TLS inspector. 
listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} # On the listener, there is a single filter chain that matches SNI for acme.com. filter_chains: - filter_chain_match: diff --git a/docs/root/intro/arch_overview/other_features/ip_transparency.rst b/docs/root/intro/arch_overview/other_features/ip_transparency.rst index a648f1ca54f80..76ed11b5f5928 100644 --- a/docs/root/intro/arch_overview/other_features/ip_transparency.rst +++ b/docs/root/intro/arch_overview/other_features/ip_transparency.rst @@ -137,3 +137,7 @@ Some drawbacks to the Original Source filter: :ref:`x-forwarded-for ` header. * Its configuration is relatively complex. * It may introduce a slight performance hit due to restrictions on connection pooling. + +.. note:: + + This feature is not supported on Windows. diff --git a/docs/root/intro/arch_overview/upstream/outlier.rst b/docs/root/intro/arch_overview/upstream/outlier.rst index 7b8505f67d340..a0c6293c90a54 100644 --- a/docs/root/intro/arch_overview/upstream/outlier.rst +++ b/docs/root/intro/arch_overview/upstream/outlier.rst @@ -13,7 +13,8 @@ independently, and form the basis for an overall upstream health checking soluti Outlier detection is part of the :ref:`cluster configuration ` and it needs filters to report errors, timeouts, and resets. Currently, the following filters support outlier detection: :ref:`http router `, -:ref:`tcp proxy ` and :ref:`redis proxy `. +:ref:`tcp proxy `, +:ref:`redis proxy ` and :ref:`thrift proxy `. Detected errors fall into two categories: externally and locally originated errors. Externally generated errors are transaction specific and occur on the upstream server in response to the received request. For example, an HTTP server returning error code 500 or a redis server returning a payload which cannot be decoded. Those errors are generated on the upstream host after Envoy has connected to it successfully. 
diff --git a/docs/root/intro/life_of_a_request.rst b/docs/root/intro/life_of_a_request.rst index fbd7cccdf21f5..0d0fba49628a7 100644 --- a/docs/root/intro/life_of_a_request.rst +++ b/docs/root/intro/life_of_a_request.rst @@ -254,7 +254,7 @@ chain. :width: 80% :align: center -The TLS inspector filter implements the :repo:`ListenerFilter ` +The TLS inspector filter implements the :repo:`ListenerFilter ` interface. All filter interfaces, whether listener or network/HTTP, require that filters implement callbacks for specific connection or stream events. In the case of ``ListenerFilter``, this is: @@ -281,7 +281,7 @@ connection. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Envoy offers pluggable transport sockets via the -:repo:`TransportSocket ` +:repo:`TransportSocket ` extension interface. Transport sockets follow the lifecycle events of a TCP connection and read/write into network buffers. Some key methods that transport sockets must implement are: @@ -323,11 +323,11 @@ lifecycle events and are invoked as data becomes available from the transport so Network filters are composed as a pipeline, unlike transport sockets which are one-per-connection. Network filters come in three varieties: -* :repo:`ReadFilter ` implementing ``onData()``, called when data is +* :repo:`ReadFilter ` implementing ``onData()``, called when data is available from the connection (due to some request). -* :repo:`WriteFilter ` implementing ``onWrite()``, called when data +* :repo:`WriteFilter ` implementing ``onWrite()``, called when data is about to be written to the connection (due to some response). -* :repo:`Filter ` implementing both *ReadFilter* and *WriteFilter*. +* :repo:`Filter ` implementing both *ReadFilter* and *WriteFilter*. The method signatures for the key filter methods are: @@ -388,9 +388,9 @@ following the pattern established above for listener and network filter chains. 
There are three kinds of HTTP filter interfaces: -* :repo:`StreamDecoderFilter ` with callbacks for request processing. -* :repo:`StreamEncoderFilter ` with callbacks for response processing. -* :repo:`StreamFilter ` implementing both ``StreamDecoderFilter`` and +* :repo:`StreamDecoderFilter ` with callbacks for request processing. +* :repo:`StreamEncoderFilter ` with callbacks for response processing. +* :repo:`StreamFilter ` implementing both ``StreamDecoderFilter`` and ``StreamEncoderFilter``. Looking at the decoder filter interface: diff --git a/docs/root/operations/admin.rst b/docs/root/operations/admin.rst index 8d53d4c613ad0..6c785184a3cdb 100644 --- a/docs/root/operations/admin.rst +++ b/docs/root/operations/admin.rst @@ -194,6 +194,25 @@ modify different aspects of the server: field, use the mask query parameter documented above. If you want only a subset of fields from the repeated resource, use both as documented below. +.. _operations_admin_interface_config_dump_by_name_regex: + +.. http:get:: /config_dump?name_regex={} + + Dump only the currently loaded configurations whose names match the specified regex. Can be used with + both `resource` and `mask` query parameters. + + For example, ``/config_dump?name_regex=.*substring.*`` would return all resource types + whose name field matches the given regex. + + Per resource, the matched name field is: + + - :ref:`envoy.config.listener.v3.Listener.name ` + - :ref:`envoy.config.route.v3.RouteConfiguration.name ` + - :ref:`envoy.config.route.v3.ScopedRouteConfiguration.name ` + - :ref:`envoy.config.cluster.v3.Cluster.name ` + - :ref:`envoy.extensions.transport_sockets.tls.v3.Secret ` + - :ref:`envoy.config.endpoint.v3.ClusterLoadAssignment ` + .. _operations_admin_interface_config_dump_by_resource_and_mask: .. 
http:get:: /config_dump?resource={}&mask={} diff --git a/docs/root/start/install.rst b/docs/root/start/install.rst index 90c72fc2940cf..752075ad4f81a 100644 --- a/docs/root/start/install.rst +++ b/docs/root/start/install.rst @@ -5,9 +5,6 @@ Installing Envoy The Envoy project :ref:`provides a number of pre-built Docker images ` for both ``amd64`` and ``arm64`` architectures. -The `Get Envoy `__ project also maintains a number of binaries -and repositories to accommodate many popular distributions. - If you are :ref:`installing on Mac OSX `, you can install natively with ``brew``. Once you have installed Envoy, check out the :ref:`quick start ` guide for more information on @@ -17,7 +14,7 @@ Install Envoy on Debian GNU/Linux ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can `install Envoy on Debian `_ -using `Get Envoy `__. +using `Get Envoy `__ until `official packages exist `_. .. code-block:: console @@ -30,16 +27,11 @@ using `Get Envoy `__. $ sudo apt update $ sudo apt install getenvoy-envoy -.. tip:: - - To add the nightly repository instead, replace the word ``stable`` with ``nightly``, - when adding the ``apt`` repository. - Install Envoy on Ubuntu Linux ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can `install Envoy on Ubuntu `_ -using `Get Envoy `__. +using `Get Envoy `__ until `official packages exist `_. .. code-block:: console @@ -52,16 +44,11 @@ using `Get Envoy `__. $ sudo apt update $ sudo apt install -y getenvoy-envoy -.. tip:: - - To add the nightly repository instead, replace the word ``stable`` with ``nightly``, - when adding the ``apt`` repository. - Install Envoy on RPM-based distros ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can `install Envoy on Centos/Redhat Enterprise Linux (RHEL) `_ -using `Get Envoy `__. +using `Get Envoy `__ until `official packages exist `_. .. code-block:: console @@ -69,22 +56,12 @@ using `Get Envoy `__. $ sudo yum-config-manager --add-repo https://getenvoy.io/linux/rpm/tetrate-getenvoy.repo $ sudo yum install getenvoy-envoy -.. 
tip:: - - You can enable/disable ``nightly`` using ``yum-config-manager``: - - .. code-block:: console - - $ sudo yum-config-manager --enable tetrate-getenvoy-nightly - $ sudo yum-config-manager --disable tetrate-getenvoy-nightly - .. _start_install_macosx: Install Envoy on Mac OSX ~~~~~~~~~~~~~~~~~~~~~~~~ -You can install Envoy on Mac OSX using the official brew repositories, or from -`Get Envoy `__. +You can install Envoy on Mac OSX using the official brew repositories. .. tabs:: @@ -93,19 +70,6 @@ You can install Envoy on Mac OSX using the official brew repositories, or from $ brew update $ brew install envoy - .. tab:: Get Envoy - - .. code-block:: console - - $ brew tap tetratelabs/getenvoy - $ brew install envoy - - .. tip:: - - You can install the ``nightly`` version from - `Get Envoy `__ by adding the ``--HEAD`` flag to - the install command. - .. _start_install_windows: Install Envoy on Windows @@ -123,8 +87,7 @@ You can run Envoy using the official Windows Docker image. Install Envoy using Docker ~~~~~~~~~~~~~~~~~~~~~~~~~~ -You can run Envoy using the official Docker images, or by -using images provided by `Get Envoy `__. +You can run Envoy using the official Docker images. The following commands will pull and show the Envoy version of current images. @@ -144,18 +107,6 @@ The following commands will pull and show the Envoy version of current images. $ docker pull envoyproxy/|envoy_distroless_docker_image| $ docker run --rm envoyproxy/|envoy_distroless_docker_image| --version - .. tab:: Get Envoy - - .. code-block:: console - - $ docker pull getenvoy/envoy:stable - $ docker run --rm getenvoy/envoy:stable --version - - .. tip:: - - To use the ``nightly`` version from `Get Envoy `__ - replace the word ``stable`` with ``nightly`` in the above commands. - .. 
_install_binaries: Pre-built Envoy Docker images diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 2b4e64afe0fc7..21531ab791649 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -21,7 +21,7 @@ Minor Behavior Changes never buffered, which only produced correct signatures for requests without a body, or for requests to S3, ES or Glacier, which used the literal string ``UNSIGNED-PAYLOAD``. Buffering can now be disabled in favor of using unsigned payloads with compatible services via the new - `use_unsigned_payload` filter option (default false). + ``use_unsigned_payload`` filter option (default false). * cluster: added default value of 5 seconds for :ref:`connect_timeout `. * http: disable the integration between :ref:`ExtensionWithMatcher ` and HTTP filters by default to reflect its experimental status. This feature can be enabled by setting @@ -34,7 +34,7 @@ Minor Behavior Changes * http: serve HEAD requests from cache. * http: stop sending the transfer-encoding header for 304. This behavior can be temporarily reverted by setting ``envoy.reloadable_features.no_chunked_encoding_header_for_304`` to false. -* http: the behavior of the *present_match* in route header matcher changed. The value of *present_match* is ignored in the past. The new behavior is *present_match* performed when value is true. absent match performed when the value is false. Please reference :ref:`present_match +* http: the behavior of the ``present_match`` in route header matcher changed. The value of ``present_match`` is ignored in the past. The new behavior is ``present_match`` performed when value is true. absent match performed when the value is false. Please reference :ref:`present_match `. * listener: respect the :ref:`connection balance config ` defined within the listener where the sockets are redirected to. Clear that field to restore the previous behavior.
@@ -44,11 +44,11 @@ Bug Fixes --------- *Changes expected to improve the state of the world and are unlikely to have negative effects* -* aws_lambda: if `payload_passthrough` is set to ``false``, the downstream response content-type header will now be set from the content-type entry in the JSON response's headers map, if present. +* aws_lambda: if ``payload_passthrough`` is set to ``false``, the downstream response content-type header will now be set from the content-type entry in the JSON response's headers map, if present. * cluster: fixed the :ref:`cluster stats ` histograms by moving the accounting into the router filter. This means that we now properly compute the number of bytes sent as well as handling retries which were previously ignored. -* hot_restart: fix double counting of `server.seconds_until_first_ocsp_response_expiring` and `server.days_until_first_cert_expiring` during hot-restart. This stat was only incorrect until the parent process terminated. -* http: fix erroneous handling of invalid nghttp2 frames with the `NGHTTP2_ERR_REFUSED_STREAM` error. Prior to the fix, +* hot_restart: fix double counting of ``server.seconds_until_first_ocsp_response_expiring`` and ``server.days_until_first_cert_expiring`` during hot-restart. This stat was only incorrect until the parent process terminated. +* http: fix erroneous handling of invalid nghttp2 frames with the ``NGHTTP2_ERR_REFUSED_STREAM`` error. Prior to the fix, Envoy would close the entire connection when nghttp2 triggered the invalid frame callback for the said error. The fix will cause Envoy to terminate just the refused stream and retain the connection. This behavior can be temporarily reverted by setting the ``envoy.reloadable_features.http2_consume_stream_refused_errors`` runtime guard to false. 
@@ -65,7 +65,7 @@ Removed Config or Runtime *Normally occurs at the end of the* :ref:`deprecation period ` * event: removed ``envoy.reloadable_features.activate_timers_next_event_loop`` runtime guard and legacy code path. -* gzip: removed legacy HTTP Gzip filter and runtime guard `envoy.deprecated_features.allow_deprecated_gzip_http_filter`. +* gzip: removed legacy HTTP Gzip filter and runtime guard ``envoy.deprecated_features.allow_deprecated_gzip_http_filter``. * http: removed ``envoy.reloadable_features.allow_500_after_100`` runtime guard and the legacy code path. * http: removed ``envoy.reloadable_features.always_apply_route_header_rules`` runtime guard and legacy code path. * http: removed ``envoy.reloadable_features.hcm_stream_error_on_invalid_message`` for disabling closing HTTP/1.1 connections on error. Connection-closing can still be disabled by setting the HTTP/1 configuration :ref:`override_stream_error_on_invalid_http_message `. @@ -78,30 +78,38 @@ Removed Config or Runtime New Features ------------ +* access_log: added the new response flag for :ref:`overload manager termination `. The response flag will be set when the http stream is terminated by overload manager. * admission control: added :ref:`admission control ` option that when average RPS of the sampling window is below this threshold, the filter will not throttle requests. Added :ref:`admission control ` option to set an upper limit on the probability of rejection. * bandwidth_limit: added new :ref:`HTTP bandwidth limit filter `. -* bootstrap: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting one such configuration option *no_default_search_domain* as true the DNS resolver will not use the default search domains. And by setting the configuration *resolvers* we can specify the external DNS servers to be used for external DNS query. 
-* cluster: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting one such configuration option *no_default_search_domain* as true the DNS resolver will not use the default search domains. +* bootstrap: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting one such configuration option ``no_default_search_domain`` as true the DNS resolver will not use the default search domains. And by setting the configuration ``resolvers`` we can specify the external DNS servers to be used for external DNS query. +* cluster: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting one such configuration option ``no_default_search_domain`` as true the DNS resolver will not use the default search domains. +* cluster: added :ref:`host_rewrite_literal ` to WeightedCluster. +* cluster: added :ref:`wait_for_warm_on_init `, which allows cluster readiness to not block on cluster warm-up. It is true by default, which preserves existing behavior. Currently, only applicable for DNS-based clusters. * composite filter: can now be used with filters that also add an access logger, such as the WASM filter. +* config: added stat :ref:`config_reload_time_ms `. * connection_limit: added new :ref:`Network connection limit filter `. * crash support: restore crash context when continuing to processing requests or responses as a result of an asynchronous callback that invokes a filter directly. This is unlike the call stacks that go through the various network layers, to eventually reach the filter. For a concrete example see: ``Envoy::Extensions::HttpFilters::Cache::CacheFilter::getHeaders`` which posts a callback on the dispatcher that will invoke the filter directly. -* dns resolver: added *DnsResolverOptions* protobuf message to reconcile all of the DNS lookup option flags. 
By setting the configuration option :ref:`use_tcp_for_dns_lookups ` as true we can make the underlying dns resolver library to make only TCP queries to the DNS servers and by setting the configuration option :ref:`no_default_search_domain ` as true the DNS resolver library will not use the default search domains. -* dns resolver: added *DnsResolutionConfig* to combine :ref:`dns_resolver_options ` and :ref:`resolvers ` in a single protobuf message. The field *resolvers* can be specified with a list of DNS resolver addresses. If specified, DNS client library will perform resolution via the underlying DNS resolvers. Otherwise, the default system resolvers (e.g., /etc/resolv.conf) will be used. -* dns_filter: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting the configuration option *use_tcp_for_dns_lookups* to true we can make dns filter's external resolvers to answer queries using TCP only, by setting the configuration option *no_default_search_domain* as true the DNS resolver will not use the default search domains. And by setting the configuration *resolvers* we can specify the external DNS servers to be used for external DNS query which replaces the pre-existing alpha api field *upstream_resolvers*. -* dynamic_forward_proxy: added :ref:`dns_resolution_config ` option to the DNS cache config in order to aggregate all of the DNS resolver configuration in a single message. By setting one such configuration option *no_default_search_domain* as true the DNS resolver will not use the default search domains. And by setting the configuration *resolvers* we can specify the external DNS servers to be used for external DNS query instead of the system default resolvers. -* http: a new field `is_optional` is added to `extensions.filters.network.http_connection_manager.v3.HttpFilter`. When - value is `true`, the unsupported http filter will be ignored by envoy. 
This is also same with unsupported http filter +* dns cache: added :ref:`preresolve_hostnames ` option to the DNS cache config. This option allows hostnames to be preresolved into the cache upon cache creation. This might provide performance improvement, in the form of cache hits, for hostnames that are going to be resolved during steady state and are known at config load time. +* dns resolver: added ``DnsResolverOptions`` protobuf message to reconcile all of the DNS lookup option flags. By setting the configuration option :ref:`use_tcp_for_dns_lookups ` as true we can make the underlying dns resolver library to make only TCP queries to the DNS servers and by setting the configuration option :ref:`no_default_search_domain ` as true the DNS resolver library will not use the default search domains. +* dns resolver: added ``DnsResolutionConfig`` to combine :ref:`dns_resolver_options ` and :ref:`resolvers ` in a single protobuf message. The field ``resolvers`` can be specified with a list of DNS resolver addresses. If specified, DNS client library will perform resolution via the underlying DNS resolvers. Otherwise, the default system resolvers (e.g., /etc/resolv.conf) will be used. +* dns_filter: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting the configuration option ``use_tcp_for_dns_lookups`` to true we can make dns filter's external resolvers to answer queries using TCP only, by setting the configuration option ``no_default_search_domain`` as true the DNS resolver will not use the default search domains. And by setting the configuration ``resolvers`` we can specify the external DNS servers to be used for external DNS query which replaces the pre-existing alpha api field ``upstream_resolvers``. +* dynamic_forward_proxy: added :ref:`dns_resolution_config ` option to the DNS cache config in order to aggregate all of the DNS resolver configuration in a single message. 
By setting one such configuration option ``no_default_search_domain`` as true the DNS resolver will not use the default search domains. And by setting the configuration ``resolvers`` we can specify the external DNS servers to be used for external DNS query instead of the system default resolvers. +* http: a new field ``is_optional`` is added to ``extensions.filters.network.http_connection_manager.v3.HttpFilter``. When + value is ``true``, the unsupported http filter will be ignored by envoy. This is also same with unsupported http filter in the typed per filter config. For more information, please reference :ref:`HttpFilter `. -* http: added :ref:`stripping trailing host dot from host header` support. -* http: added support for :ref:`original IP detection extensions`. +* http: added :ref:`stripping trailing host dot from host header ` support. +* http: added support for :ref:`original IP detection extensions `. Two initial extensions were added, the :ref:`custom header ` extension and the :ref:`xff ` extension. * http: added a new option to upstream HTTP/2 :ref:`keepalive ` to send a PING ahead of a new stream if the connection has been idle for a sufficient duration. -* http: added the ability to :ref:`unescape slash sequences` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime variable. +* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. 
The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime variable. * http: added upstream and downstream alpha HTTP/3 support! See :ref:`quic_options ` for downstream and the new http3_protocol_options in :ref:`http_protocol_options ` for upstream HTTP/3. +* input matcher: a new input matcher that :ref:`matches an IP address against a list of CIDR ranges `. * jwt_authn: added support to fetch remote jwks asynchronously specified by :ref:`async_fetch `. +* jwt_authn: added support to add padding in the forwarded JWT payload specified by :ref:`pad_forward_payload_header `. * listener: added ability to change an existing listener's address. +* listener: added filter chain match support for :ref:`direct source address `. * local_rate_limit_filter: added support for locally rate limiting http requests on a per connection basis. This can be enabled by setting the :ref:`local_rate_limit_per_downstream_connection ` field to true. * metric service: added support for sending metric tags as labels. This can be enabled by setting the :ref:`emit_tags_as_labels ` field to true. * proxy protocol: added support for generating the header while using the :ref:`HTTP connection manager `. This is done using the :ref:`Proxy Protocol Transport Socket ` on upstream clusters. @@ -111,6 +119,7 @@ New Features * stats: added native :ref:`Graphite-formatted tag ` support. * tcp: added support for :ref:`preconnecting `. Preconnecting is off by default, but recommended for clusters serving latency-sensitive traffic. * thrift_proxy: added per upstream metrics within the :ref:`thrift router ` for request and response size histograms. +* thrift_proxy: added support for :ref:`outlier detection `. * tls: allow dual ECDSA/RSA certs via SDS.
Previously, SDS only supported a single certificate per context, and dual cert was only supported via non-SDS. * udp_proxy: added :ref:`key ` as another hash policy to support hash based routing on any given key. * windows container image: added user, EnvoyUser which is part of the Network Configuration Operators group to the container image. diff --git a/envoy/buffer/buffer.h b/envoy/buffer/buffer.h index a4029715e6c7b..c30cbd84f2ca4 100644 --- a/envoy/buffer/buffer.h +++ b/envoy/buffer/buffer.h @@ -494,9 +494,9 @@ class WatermarkFactory { * high watermark. * @return a newly created InstancePtr. */ - virtual InstancePtr create(std::function below_low_watermark, - std::function above_high_watermark, - std::function above_overflow_watermark) PURE; + virtual InstancePtr createBuffer(std::function below_low_watermark, + std::function above_high_watermark, + std::function above_overflow_watermark) PURE; }; using WatermarkFactoryPtr = std::unique_ptr; diff --git a/envoy/http/BUILD b/envoy/http/BUILD index dab2c3fab66c1..09d26b373967a 100644 --- a/envoy/http/BUILD +++ b/envoy/http/BUILD @@ -115,6 +115,7 @@ envoy_cc_library( ], deps = [ ":header_formatter_interface", + "//envoy/tracing:trace_context_interface", "//source/common/common:assert_lib", "//source/common/common:hash_lib", ], diff --git a/envoy/http/codec.h b/envoy/http/codec.h index 606fe1d5bde46..3674fd88c3123 100644 --- a/envoy/http/codec.h +++ b/envoy/http/codec.h @@ -284,7 +284,9 @@ enum class StreamResetReason { // Either there was an early TCP error for a CONNECT request or the peer reset with CONNECT_ERROR ConnectError, // Received payload did not conform to HTTP protocol. - ProtocolError + ProtocolError, + // If the stream was locally reset by the Overload Manager. 
+ OverloadManager }; /** diff --git a/envoy/http/header_map.h b/envoy/http/header_map.h index 202f2e492f449..a3f1fd855b417 100644 --- a/envoy/http/header_map.h +++ b/envoy/http/header_map.h @@ -11,6 +11,7 @@ #include "envoy/common/optref.h" #include "envoy/common/pure.h" #include "envoy/http/header_formatter.h" +#include "envoy/tracing/trace_context.h" #include "source/common/common/assert.h" #include "source/common/common/hash.h" @@ -64,7 +65,7 @@ class LowerCaseString { return *this; } - explicit LowerCaseString(const std::string& new_string) : string_(new_string) { + explicit LowerCaseString(absl::string_view new_string) : string_(new_string) { ASSERT(valid()); lower(); } @@ -78,6 +79,9 @@ class LowerCaseString { return os << lower_case_string.string_; } + // Implicit conversion to absl::string_view. + operator absl::string_view() const { return string_; } + private: void lower() { std::transform(string_.begin(), string_.end(), string_.begin(), absl::ascii_tolower); @@ -817,7 +821,8 @@ class RequestOrResponseHeaderMap : public HeaderMap { // Request headers. 
class RequestHeaderMap : public RequestOrResponseHeaderMap, - public CustomInlineHeaderBase { + public CustomInlineHeaderBase, + public Tracing::TraceContext { public: INLINE_REQ_STRING_HEADERS(DEFINE_INLINE_STRING_HEADER) INLINE_REQ_NUMERIC_HEADERS(DEFINE_INLINE_NUMERIC_HEADER) diff --git a/envoy/network/BUILD b/envoy/network/BUILD index 10a3458c687ac..a76a4a4dfe544 100644 --- a/envoy/network/BUILD +++ b/envoy/network/BUILD @@ -66,6 +66,8 @@ envoy_cc_library( envoy_cc_library( name = "drain_decision_interface", hdrs = ["drain_decision.h"], + external_deps = ["abseil_base"], + deps = ["//envoy/common:callback"], ) envoy_cc_library( diff --git a/envoy/network/drain_decision.h b/envoy/network/drain_decision.h index e071dfdc84800..28b259bbe6258 100644 --- a/envoy/network/drain_decision.h +++ b/envoy/network/drain_decision.h @@ -1,12 +1,20 @@ #pragma once +#include +#include + +#include "envoy/common/callback.h" #include "envoy/common/pure.h" +#include "absl/base/attributes.h" + namespace Envoy { namespace Network { class DrainDecision { public: + using DrainCloseCb = std::function; + virtual ~DrainDecision() = default; /** @@ -14,6 +22,16 @@ class DrainDecision { * filters to determine when this should be called for the least impact possible. */ virtual bool drainClose() const PURE; + + /** + * @brief Register a callback to be called proactively when a drain decision enters into a + * 'close' state. + * + * @param cb Callback to be called once drain decision enters close state + * @return handle to remove callback + */ + ABSL_MUST_USE_RESULT + virtual Common::CallbackHandlePtr addOnDrainCloseCb(DrainCloseCb cb) const PURE; }; } // namespace Network diff --git a/envoy/network/socket.h b/envoy/network/socket.h index 692670e651695..ed37195de7088 100644 --- a/envoy/network/socket.h +++ b/envoy/network/socket.h @@ -46,6 +46,8 @@ struct SocketOptionName { * Interfaces for providing a socket's various addresses. 
This is split into a getters interface * and a getters + setters interface. This is so that only the getters portion can be overridden * in certain cases. + * TODO(soulxu): Since there are more than address information inside the provider, this will be + * renamed as ConnectionInfoProvider. Ref https://github.com/envoyproxy/envoy/issues/17168 */ class SocketAddressProvider { public: @@ -73,6 +75,11 @@ class SocketAddressProvider { */ virtual const Address::InstanceConstSharedPtr& directRemoteAddress() const PURE; + /** + * @return SNI value for downstream host. + */ + virtual absl::string_view requestedServerName() const PURE; + /** * Dumps the state of the SocketAddressProvider to the given ostream. * @@ -109,6 +116,11 @@ class SocketAddressSetter : public SocketAddressProvider { * Set the remote address of the socket. */ virtual void setRemoteAddress(const Address::InstanceConstSharedPtr& remote_address) PURE; + + /** + * @param SNI value requested. + */ + virtual void setRequestedServerName(const absl::string_view requested_server_name) PURE; }; using SocketAddressSetterSharedPtr = std::shared_ptr; diff --git a/envoy/network/transport_socket.h b/envoy/network/transport_socket.h index a7cec14f8ecf9..f911c4d99dec0 100644 --- a/envoy/network/transport_socket.h +++ b/envoy/network/transport_socket.h @@ -218,8 +218,7 @@ class TransportSocketOptions { const Network::TransportSocketFactory& factory) const PURE; }; -// TODO(mattklein123): Rename to TransportSocketOptionsConstSharedPtr in a dedicated follow up. -using TransportSocketOptionsSharedPtr = std::shared_ptr; +using TransportSocketOptionsConstSharedPtr = std::shared_ptr; /** * A factory for creating transport socket. It will be associated to filter chains and clusters. @@ -238,7 +237,7 @@ class TransportSocketFactory { * @return Network::TransportSocketPtr a transport socket to be passed to connection. 
*/ virtual TransportSocketPtr - createTransportSocket(TransportSocketOptionsSharedPtr options) const PURE; + createTransportSocket(TransportSocketOptionsConstSharedPtr options) const PURE; /** * @return bool whether the transport socket will use proxy protocol options. diff --git a/envoy/server/BUILD b/envoy/server/BUILD index e7914be881528..ccd2478deea7d 100644 --- a/envoy/server/BUILD +++ b/envoy/server/BUILD @@ -53,6 +53,7 @@ envoy_cc_library( name = "config_tracker_interface", hdrs = ["config_tracker.h"], deps = [ + "//envoy/common:matchers_interface", "//source/common/common:non_copyable", "//source/common/protobuf", ], @@ -61,7 +62,12 @@ envoy_cc_library( envoy_cc_library( name = "drain_manager_interface", hdrs = ["drain_manager.h"], - deps = ["//envoy/network:drain_decision_interface"], + deps = [ + "//envoy/event:dispatcher_interface", + "//envoy/network:drain_decision_interface", + "//envoy/thread_local:thread_local_object", + "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", + ], ) envoy_cc_library( diff --git a/envoy/server/config_tracker.h b/envoy/server/config_tracker.h index 62932be2c853c..72fe2a88e2564 100644 --- a/envoy/server/config_tracker.h +++ b/envoy/server/config_tracker.h @@ -4,6 +4,7 @@ #include #include +#include "envoy/common/matchers.h" #include "envoy/common/pure.h" #include "source/common/common/non_copyable.h" @@ -21,7 +22,9 @@ namespace Server { */ class ConfigTracker { public: - using Cb = std::function; + // The passed StringMatcher will be matched against the `name` field of whatever + // proto is returned. 
+ using Cb = std::function; using CbsMap = std::map; /** diff --git a/envoy/server/drain_manager.h b/envoy/server/drain_manager.h index 49ecc194166a1..0e37091fa5dde 100644 --- a/envoy/server/drain_manager.h +++ b/envoy/server/drain_manager.h @@ -3,17 +3,43 @@ #include #include +#include "envoy/config/listener/v3/listener.pb.h" +#include "envoy/event/dispatcher.h" #include "envoy/network/drain_decision.h" +#include "envoy/thread_local/thread_local_object.h" namespace Envoy { namespace Server { +class DrainManager; +using DrainManagerPtr = std::unique_ptr; + /** * Handles connection draining. This concept is used globally during hot restart / server draining - * as well as on individual listeners when they are being dynamically removed. + * as well as on individual listeners and filter-chains when they are being dynamically removed. */ -class DrainManager : public Network::DrainDecision { +class DrainManager : public Network::DrainDecision, public ThreadLocal::ThreadLocalObject { public: + /** + * @brief Create a child drain-manager. Will proxy the drain status from the parent, but can also + * be used to enact local draining. + * + * Child managers can be used to construct "drain trees" where each node in the tree can drain + * independently of its parent node, but the drain status cascades to child nodes. + * + * A notable difference to drain callbacks is that child managers are notified immediately and + * without a delay timing. Additionally, notification from parent to child is a thread-safe + * operation whereas callback registration and triggering is not. + * + * @param dispatcher Dispatcher for the current thread in which the new child drain-manager will + * exist. + * @param drain_type The drain-type for the manager. May be different from the parent manager.
+ */ + virtual DrainManagerPtr + createChildManager(Event::Dispatcher& dispatcher, + envoy::config::listener::v3::Listener::DrainType drain_type) PURE; + virtual DrainManagerPtr createChildManager(Event::Dispatcher& dispatcher) PURE; + /** * Invoked to begin the drain procedure. (Making drain close operations more likely). * @param drain_complete_cb will be invoked once the drain sequence is finished. The parameter is diff --git a/envoy/ssl/certificate_validation_context_config.h b/envoy/ssl/certificate_validation_context_config.h index 4098c0b7369c0..544215a73daea 100644 --- a/envoy/ssl/certificate_validation_context_config.h +++ b/envoy/ssl/certificate_validation_context_config.h @@ -40,11 +40,6 @@ class CertificateValidationContextConfig { */ virtual const std::string& certificateRevocationListPath() const PURE; - /** - * @return The subject alt names to be verified, if enabled. - */ - virtual const std::vector& verifySubjectAltNameList() const PURE; - /** * @return The subject alt name matchers to be verified, if enabled. */ diff --git a/envoy/stream_info/stream_info.h b/envoy/stream_info/stream_info.h index d409aecb89103..002f0ce8bdf63 100644 --- a/envoy/stream_info/stream_info.h +++ b/envoy/stream_info/stream_info.h @@ -86,8 +86,10 @@ enum ResponseFlag { UpstreamProtocolError = 0x800000, // No cluster found for a given request. NoClusterFound = 0x1000000, + // Overload Manager terminated the stream. + OverloadManager = 0x2000000, // ATTENTION: MAKE SURE THIS REMAINS EQUAL TO THE LAST FLAG. - LastFlag = NoClusterFound, + LastFlag = OverloadManager, }; /** @@ -527,16 +529,6 @@ class StreamInfo { virtual const FilterStateSharedPtr& upstreamFilterState() const PURE; virtual void setUpstreamFilterState(const FilterStateSharedPtr& filter_state) PURE; - /** - * @param SNI value requested. - */ - virtual void setRequestedServerName(const absl::string_view requested_server_name) PURE; - - /** - * @return SNI value for downstream host. 
- */ - virtual const std::string& requestedServerName() const PURE; - /** * @param failure_reason the upstream transport failure reason. */ diff --git a/envoy/tcp/conn_pool.h b/envoy/tcp/conn_pool.h index 14dd5677907f9..6b31a7e6bc12b 100644 --- a/envoy/tcp/conn_pool.h +++ b/envoy/tcp/conn_pool.h @@ -103,10 +103,11 @@ class Callbacks { /** * Called when a pool error occurred and no connection could be acquired for making the request. * @param reason supplies the failure reason. + * @param transport_failure_reason supplies the details of the transport failure reason. * @param host supplies the description of the host that caused the failure. This may be nullptr * if no host was involved in the failure (for example overflow). */ - virtual void onPoolFailure(PoolFailureReason reason, + virtual void onPoolFailure(PoolFailureReason reason, absl::string_view transport_failure_reason, Upstream::HostDescriptionConstSharedPtr host) PURE; /** diff --git a/envoy/tracing/BUILD b/envoy/tracing/BUILD index c1711b1a92ac5..e19f71626a133 100644 --- a/envoy/tracing/BUILD +++ b/envoy/tracing/BUILD @@ -40,3 +40,8 @@ envoy_cc_library( "//envoy/stream_info:stream_info_interface", ], ) + +envoy_cc_library( + name = "trace_context_interface", + hdrs = ["trace_context.h"], +) diff --git a/envoy/tracing/trace_context.h b/envoy/tracing/trace_context.h new file mode 100644 index 0000000000000..4bb88f0f59cf7 --- /dev/null +++ b/envoy/tracing/trace_context.h @@ -0,0 +1,68 @@ +#pragma once + +#include + +#include "envoy/common/pure.h" + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" + +namespace Envoy { +namespace Tracing { + +/** + * Protocol-independent abstraction for traceable stream. It hides the differences between different + * protocol and provides tracer driver with common methods for obtaining and setting the tracing + * context. + */ +class TraceContext { +public: + virtual ~TraceContext() = default; + + /** + * Get tracing context value by key. 
+ * + * @param key The context key of string view type. + * @return The optional context value of string_view type. + */ + virtual absl::optional getTraceContext(absl::string_view key) const PURE; + + /** + * Set new tracing context key/value pair. + * + * @param key The context key of string view type. + * @param val The context value of string view type. + */ + virtual void setTraceContext(absl::string_view key, absl::string_view val) PURE; + + /** + * Set new tracing context key/value pair. The key MUST point to data that will live beyond + * the lifetime of any traceable stream that uses the string. + * + * @param key The context key of string view type. + * @param val The context value of string view type. + */ + virtual void setTraceContextReferenceKey(absl::string_view key, absl::string_view val) { + // The reference semantics of key and value are ignored by default. Derived classes that wish to + // use reference semantics to improve performance or reduce memory overhead can override this + // method. + setTraceContext(key, val); + } + + /** + * Set new tracing context key/value pair. Both key and val MUST point to data that will live + * beyond the lifetime of any traceable stream that uses the string. + * + * @param key The context key of string view type. + * @param val The context value of string view type. + */ + virtual void setTraceContextReference(absl::string_view key, absl::string_view val) { + // The reference semantics of key and value are ignored by default. Derived classes that wish to + // use reference semantics to improve performance or reduce memory overhead can override this + // method.
+ setTraceContext(key, val); + } +}; + +} // namespace Tracing +} // namespace Envoy diff --git a/envoy/upstream/cluster_manager.h b/envoy/upstream/cluster_manager.h index e06a45c14ffa3..180045116ec32 100644 --- a/envoy/upstream/cluster_manager.h +++ b/envoy/upstream/cluster_manager.h @@ -363,7 +363,7 @@ class ClusterManagerFactory { const absl::optional& alternate_protocol_options, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, TimeSource& time_source, ClusterConnectivityState& state) PURE; /** @@ -374,7 +374,7 @@ class ClusterManagerFactory { allocateTcpConnPool(Event::Dispatcher& dispatcher, HostConstSharedPtr host, ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsSharedPtr transport_socket_options, + Network::TransportSocketOptionsConstSharedPtr transport_socket_options, ClusterConnectivityState& state) PURE; /** diff --git a/envoy/upstream/load_balancer.h b/envoy/upstream/load_balancer.h index fe8bb7e73b92d..b1b20324b84ef 100644 --- a/envoy/upstream/load_balancer.h +++ b/envoy/upstream/load_balancer.h @@ -82,7 +82,7 @@ class LoadBalancerContext { /** * Returns the transport socket options which should be applied on upstream connections */ - virtual Network::TransportSocketOptionsSharedPtr upstreamTransportSocketOptions() const PURE; + virtual Network::TransportSocketOptionsConstSharedPtr upstreamTransportSocketOptions() const PURE; }; /** diff --git a/envoy/upstream/upstream.h b/envoy/upstream/upstream.h index d238be74aad29..d8b73f39de23f 100644 --- a/envoy/upstream/upstream.h +++ b/envoy/upstream/upstream.h @@ -92,10 +92,9 @@ class Host : virtual public HostDescription { * will be returned along with the connection vs. the host the method was called on. 
* If it matters, callers should not assume that the returned host will be the same. */ - virtual CreateConnectionData - createConnection(Event::Dispatcher& dispatcher, - const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsSharedPtr transport_socket_options) const PURE; + virtual CreateConnectionData createConnection( + Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, + Network::TransportSocketOptionsConstSharedPtr transport_socket_options) const PURE; /** * Create a health check connection for this host. @@ -104,10 +103,10 @@ class Host : virtual public HostDescription { * connection. * @return the connection data. */ - virtual CreateConnectionData - createHealthCheckConnection(Event::Dispatcher& dispatcher, - Network::TransportSocketOptionsSharedPtr transport_socket_options, - const envoy::config::core::v3::Metadata* metadata) const PURE; + virtual CreateConnectionData createHealthCheckConnection( + Event::Dispatcher& dispatcher, + Network::TransportSocketOptionsConstSharedPtr transport_socket_options, + const envoy::config::core::v3::Metadata* metadata) const PURE; /** * @return host specific gauges. 
diff --git a/examples/brotli/brotli-envoy.yaml b/examples/brotli/brotli-envoy.yaml index f3bb8efbe5bcf..06db571370c99 100644 --- a/examples/brotli/brotli-envoy.yaml +++ b/examples/brotli/brotli-envoy.yaml @@ -37,7 +37,6 @@ static_resources: "@type": type.googleapis.com/envoy.extensions.compression.brotli.compressor.v3.Brotli window_bits: 10 - name: envoy.filters.http.router - typed_config: {} transport_socket: name: envoy.transport_sockets.tls typed_config: @@ -138,7 +137,6 @@ static_resources: "@type": type.googleapis.com/envoy.extensions.compression.brotli.compressor.v3.Brotli window_bits: 10 - name: envoy.filters.http.router - typed_config: {} transport_socket: name: envoy.transport_sockets.tls typed_config: diff --git a/examples/cache/front-envoy.yaml b/examples/cache/front-envoy.yaml index 00091a052ca86..e6111b745eeca 100644 --- a/examples/cache/front-envoy.yaml +++ b/examples/cache/front-envoy.yaml @@ -33,7 +33,6 @@ static_resources: typed_config: "@type": "type.googleapis.com/envoy.extensions.cache.simple_http_cache.v3alpha.SimpleHttpCacheConfig" - name: envoy.filters.http.router - typed_config: {} clusters: - name: service1 diff --git a/examples/cors/backend/front-envoy.yaml b/examples/cors/backend/front-envoy.yaml index 3b480ab24fdc3..f7cd4c7c14c09 100644 --- a/examples/cors/backend/front-envoy.yaml +++ b/examples/cors/backend/front-envoy.yaml @@ -67,9 +67,7 @@ static_resources: cluster: backend_service http_filters: - name: envoy.filters.http.cors - typed_config: {} - name: envoy.filters.http.router - typed_config: {} clusters: - name: backend_service type: STRICT_DNS diff --git a/examples/cors/frontend/front-envoy.yaml b/examples/cors/frontend/front-envoy.yaml index 50fa897b238ed..5a61d4279efe5 100644 --- a/examples/cors/frontend/front-envoy.yaml +++ b/examples/cors/frontend/front-envoy.yaml @@ -28,9 +28,7 @@ static_resources: cluster: frontend_service http_filters: - name: envoy.filters.http.cors - typed_config: {} - name: envoy.filters.http.router - 
typed_config: {} clusters: - name: frontend_service type: STRICT_DNS diff --git a/examples/csrf/crosssite/front-envoy.yaml b/examples/csrf/crosssite/front-envoy.yaml index a6cb4f9eb5962..9e47676d02732 100644 --- a/examples/csrf/crosssite/front-envoy.yaml +++ b/examples/csrf/crosssite/front-envoy.yaml @@ -28,7 +28,6 @@ static_resources: cluster: generic_service http_filters: - name: envoy.filters.http.router - typed_config: {} clusters: - name: generic_service type: STRICT_DNS diff --git a/examples/csrf/samesite/front-envoy.yaml b/examples/csrf/samesite/front-envoy.yaml index 57773f803257a..37e3082517b5d 100644 --- a/examples/csrf/samesite/front-envoy.yaml +++ b/examples/csrf/samesite/front-envoy.yaml @@ -91,7 +91,6 @@ static_resources: cluster: generic_service http_filters: - name: envoy.filters.http.cors - typed_config: {} - name: envoy.filters.http.csrf typed_config: "@type": type.googleapis.com/envoy.extensions.filters.http.csrf.v3.CsrfPolicy @@ -100,7 +99,6 @@ static_resources: numerator: 0 denominator: HUNDRED - name: envoy.filters.http.router - typed_config: {} clusters: - name: generic_service type: STRICT_DNS diff --git a/examples/double-proxy/envoy-backend.yaml b/examples/double-proxy/envoy-backend.yaml index d04f33f2996f5..1d764fe77d64c 100644 --- a/examples/double-proxy/envoy-backend.yaml +++ b/examples/double-proxy/envoy-backend.yaml @@ -7,7 +7,6 @@ static_resources: port_value: 5432 listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - filters: - name: envoy.filters.network.postgres_proxy diff --git a/examples/ext_authz/config/grpc-service/v3.yaml b/examples/ext_authz/config/grpc-service/v3.yaml index f42f332137957..f9e2bea51c409 100644 --- a/examples/ext_authz/config/grpc-service/v3.yaml +++ b/examples/ext_authz/config/grpc-service/v3.yaml @@ -32,7 +32,6 @@ static_resources: timeout: 0.250s transport_api_version: V3 - name: envoy.filters.http.router - typed_config: {} clusters: - name: upstream-service 
diff --git a/examples/ext_authz/config/http-service.yaml b/examples/ext_authz/config/http-service.yaml index 4bc3fc9ea39bb..e80b0735473ea 100644 --- a/examples/ext_authz/config/http-service.yaml +++ b/examples/ext_authz/config/http-service.yaml @@ -37,7 +37,6 @@ static_resources: patterns: - exact: x-current-user - name: envoy.filters.http.router - typed_config: {} clusters: - name: upstream-service diff --git a/examples/ext_authz/config/opa-service/v3.yaml b/examples/ext_authz/config/opa-service/v3.yaml index f5a6697cb4661..83bc56d8dbb46 100644 --- a/examples/ext_authz/config/opa-service/v3.yaml +++ b/examples/ext_authz/config/opa-service/v3.yaml @@ -32,7 +32,6 @@ static_resources: timeout: 0.250s transport_api_version: V3 - name: envoy.filters.http.router - typed_config: {} clusters: - name: upstream-service diff --git a/examples/fault-injection/envoy.yaml b/examples/fault-injection/envoy.yaml index a526edb6fa016..491517fc2b5ad 100644 --- a/examples/fault-injection/envoy.yaml +++ b/examples/fault-injection/envoy.yaml @@ -41,7 +41,6 @@ static_resources: numerator: 0 denominator: HUNDRED - name: envoy.filters.http.router - typed_config: {} clusters: - name: local_service type: STRICT_DNS diff --git a/examples/front-proxy/front-envoy.yaml b/examples/front-proxy/front-envoy.yaml index bbd124295898f..be4367db4b1da 100644 --- a/examples/front-proxy/front-envoy.yaml +++ b/examples/front-proxy/front-envoy.yaml @@ -28,7 +28,6 @@ static_resources: cluster: service2 http_filters: - name: envoy.filters.http.router - typed_config: {} - address: socket_address: @@ -58,7 +57,6 @@ static_resources: cluster: service2 http_filters: - name: envoy.filters.http.router - typed_config: {} transport_socket: name: envoy.transport_sockets.tls diff --git a/examples/front-proxy/service-envoy.yaml b/examples/front-proxy/service-envoy.yaml index eaa849dd40797..e9365e49bf0d9 100644 --- a/examples/front-proxy/service-envoy.yaml +++ b/examples/front-proxy/service-envoy.yaml @@ -24,7 +24,6 @@ 
static_resources: cluster: local_service http_filters: - name: envoy.filters.http.router - typed_config: {} clusters: - name: local_service type: STRICT_DNS diff --git a/examples/grpc-bridge/client/envoy-proxy.yaml b/examples/grpc-bridge/client/envoy-proxy.yaml index c96d32a6dfc43..31668610eb039 100644 --- a/examples/grpc-bridge/client/envoy-proxy.yaml +++ b/examples/grpc-bridge/client/envoy-proxy.yaml @@ -32,9 +32,7 @@ static_resources: cluster: backend-proxy http_filters: - name: envoy.filters.http.grpc_http1_bridge - typed_config: {} - name: envoy.filters.http.router - typed_config: {} clusters: - name: backend-proxy type: LOGICAL_DNS diff --git a/examples/grpc-bridge/server/envoy-proxy.yaml b/examples/grpc-bridge/server/envoy-proxy.yaml index 8e63706b5aaa6..1608836126e0b 100644 --- a/examples/grpc-bridge/server/envoy-proxy.yaml +++ b/examples/grpc-bridge/server/envoy-proxy.yaml @@ -29,7 +29,6 @@ static_resources: cluster: backend_grpc_service http_filters: - name: envoy.filters.http.router - typed_config: {} clusters: - name: backend_grpc_service type: STRICT_DNS diff --git a/examples/gzip/gzip-envoy.yaml b/examples/gzip/gzip-envoy.yaml index 034333fc5aae6..30ffbe32f8768 100644 --- a/examples/gzip/gzip-envoy.yaml +++ b/examples/gzip/gzip-envoy.yaml @@ -38,7 +38,6 @@ static_resources: memory_level: 3 window_bits: 10 - name: envoy.filters.http.router - typed_config: {} - address: socket_address: address: 0.0.0.0 @@ -77,8 +76,6 @@ static_resources: memory_level: 3 window_bits: 10 - name: envoy.filters.http.router - typed_config: {} - clusters: - name: envoy-stats connect_timeout: 0.25s diff --git a/examples/jaeger-native-tracing/docker-compose.yaml b/examples/jaeger-native-tracing/docker-compose.yaml index 5294cffd99aa6..f6d363aeb1324 100644 --- a/examples/jaeger-native-tracing/docker-compose.yaml +++ b/examples/jaeger-native-tracing/docker-compose.yaml @@ -44,7 +44,7 @@ services: jaeger: image: jaegertracing/all-in-one environment: - - 
COLLECTOR_ZIPKIN_HTTP_PORT=9411 + - COLLECTOR_ZIPKIN_HOST_PORT=9411 networks: - envoymesh ports: diff --git a/examples/jaeger-native-tracing/front-envoy-jaeger.yaml b/examples/jaeger-native-tracing/front-envoy-jaeger.yaml index a32051d5dc7bb..5b76fd5e48f98 100644 --- a/examples/jaeger-native-tracing/front-envoy-jaeger.yaml +++ b/examples/jaeger-native-tracing/front-envoy-jaeger.yaml @@ -48,7 +48,6 @@ static_resources: operation: checkAvailability http_filters: - name: envoy.filters.http.router - typed_config: {} use_remote_address: true clusters: - name: service1 diff --git a/examples/jaeger-native-tracing/service1-envoy-jaeger.yaml b/examples/jaeger-native-tracing/service1-envoy-jaeger.yaml index 7614f6354c883..853df7b43d71e 100644 --- a/examples/jaeger-native-tracing/service1-envoy-jaeger.yaml +++ b/examples/jaeger-native-tracing/service1-envoy-jaeger.yaml @@ -27,7 +27,6 @@ static_resources: operation: checkAvailability http_filters: - name: envoy.filters.http.router - typed_config: {} - address: socket_address: address: 0.0.0.0 @@ -75,7 +74,6 @@ static_resources: operation: checkStock http_filters: - name: envoy.filters.http.router - typed_config: {} clusters: - name: local_service type: strict_dns diff --git a/examples/jaeger-native-tracing/service2-envoy-jaeger.yaml b/examples/jaeger-native-tracing/service2-envoy-jaeger.yaml index a56ba608c3120..51ba180802f18 100644 --- a/examples/jaeger-native-tracing/service2-envoy-jaeger.yaml +++ b/examples/jaeger-native-tracing/service2-envoy-jaeger.yaml @@ -47,7 +47,6 @@ static_resources: operation: checkStock http_filters: - name: envoy.filters.http.router - typed_config: {} clusters: - name: local_service type: strict_dns diff --git a/examples/jaeger-tracing/docker-compose.yaml b/examples/jaeger-tracing/docker-compose.yaml index a803fa94c28d6..e280d66776848 100644 --- a/examples/jaeger-tracing/docker-compose.yaml +++ b/examples/jaeger-tracing/docker-compose.yaml @@ -36,7 +36,7 @@ services: jaeger: image: 
jaegertracing/all-in-one environment: - - COLLECTOR_ZIPKIN_HTTP_PORT=9411 + - COLLECTOR_ZIPKIN_HOST_PORT=9411 networks: - envoymesh ports: diff --git a/examples/jaeger-tracing/front-envoy-jaeger.yaml b/examples/jaeger-tracing/front-envoy-jaeger.yaml index b5ac02f727e66..dc4d256b7bc12 100644 --- a/examples/jaeger-tracing/front-envoy-jaeger.yaml +++ b/examples/jaeger-tracing/front-envoy-jaeger.yaml @@ -37,7 +37,6 @@ static_resources: operation: checkAvailability http_filters: - name: envoy.filters.http.router - typed_config: {} use_remote_address: true clusters: - name: service1 diff --git a/examples/jaeger-tracing/service1-envoy-jaeger.yaml b/examples/jaeger-tracing/service1-envoy-jaeger.yaml index 7ef911a69ea80..8d9b383562820 100644 --- a/examples/jaeger-tracing/service1-envoy-jaeger.yaml +++ b/examples/jaeger-tracing/service1-envoy-jaeger.yaml @@ -36,7 +36,6 @@ static_resources: operation: checkAvailability http_filters: - name: envoy.filters.http.router - typed_config: {} - address: socket_address: address: 0.0.0.0 @@ -73,7 +72,6 @@ static_resources: operation: checkStock http_filters: - name: envoy.filters.http.router - typed_config: {} clusters: - name: local_service type: STRICT_DNS diff --git a/examples/jaeger-tracing/service2-envoy-jaeger.yaml b/examples/jaeger-tracing/service2-envoy-jaeger.yaml index afb0a2d70df5f..d1f09f5537fb9 100644 --- a/examples/jaeger-tracing/service2-envoy-jaeger.yaml +++ b/examples/jaeger-tracing/service2-envoy-jaeger.yaml @@ -36,7 +36,6 @@ static_resources: operation: checkStock http_filters: - name: envoy.filters.http.router - typed_config: {} clusters: - name: local_service type: STRICT_DNS diff --git a/examples/load-reporting-service/service-envoy-w-lrs.yaml b/examples/load-reporting-service/service-envoy-w-lrs.yaml index 8c40861470bca..ac0ebca7774e4 100644 --- a/examples/load-reporting-service/service-envoy-w-lrs.yaml +++ b/examples/load-reporting-service/service-envoy-w-lrs.yaml @@ -24,7 +24,6 @@ static_resources: cluster: 
local_service http_filters: - name: envoy.filters.http.router - typed_config: {} clusters: - name: local_service type: STRICT_DNS diff --git a/examples/lua/envoy.yaml b/examples/lua/envoy.yaml index 2876d1e9d8cb6..e287dac1202b2 100644 --- a/examples/lua/envoy.yaml +++ b/examples/lua/envoy.yaml @@ -38,7 +38,6 @@ static_resources: response_handle:headers():add("response-body-size", tostring(body_size)) end - name: envoy.filters.http.router - typed_config: {} clusters: - name: web_service diff --git a/examples/skywalking-tracing/docker-compose.yaml b/examples/skywalking-tracing/docker-compose.yaml index 9ae58205f4e75..6962e59e0adc1 100644 --- a/examples/skywalking-tracing/docker-compose.yaml +++ b/examples/skywalking-tracing/docker-compose.yaml @@ -56,7 +56,7 @@ services: soft: -1 hard: -1 skywalking-oap: - image: apache/skywalking-oap-server:8.4.0-es7 + image: apache/skywalking-oap-server:8.6.0-es7 networks: - envoymesh depends_on: @@ -72,7 +72,7 @@ services: start_period: 40s restart: on-failure skywalking-ui: - image: apache/skywalking-ui:8.4.0 + image: apache/skywalking-ui:8.6.0 networks: - envoymesh depends_on: diff --git a/examples/wasm-cc/envoy.yaml b/examples/wasm-cc/envoy.yaml index e65d4e6ce8265..fac7045ee94f8 100644 --- a/examples/wasm-cc/envoy.yaml +++ b/examples/wasm-cc/envoy.yaml @@ -43,7 +43,6 @@ static_resources: local: filename: "lib/envoy_filter_http_wasm_example.wasm" - name: envoy.filters.http.router - typed_config: {} clusters: - name: web_service diff --git a/examples/wasm-cc/envoy_filter_http_wasm_example.cc b/examples/wasm-cc/envoy_filter_http_wasm_example.cc index 2d02f70fda534..06478271add8c 100644 --- a/examples/wasm-cc/envoy_filter_http_wasm_example.cc +++ b/examples/wasm-cc/envoy_filter_http_wasm_example.cc @@ -78,9 +78,9 @@ FilterDataStatus ExampleContext::onRequestBody(size_t body_buffer_length, return FilterDataStatus::Continue; } -FilterDataStatus ExampleContext::onResponseBody(size_t /* body_buffer_length */, +FilterDataStatus 
ExampleContext::onResponseBody(size_t body_buffer_length, bool /* end_of_stream */) { - setBuffer(WasmBufferType::HttpResponseBody, 0, 12, "Hello, world"); + setBuffer(WasmBufferType::HttpResponseBody, 0, body_buffer_length, "Hello, world\n"); return FilterDataStatus::Continue; } diff --git a/examples/win32-front-proxy/front-envoy.yaml b/examples/win32-front-proxy/front-envoy.yaml index 2cc1c547bef8c..7ef8b6917b069 100644 --- a/examples/win32-front-proxy/front-envoy.yaml +++ b/examples/win32-front-proxy/front-envoy.yaml @@ -28,7 +28,6 @@ static_resources: cluster: service2 http_filters: - name: envoy.filters.http.router - typed_config: {} - address: socket_address: @@ -58,7 +57,6 @@ static_resources: cluster: service2 http_filters: - name: envoy.filters.http.router - typed_config: {} transport_socket: name: envoy.transport_sockets.tls diff --git a/examples/win32-front-proxy/service-envoy.yaml b/examples/win32-front-proxy/service-envoy.yaml index b3ea728cc5f60..fd9ed4457de74 100644 --- a/examples/win32-front-proxy/service-envoy.yaml +++ b/examples/win32-front-proxy/service-envoy.yaml @@ -24,7 +24,6 @@ static_resources: cluster: local_service http_filters: - name: envoy.filters.http.router - typed_config: {} clusters: - name: local_service connect_timeout: 0.25s diff --git a/examples/zipkin-tracing/front-envoy-zipkin.yaml b/examples/zipkin-tracing/front-envoy-zipkin.yaml index 7fc5cfb61f70e..0469b5895f387 100644 --- a/examples/zipkin-tracing/front-envoy-zipkin.yaml +++ b/examples/zipkin-tracing/front-envoy-zipkin.yaml @@ -43,7 +43,6 @@ static_resources: value: "%REQ(x-request-id)%" http_filters: - name: envoy.filters.http.router - typed_config: {} clusters: - name: service1 type: STRICT_DNS diff --git a/examples/zipkin-tracing/service1-envoy-zipkin.yaml b/examples/zipkin-tracing/service1-envoy-zipkin.yaml index fd790aa3c1e94..bcee3c873d654 100644 --- a/examples/zipkin-tracing/service1-envoy-zipkin.yaml +++ b/examples/zipkin-tracing/service1-envoy-zipkin.yaml @@ -35,7 
+35,6 @@ static_resources: operation: checkAvailability http_filters: - name: envoy.filters.http.router - typed_config: {} - address: socket_address: address: 0.0.0.0 @@ -71,7 +70,6 @@ static_resources: operation: checkStock http_filters: - name: envoy.filters.http.router - typed_config: {} clusters: - name: local_service type: STRICT_DNS diff --git a/examples/zipkin-tracing/service2-envoy-zipkin.yaml b/examples/zipkin-tracing/service2-envoy-zipkin.yaml index 2078d93d00777..c13ba31ab6352 100644 --- a/examples/zipkin-tracing/service2-envoy-zipkin.yaml +++ b/examples/zipkin-tracing/service2-envoy-zipkin.yaml @@ -35,7 +35,6 @@ static_resources: operation: checkStock http_filters: - name: envoy.filters.http.router - typed_config: {} clusters: - name: local_service type: STRICT_DNS diff --git a/generated_api_shadow/BUILD b/generated_api_shadow/BUILD index cb40c29c8e407..4b11cc147633a 100644 --- a/generated_api_shadow/BUILD +++ b/generated_api_shadow/BUILD @@ -195,6 +195,7 @@ proto_library( "//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg", "//envoy/extensions/matching/common_inputs/environment_variable/v3:pkg", "//envoy/extensions/matching/input_matchers/consistent_hashing/v3:pkg", + "//envoy/extensions/matching/input_matchers/ip/v3:pkg", "//envoy/extensions/network/socket_interface/v3:pkg", "//envoy/extensions/quic/crypto_stream/v3:pkg", "//envoy/extensions/quic/proof_source/v3:pkg", diff --git a/generated_api_shadow/bazel/external_proto_deps.bzl b/generated_api_shadow/bazel/external_proto_deps.bzl index 010eeb145785f..6b11495d3c0dc 100644 --- a/generated_api_shadow/bazel/external_proto_deps.bzl +++ b/generated_api_shadow/bazel/external_proto_deps.bzl @@ -11,7 +11,7 @@ EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP = { "google/api/expr/v1alpha1/checked.proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", "google/api/expr/v1alpha1/syntax.proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", - "metrics.proto": 
"@prometheus_metrics_model//:client_model", + "io/prometheus/client/metrics.proto": "@prometheus_metrics_model//:client_model", "opencensus/proto/trace/v1/trace.proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", "opencensus/proto/trace/v1/trace_config.proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", "opentelemetry/proto/common/v1/common.proto": "@opentelemetry_proto//:common", diff --git a/generated_api_shadow/bazel/repositories.bzl b/generated_api_shadow/bazel/repositories.bzl index 7183613a47831..74e19f831179f 100644 --- a/generated_api_shadow/bazel/repositories.bzl +++ b/generated_api_shadow/bazel/repositories.bzl @@ -55,7 +55,7 @@ load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") api_cc_py_proto_library( name = "client_model", srcs = [ - "metrics.proto", + "io/prometheus/client/metrics.proto", ], visibility = ["//visibility:public"], ) diff --git a/generated_api_shadow/bazel/repository_locations.bzl b/generated_api_shadow/bazel/repository_locations.bzl index f2685aaeb014e..968c6a9ffa286 100644 --- a/generated_api_shadow/bazel/repository_locations.bzl +++ b/generated_api_shadow/bazel/repository_locations.bzl @@ -89,9 +89,9 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Prometheus client model", project_desc = "Data model artifacts for Prometheus", project_url = "https://github.com/prometheus/client_model", - version = "0255a22d35ad5661ef7aa89c95fdf5dfd685283f", - sha256 = "a83fd26a80c5f9b82d1231448141a148c1d7a0c8f581ddf49fdbd8c1545e5661", - release_date = "2021-01-16", + version = "147c58e9608a4f9628b53b6cc863325ca746f63a", + sha256 = "f7da30879dcdfae367fa65af1969945c3148cfbfc462b30b7d36f17134675047", + release_date = "2021-06-07", strip_prefix = "client_model-{version}", urls = ["https://github.com/prometheus/client_model/archive/{version}.tar.gz"], use_category = ["api"], diff --git a/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto 
b/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto index dc3e611b6c1a0..2161f80478c23 100644 --- a/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto +++ b/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto @@ -247,6 +247,7 @@ message ResponseFlagFilter { in: "DT" in: "UPE" in: "NC" + in: "OM" } } }]; diff --git a/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto b/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto index 7559a3b82c79f..3e0c7f53598cc 100644 --- a/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto +++ b/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto @@ -245,6 +245,7 @@ message ResponseFlagFilter { in: "DT" in: "UPE" in: "NC" + in: "OM" } } }]; diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto index 7ba5e5c8fb436..1a26fe75afd03 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto @@ -44,7 +44,7 @@ message ClusterCollection { } // Configuration for a single upstream cluster. -// [#next-free-field: 54] +// [#next-free-field: 55] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster"; @@ -416,8 +416,8 @@ message Cluster { // The table size for Maglev hashing. The Maglev aims for ‘minimal disruption’ rather than an absolute guarantee. // Minimal disruption means that when the set of upstreams changes, a connection will likely be sent to the same // upstream as it was before. Increasing the table size reduces the amount of disruption. - // The table size must be prime number. If it is not specified, the default is 65537. - google.protobuf.UInt64Value table_size = 1; + // The table size must be prime number limited to 5000011. If it is not specified, the default is 65537. 
+ google.protobuf.UInt64Value table_size = 1 [(validate.rules).uint64 = {lte: 5000011}]; } // Specific configuration for the @@ -877,6 +877,13 @@ message Cluster { // DNS resolution configuration which includes the underlying dns resolver addresses and options. core.v3.DnsResolutionConfig dns_resolution_config = 53; + // Optional configuration for having cluster readiness block on warm-up. Currently, only applicable for + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`. + // If true, cluster readiness blocks on warm-up. If false, the cluster will complete + // initialization whether or not warm-up has completed. Defaults to true. + google.protobuf.BoolValue wait_for_warm_on_init = 54; + // If specified, outlier detection will be enabled for this upstream cluster. // Each of the configuration values can be overridden via // :ref:`runtime values `. @@ -931,7 +938,7 @@ message Cluster { CommonLbConfig common_lb_config = 27; // Optional custom transport socket implementation to use for upstream connections. - // To setup TLS, set a transport socket with name `tls` and + // To setup TLS, set a transport socket with name `envoy.transport_sockets.tls` and // :ref:`UpstreamTlsContexts ` in the `typed_config`. // If no transport socket configuration is specified, new connections // will be set up with plaintext. diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto index 1ead224bc876f..863a6d48974be 100644 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto @@ -45,7 +45,7 @@ message ClusterCollection { } // Configuration for a single upstream cluster. -// [#next-free-field: 54] +// [#next-free-field: 55] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Cluster"; @@ -418,8 +418,8 @@ message Cluster { // The table size for Maglev hashing. 
The Maglev aims for ‘minimal disruption’ rather than an absolute guarantee. // Minimal disruption means that when the set of upstreams changes, a connection will likely be sent to the same // upstream as it was before. Increasing the table size reduces the amount of disruption. - // The table size must be prime number. If it is not specified, the default is 65537. - google.protobuf.UInt64Value table_size = 1; + // The table size must be prime number limited to 5000011. If it is not specified, the default is 65537. + google.protobuf.UInt64Value table_size = 1 [(validate.rules).uint64 = {lte: 5000011}]; } // Specific configuration for the @@ -885,6 +885,13 @@ message Cluster { // DNS resolution configuration which includes the underlying dns resolver addresses and options. core.v4alpha.DnsResolutionConfig dns_resolution_config = 53; + // Optional configuration for having cluster readiness block on warm-up. Currently, only applicable for + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`. + // If true, cluster readiness blocks on warm-up. If false, the cluster will complete + // initialization whether or not warm-up has completed. Defaults to true. + google.protobuf.BoolValue wait_for_warm_on_init = 54; + // If specified, outlier detection will be enabled for this upstream cluster. // Each of the configuration values can be overridden via // :ref:`runtime values `. @@ -939,7 +946,7 @@ message Cluster { CommonLbConfig common_lb_config = 27; // Optional custom transport socket implementation to use for upstream connections. - // To setup TLS, set a transport socket with name `tls` and + // To setup TLS, set a transport socket with name `envoy.transport_sockets.tls` and // :ref:`UpstreamTlsContexts ` in the `typed_config`. // If no transport socket configuration is specified, new connections // will be set up with plaintext. 
diff --git a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto b/generated_api_shadow/envoy/config/listener/v3/listener_components.proto index e8950d13c2c41..5de8b265d8806 100644 --- a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto +++ b/generated_api_shadow/envoy/config/listener/v3/listener_components.proto @@ -67,9 +67,12 @@ message Filter { // 3. Server name (e.g. SNI for TLS protocol), // 4. Transport protocol. // 5. Application protocols (e.g. ALPN for TLS protocol). -// 6. Source type (e.g. any, local or external network). -// 7. Source IP address. -// 8. Source port. +// 6. Directly connected source IP address (this will only be different from the source IP address +// when using a listener filter that overrides the source address, such as the :ref:`Proxy Protocol +// listener filter `). +// 7. Source type (e.g. any, local or external network). +// 8. Source IP address. +// 9. Source port. // // For criteria that allow ranges or wildcards, the most specific value in any // of the configured filter chains that matches the incoming connection is going @@ -93,7 +96,7 @@ message Filter { // listed at the end, because that's how we want to list them in the docs. // // [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] -// [#next-free-field: 13] +// [#next-free-field: 14] message FilterChainMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.FilterChainMatch"; @@ -127,6 +130,11 @@ message FilterChainMatch { // [#not-implemented-hide:] google.protobuf.UInt32Value suffix_len = 5; + // The criteria is satisfied if the directly connected source IP address of the downstream + // connection is contained in at least one of the specified subnets. If the parameter is not + // specified or the list is empty, the directly connected source IP address is ignored. 
+ repeated core.v3.CidrRange direct_source_prefix_ranges = 13; + // Specifies the connection source IP match type. Can be any, local or external network. ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; @@ -237,7 +245,7 @@ message FilterChain { core.v3.Metadata metadata = 5; // Optional custom transport socket implementation to use for downstream connections. - // To setup TLS, set a transport socket with name `tls` and + // To setup TLS, set a transport socket with name `envoy.transport_sockets.tls` and // :ref:`DownstreamTlsContext ` in the `typed_config`. // If no transport socket configuration is specified, new connections // will be set up with plaintext. diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto b/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto index a261d36c70cf1..e4db4367d1c6a 100644 --- a/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto +++ b/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto @@ -64,9 +64,12 @@ message Filter { // 3. Server name (e.g. SNI for TLS protocol), // 4. Transport protocol. // 5. Application protocols (e.g. ALPN for TLS protocol). -// 6. Source type (e.g. any, local or external network). -// 7. Source IP address. -// 8. Source port. +// 6. Directly connected source IP address (this will only be different from the source IP address +// when using a listener filter that overrides the source address, such as the :ref:`Proxy Protocol +// listener filter `). +// 7. Source type (e.g. any, local or external network). +// 8. Source IP address. +// 9. Source port. // // For criteria that allow ranges or wildcards, the most specific value in any // of the configured filter chains that matches the incoming connection is going @@ -90,7 +93,7 @@ message Filter { // listed at the end, because that's how we want to list them in the docs. 
// // [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] -// [#next-free-field: 13] +// [#next-free-field: 14] message FilterChainMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.FilterChainMatch"; @@ -124,6 +127,11 @@ message FilterChainMatch { // [#not-implemented-hide:] google.protobuf.UInt32Value suffix_len = 5; + // The criteria is satisfied if the directly connected source IP address of the downstream + // connection is contained in at least one of the specified subnets. If the parameter is not + // specified or the list is empty, the directly connected source IP address is ignored. + repeated core.v4alpha.CidrRange direct_source_prefix_ranges = 13; + // Specifies the connection source IP match type. Can be any, local or external network. ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; @@ -242,7 +250,7 @@ message FilterChain { core.v4alpha.Metadata metadata = 5; // Optional custom transport socket implementation to use for downstream connections. - // To setup TLS, set a transport socket with name `tls` and + // To setup TLS, set a transport socket with name `envoy.transport_sockets.tls` and // :ref:`DownstreamTlsContext ` in the `typed_config`. // If no transport socket configuration is specified, new connections // will be set up with plaintext. 
diff --git a/generated_api_shadow/envoy/config/route/v3/route.proto b/generated_api_shadow/envoy/config/route/v3/route.proto index 80956fdeb4e23..e2bf52165be92 100644 --- a/generated_api_shadow/envoy/config/route/v3/route.proto +++ b/generated_api_shadow/envoy/config/route/v3/route.proto @@ -4,6 +4,7 @@ package envoy.config.route.v3; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/route/v3/route_components.proto"; import "google/protobuf/wrappers.proto"; @@ -21,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // * Routing :ref:`architecture overview ` // * HTTP :ref:`router filter ` -// [#next-free-field: 12] +// [#next-free-field: 13] message RouteConfiguration { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.RouteConfiguration"; @@ -119,6 +120,18 @@ message RouteConfiguration { // is not subject to data plane buffering controls. // google.protobuf.UInt32Value max_direct_response_body_size_bytes = 11; + + // [#not-implemented-hide:] + // A list of plugins and their configurations which may be used by a + // :ref:`envoy_v3_api_field_config.route.v3.RouteAction.cluster_specifier_plugin` + // within the route. All *extension.name* fields in this list must be unique. + repeated ClusterSpecifierPlugin cluster_specifier_plugins = 12; +} + +// Configuration for a cluster specifier plugin. +message ClusterSpecifierPlugin { + // The name of the plugin and its opaque configuration. 
+ core.v3.TypedExtensionConfig extension = 1; } message Vhds { diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index b8f03cde3a9dd..5a55b4d60ac05 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -314,7 +314,7 @@ message Route { message WeightedCluster { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.WeightedCluster"; - // [#next-free-field: 11] + // [#next-free-field: 12] message ClusterWeight { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.WeightedCluster.ClusterWeight"; @@ -380,6 +380,13 @@ message WeightedCluster { // message to specify additional options.] map typed_per_filter_config = 10; + oneof host_rewrite_specifier { + // Indicates that during forwarding, the host header will be swapped with + // this value. + string host_rewrite_literal = 11 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + } + map hidden_envoy_deprecated_per_filter_config = 8 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; } @@ -475,7 +482,7 @@ message RouteMatch { } // Indicates that prefix/path matching should be case sensitive. The default - // is true. + // is true. Ignored for safe_regex matching. google.protobuf.BoolValue case_sensitive = 4; // Indicates that the route should additionally match on a runtime key. Every time the route @@ -586,7 +593,7 @@ message CorsPolicy { ]; } -// [#next-free-field: 37] +// [#next-free-field: 38] message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction"; @@ -862,6 +869,14 @@ message RouteAction { // :ref:`traffic splitting ` // for additional documentation. 
WeightedCluster weighted_clusters = 3; + + // [#not-implemented-hide:] + // Name of the cluster specifier plugin to use to determine the cluster for + // requests on this route. The plugin name must be defined in the associated + // :ref:`envoy_v3_api_field_config.route.v3.RouteConfiguration.cluster_specifier_plugins` + // in the + // :ref:`envoy_v3_api_field_config.core.v3.TypedExtensionConfig.name` field. + string cluster_specifier_plugin = 37; } // The HTTP status code to use when configured cluster is not found. diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route.proto b/generated_api_shadow/envoy/config/route/v4alpha/route.proto index 912fc8051556e..4a19386824821 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route.proto @@ -4,6 +4,7 @@ package envoy.config.route.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/route/v4alpha/route_components.proto"; import "google/protobuf/wrappers.proto"; @@ -21,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // * Routing :ref:`architecture overview ` // * HTTP :ref:`router filter ` -// [#next-free-field: 12] +// [#next-free-field: 13] message RouteConfiguration { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteConfiguration"; @@ -120,6 +121,21 @@ message RouteConfiguration { // is not subject to data plane buffering controls. // google.protobuf.UInt32Value max_direct_response_body_size_bytes = 11; + + // [#not-implemented-hide:] + // A list of plugins and their configurations which may be used by a + // :ref:`envoy_v3_api_field_config.route.v3.RouteAction.cluster_specifier_plugin` + // within the route. All *extension.name* fields in this list must be unique. 
+ repeated ClusterSpecifierPlugin cluster_specifier_plugins = 12; +} + +// Configuration for a cluster specifier plugin. +message ClusterSpecifierPlugin { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.ClusterSpecifierPlugin"; + + // The name of the plugin and its opaque configuration. + core.v4alpha.TypedExtensionConfig extension = 1; } message Vhds { diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index 6b8c146582a33..3bcfa659ac13a 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -311,7 +311,7 @@ message WeightedCluster { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.WeightedCluster"; - // [#next-free-field: 11] + // [#next-free-field: 12] message ClusterWeight { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.WeightedCluster.ClusterWeight"; @@ -378,6 +378,13 @@ message WeightedCluster { // :ref:`FilterConfig` // message to specify additional options.] map typed_per_filter_config = 10; + + oneof host_rewrite_specifier { + // Indicates that during forwarding, the host header will be swapped with + // this value. + string host_rewrite_literal = 11 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + } } // Specifies one or more upstream clusters associated with the route. @@ -468,7 +475,7 @@ message RouteMatch { } // Indicates that prefix/path matching should be case sensitive. The default - // is true. + // is true. Ignored for safe_regex matching. google.protobuf.BoolValue case_sensitive = 4; // Indicates that the route should additionally match on a runtime key. 
Every time the route @@ -565,7 +572,7 @@ message CorsPolicy { core.v4alpha.RuntimeFractionalPercent shadow_enabled = 10; } -// [#next-free-field: 37] +// [#next-free-field: 38] message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction"; @@ -847,6 +854,14 @@ message RouteAction { // :ref:`traffic splitting ` // for additional documentation. WeightedCluster weighted_clusters = 3; + + // [#not-implemented-hide:] + // Name of the cluster specifier plugin to use to determine the cluster for + // requests on this route. The plugin name must be defined in the associated + // :ref:`envoy_v3_api_field_config.route.v3.RouteConfiguration.cluster_specifier_plugins` + // in the + // :ref:`envoy_v3_api_field_config.core.v3.TypedExtensionConfig.name` field. + string cluster_specifier_plugin = 37; } // The HTTP status code to use when configured cluster is not found. diff --git a/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto b/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto index 98bdd1d6e8322..c53ae0d6ab852 100644 --- a/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto +++ b/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto @@ -186,7 +186,7 @@ message AccessLogCommon { } // Flags indicating occurrences during request/response processing. -// [#next-free-field: 26] +// [#next-free-field: 27] message ResponseFlags { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.ResponseFlags"; @@ -281,6 +281,9 @@ message ResponseFlags { // Indicates no cluster was found for the request. bool no_cluster_found = 25; + + // Indicates overload manager terminated the request. + bool overload_manager = 26; } // Properties of a negotiated TLS connection. 
diff --git a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto index 79d6752502094..5c35e80d591fd 100644 --- a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto +++ b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.extensions.common.dynamic_forward_proxy.v3; import "envoy/config/cluster/v3/cluster.proto"; +import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/resolver.proto"; import "google/protobuf/duration.proto"; @@ -29,7 +30,7 @@ message DnsCacheCircuitBreakers { // Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview // ` for more information. -// [#next-free-field: 10] +// [#next-free-field: 11] message DnsCacheConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.common.dynamic_forward_proxy.v2alpha.DnsCacheConfig"; @@ -108,4 +109,9 @@ message DnsCacheConfig { // DNS resolution configuration which includes the underlying dns resolver addresses and options. config.core.v3.DnsResolutionConfig dns_resolution_config = 9; + + // Hostnames that should be preresolved into the cache upon creation. This might provide a + // performance improvement, in the form of cache hits, for hostnames that are going to be + // resolved during steady state and are known at config load time. 
+ repeated config.core.v3.SocketAddress preresolve_hostnames = 10; } diff --git a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v4alpha/dns_cache.proto b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v4alpha/dns_cache.proto index a9040a90dfc20..dde756a1608a2 100644 --- a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v4alpha/dns_cache.proto +++ b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v4alpha/dns_cache.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.extensions.common.dynamic_forward_proxy.v4alpha; import "envoy/config/cluster/v4alpha/cluster.proto"; +import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/resolver.proto"; import "google/protobuf/duration.proto"; @@ -32,7 +33,7 @@ message DnsCacheCircuitBreakers { // Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview // ` for more information. -// [#next-free-field: 10] +// [#next-free-field: 11] message DnsCacheConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.common.dynamic_forward_proxy.v3.DnsCacheConfig"; @@ -111,4 +112,9 @@ message DnsCacheConfig { // DNS resolution configuration which includes the underlying dns resolver addresses and options. config.core.v4alpha.DnsResolutionConfig dns_resolution_config = 9; + + // Hostnames that should be preresolved into the cache upon creation. This might provide a + // performance improvement, in the form of cache hits, for hostnames that are going to be + // resolved during steady state and are known at config load time. 
+ repeated config.core.v4alpha.SocketAddress preresolve_hostnames = 10; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index ee9be861fc073..d86b53ad6f537 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -218,18 +218,21 @@ message AuthorizationRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.AuthorizationRequest"; - // Authorization request will include the client request headers that have a correspondent match - // in the :ref:`list `. Note that in addition to the - // user's supplied matchers: + // Authorization request includes the client request headers that have a correspondent match + // in the :ref:`list `. // - // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically included to the list. + // .. note:: + // + // In addition to the the user's supplied matchers, ``Host``, ``Method``, ``Path``, + // ``Content-Length``, and ``Authorization`` are **automatically included** to the list. + // + // .. note:: // - // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have - // a message body. However, the authorization request can include the buffered client request body - // (controlled by :ref:`with_request_body - // ` setting), - // consequently the value of *Content-Length* of the authorization request reflects the size of - // its payload size. + // By default, ``Content-Length`` header is set to ``0`` and the request to the authorization + // service has no message body. 
However, the authorization request *may* include the buffered + // client request body (controlled by :ref:`with_request_body + // ` + // setting) hence the value of its ``Content-Length`` reflects the size of its payload size. // type.matcher.v3.ListStringMatcher allowed_headers = 1; diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto index 90f003b0a137c..07114e041ff04 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto @@ -215,18 +215,21 @@ message AuthorizationRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.ext_authz.v3.AuthorizationRequest"; - // Authorization request will include the client request headers that have a correspondent match - // in the :ref:`list `. Note that in addition to the - // user's supplied matchers: + // Authorization request includes the client request headers that have a correspondent match + // in the :ref:`list `. // - // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically included to the list. + // .. note:: + // + // In addition to the the user's supplied matchers, ``Host``, ``Method``, ``Path``, + // ``Content-Length``, and ``Authorization`` are **automatically included** to the list. + // + // .. note:: // - // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have - // a message body. However, the authorization request can include the buffered client request body - // (controlled by :ref:`with_request_body - // ` setting), - // consequently the value of *Content-Length* of the authorization request reflects the size of - // its payload size. 
+ // By default, ``Content-Length`` header is set to ``0`` and the request to the authorization + // service has no message body. However, the authorization request *may* include the buffered + // client request body (controlled by :ref:`with_request_body + // ` + // setting) hence the value of its ``Content-Length`` reflects the size of its payload size. // type.matcher.v4alpha.ListStringMatcher allowed_headers = 1; diff --git a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto index afc761c07c7e1..a79e3382d6334 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto @@ -52,7 +52,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // cache_duration: // seconds: 300 // -// [#next-free-field: 11] +// [#next-free-field: 12] message JwtProvider { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.jwt_authn.v2alpha.JwtProvider"; @@ -190,6 +190,15 @@ message JwtProvider { string forward_payload_header = 8 [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + // When :ref:`forward_payload_header ` + // is specified, the base64 encoded payload will be added to the headers. + // Normally JWT based64 encode doesn't add padding. If this field is true, + // the header will be padded. + // + // This field is only relevant if :ref:`forward_payload_header ` + // is specified. + bool pad_forward_payload_header = 11; + // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** // The value is the *protobuf::Struct*. 
The value of this field will be the key for its *fields* diff --git a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto index 442ba7df061ee..82f6bef04eae4 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto @@ -52,7 +52,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // cache_duration: // seconds: 300 // -// [#next-free-field: 11] +// [#next-free-field: 12] message JwtProvider { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.jwt_authn.v3.JwtProvider"; @@ -190,6 +190,15 @@ message JwtProvider { string forward_payload_header = 8 [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + // When :ref:`forward_payload_header ` + // is specified, the base64 encoded payload will be added to the headers. + // Normally JWT based64 encode doesn't add padding. If this field is true, + // the header will be padded. + // + // This field is only relevant if :ref:`forward_payload_header ` + // is specified. + bool pad_forward_payload_header = 11; + // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields* diff --git a/generated_api_shadow/envoy/extensions/matching/input_matchers/ip/v3/BUILD b/generated_api_shadow/envoy/extensions/matching/input_matchers/ip/v3/BUILD new file mode 100644 index 0000000000000..1c1a6f6b44235 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/matching/input_matchers/ip/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/matching/input_matchers/ip/v3/ip.proto b/generated_api_shadow/envoy/extensions/matching/input_matchers/ip/v3/ip.proto new file mode 100644 index 0000000000000..3c7cb4eb5f19a --- /dev/null +++ b/generated_api_shadow/envoy/extensions/matching/input_matchers/ip/v3/ip.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; + +package envoy.extensions.matching.input_matchers.ip.v3; + +import "envoy/config/core/v3/address.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.matching.input_matchers.ip.v3"; +option java_outer_classname = "IpProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: IP matcher] +// [#extension: envoy.matching.input_matchers.ip] + +// This input matcher matches IPv4 or IPv6 addresses against a list of CIDR +// ranges. It returns true if and only if the input IP belongs to at least one +// of these CIDR ranges. Internally, it uses a Level-Compressed trie, as +// described in the paper `IP-address lookup using LC-tries +// `_ +// by S. Nilsson and G. Karlsson. For "big" lists of IPs, this matcher is more +// efficient than multiple single IP matcher, that would have a linear cost. +message Ip { + // Match if the IP belongs to any of these CIDR ranges. + repeated config.core.v3.CidrRange cidr_ranges = 1 [(validate.rules).repeated = {min_items: 1}]; + + // The human readable prefix to use when emitting statistics for the IP input + // matcher. Names in the table below are concatenated to this prefix. + // + // .. 
csv-table:: + // :header: Name, Type, Description + // :widths: 1, 1, 2 + // + // ip_parsing_failed, Counter, Total number of IP addresses the matcher was unable to parse + string stat_prefix = 2 [(validate.rules).string = {min_len: 1}]; +} diff --git a/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto index 35af0cf690c20..b4566c826ed08 100644 --- a/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto +++ b/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto @@ -77,6 +77,7 @@ message VmConfig { // **envoy.wasm.runtime.wasmtime**: `Wasmtime `_-based WebAssembly runtime. // This runtime is not enabled in the official build. // + // [#extension-category: envoy.wasm.runtime] string runtime = 2 [(validate.rules).string = {min_len: 1}]; // The Wasm code that Envoy will execute. @@ -86,7 +87,6 @@ message VmConfig { // (proxy_on_start). `google.protobuf.Struct` is serialized as JSON before // passing it to the plugin. `google.protobuf.BytesValue` and // `google.protobuf.StringValue` are passed directly without the wrapper. - // [#extension-category: envoy.wasm.runtime] google.protobuf.Any configuration = 4; // Allow the wasm file to include pre-compiled code on VMs which support it. 
diff --git a/generated_api_shadow/envoy/service/metrics/v2/metrics_service.proto b/generated_api_shadow/envoy/service/metrics/v2/metrics_service.proto index aa5e703850155..78d6e47e20ab1 100644 --- a/generated_api_shadow/envoy/service/metrics/v2/metrics_service.proto +++ b/generated_api_shadow/envoy/service/metrics/v2/metrics_service.proto @@ -4,7 +4,7 @@ package envoy.service.metrics.v2; import "envoy/api/v2/core/base.proto"; -import "metrics.proto"; +import "io/prometheus/client/metrics.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; diff --git a/generated_api_shadow/envoy/service/metrics/v3/metrics_service.proto b/generated_api_shadow/envoy/service/metrics/v3/metrics_service.proto index 033c168c32ba1..e86bda356f7d2 100644 --- a/generated_api_shadow/envoy/service/metrics/v3/metrics_service.proto +++ b/generated_api_shadow/envoy/service/metrics/v3/metrics_service.proto @@ -4,7 +4,7 @@ package envoy.service.metrics.v3; import "envoy/config/core/v3/base.proto"; -import "metrics.proto"; +import "io/prometheus/client/metrics.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; diff --git a/generated_api_shadow/envoy/service/metrics/v4alpha/metrics_service.proto b/generated_api_shadow/envoy/service/metrics/v4alpha/metrics_service.proto index d4f2378d35f32..5e1412f103e93 100644 --- a/generated_api_shadow/envoy/service/metrics/v4alpha/metrics_service.proto +++ b/generated_api_shadow/envoy/service/metrics/v4alpha/metrics_service.proto @@ -4,7 +4,7 @@ package envoy.service.metrics.v4alpha; import "envoy/config/core/v4alpha/base.proto"; -import "metrics.proto"; +import "io/prometheus/client/metrics.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; diff --git a/source/common/buffer/watermark_buffer.h b/source/common/buffer/watermark_buffer.h index 3a432c76481c2..6afc4d8602323 100644 --- a/source/common/buffer/watermark_buffer.h +++ 
b/source/common/buffer/watermark_buffer.h @@ -75,9 +75,9 @@ using WatermarkBufferPtr = std::unique_ptr; class WatermarkBufferFactory : public WatermarkFactory { public: // Buffer::WatermarkFactory - InstancePtr create(std::function below_low_watermark, - std::function above_high_watermark, - std::function above_overflow_watermark) override { + InstancePtr createBuffer(std::function below_low_watermark, + std::function above_high_watermark, + std::function above_overflow_watermark) override { return std::make_unique(below_low_watermark, above_high_watermark, above_overflow_watermark); } diff --git a/source/common/common/BUILD b/source/common/common/BUILD index 6fd4fd29cce63..5614a214ba5a8 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -415,10 +415,14 @@ envoy_cc_library( envoy_cc_library( name = "callback_impl_lib", + srcs = ["callback_impl.cc"], hdrs = ["callback_impl.h"], deps = [ ":assert_lib", + ":lock_guard_lib", + ":thread_lib", "//envoy/common:callback", + "//source/common/event:dispatcher_lib", ], ) diff --git a/source/common/common/base64.cc b/source/common/common/base64.cc index 8d0f02e090c4d..9eab86b1c48bc 100644 --- a/source/common/common/base64.cc +++ b/source/common/common/base64.cc @@ -7,6 +7,7 @@ #include "source/common/common/empty_string.h" #include "absl/container/fixed_array.h" +#include "absl/strings/str_cat.h" namespace Envoy { namespace { @@ -234,6 +235,13 @@ std::string Base64::encode(const char* input, uint64_t length, bool add_padding) return ret; } +void Base64::completePadding(std::string& encoded) { + if (encoded.length() % 4 != 0) { + std::string trailing_padding(4 - encoded.length() % 4, '='); + absl::StrAppend(&encoded, trailing_padding); + } +} + std::string Base64Url::decode(const std::string& input) { if (input.empty()) { return EMPTY_STRING; diff --git a/source/common/common/base64.h b/source/common/common/base64.h index 13beff40b64ab..a69ffbf910a3b 100644 --- a/source/common/common/base64.h +++ 
b/source/common/common/base64.h @@ -54,6 +54,12 @@ class Base64 { * bytes. */ static std::string decodeWithoutPadding(absl::string_view input); + + /** + * Add the padding in the base64 encoded binary if the padding is missing. + * @param encoded is the target to complete the padding. + */ + static void completePadding(std::string& encoded); }; /** diff --git a/source/common/common/callback_impl.cc b/source/common/common/callback_impl.cc new file mode 100644 index 0000000000000..00e4348575576 --- /dev/null +++ b/source/common/common/callback_impl.cc @@ -0,0 +1,56 @@ +#include "source/common/common/callback_impl.h" + +namespace Envoy { +namespace Common { + +CallbackHandlePtr ThreadSafeCallbackManager::add(Event::Dispatcher& dispatcher, Callback callback) { + Thread::LockGuard lock(lock_); + auto new_callback = std::make_unique(shared_from_this(), callback, dispatcher); + callbacks_.push_back(CallbackListEntry(new_callback.get(), dispatcher, + std::weak_ptr(new_callback->still_alive_))); + // Get the list iterator of added callback handle, which will be used to remove itself from + // callbacks_ list. + new_callback->it_ = (--callbacks_.end()); + return new_callback; +} + +void ThreadSafeCallbackManager::runCallbacks() { + Thread::LockGuard lock(lock_); + for (auto it = callbacks_.cbegin(); it != callbacks_.cend();) { + auto& [cb, cb_dispatcher, still_alive] = *(it++); + + cb_dispatcher.post([cb = cb, still_alive = still_alive] { + // Once we're running on the thread that scheduled the callback, validate the + // callback is still valid and execute. Even though 'expired()' is racy, because + // we are on the scheduling thread, this should not race with destruction. 
+ if (!still_alive.expired()) { + cb->cb_(); + } + }); + } +} + +size_t ThreadSafeCallbackManager::size() const noexcept { + Thread::LockGuard lock(lock_); + return callbacks_.size(); +} + +void ThreadSafeCallbackManager::remove(typename std::list::iterator& it) { + Thread::LockGuard lock(lock_); + callbacks_.erase(it); +} + +ThreadSafeCallbackManager::CallbackHolder::CallbackHolder( + std::shared_ptr parent, Callback cb, + Event::Dispatcher& cb_dispatcher) + : parent_(parent), cb_(cb), callback_dispatcher_(cb_dispatcher) {} + +ThreadSafeCallbackManager::CallbackHolder::~CallbackHolder() { + // Validate that destruction of the callback is happening on the same thread in which it was + // intended to be executed. + ASSERT(callback_dispatcher_.isThreadSafe()); + parent_->remove(it_); +} + +} // namespace Common +} // namespace Envoy diff --git a/source/common/common/callback_impl.h b/source/common/common/callback_impl.h index 367aa05591ecb..5c36f478c0970 100644 --- a/source/common/common/callback_impl.h +++ b/source/common/common/callback_impl.h @@ -2,16 +2,24 @@ #include #include +#include +#include #include "envoy/common/callback.h" +#include "envoy/event/dispatcher.h" +#include "envoy/thread/thread.h" #include "source/common/common/assert.h" +#include "source/common/common/lock_guard.h" +#include "source/common/common/thread.h" namespace Envoy { namespace Common { /** * Utility class for managing callbacks. + * + * @see ThreadSafeCallbackManager for dealing with callbacks across multiple threads */ template class CallbackManager { public: @@ -45,6 +53,23 @@ template class CallbackManager { } } + /** + * @brief Run all callbacks with a function that returns the input arguments + * + * NOTE: This code is currently safe if a callback deletes ITSELF from within a callback. It is + * not safe if a callback deletes other callbacks. + * @param run_with function that is responsible for generating inputs to callbacks. This will be + * executed once for each callback. 
+ */ + void runCallbacksWith(std::function(void)> run_with) { + for (auto it = callbacks_.cbegin(); it != callbacks_.cend();) { + auto cb = *(it++); + std::apply(cb->cb_, run_with()); + } + } + + size_t size() const noexcept { return callbacks_.size(); } + private: struct CallbackHolder : public CallbackHandle { CallbackHolder(CallbackManager& parent, Callback cb) @@ -81,5 +106,72 @@ template class CallbackManager { const std::shared_ptr still_alive_{std::make_shared(true)}; }; +/** + * @brief Utility class for managing callbacks across multiple threads. + * + * @see CallbackManager for a non-thread-safe version + */ +class ThreadSafeCallbackManager : public std::enable_shared_from_this { + struct CallbackHolder; + using CallbackListEntry = std::tuple>; + +public: + using Callback = std::function; + + /** + * @brief Create a ThreadSafeCallbackManager + * + * @note The ThreadSafeCallbackManager must always be represented as a std::shared_ptr in + * order to satisfy internal conditions to how callbacks are managed. + */ + static std::shared_ptr create() { + return std::shared_ptr(new ThreadSafeCallbackManager()); + } + + /** + * @brief Add a callback. + * @param dispatcher Dispatcher from the same thread as the registered callback. This will be used + * to schedule the execution of the callback. + * @param callback callback to add + * @return Handle that can be used to remove the callback. 
+ */ + ABSL_MUST_USE_RESULT CallbackHandlePtr add(Event::Dispatcher& dispatcher, Callback callback); + + /** + * @brief Run all callbacks + */ + void runCallbacks(); + + size_t size() const noexcept; + +private: + struct CallbackHolder : public CallbackHandle { + CallbackHolder(std::shared_ptr parent, Callback cb, + Event::Dispatcher& cb_dispatcher); + + ~CallbackHolder() override; + + std::shared_ptr parent_; + Callback cb_; + Event::Dispatcher& callback_dispatcher_; + std::shared_ptr still_alive_{std::make_shared(true)}; + + typename std::list::iterator it_; + }; + + ThreadSafeCallbackManager() = default; + + /** + * Remove a member update callback added via add(). + * @param handle supplies the callback handle to remove. + */ + void remove(typename std::list::iterator& it); + + // This must be held on all read/writes of callbacks_ + mutable Thread::MutexBasicLockable lock_{}; + + std::list callbacks_ ABSL_GUARDED_BY(lock_); +}; + } // namespace Common } // namespace Envoy diff --git a/source/common/common/matchers.h b/source/common/common/matchers.h index 396203ce5b50d..bdc284f2d480f 100644 --- a/source/common/common/matchers.h +++ b/source/common/common/matchers.h @@ -76,6 +76,11 @@ class DoubleMatcher : public ValueMatcher { const envoy::type::matcher::v3::DoubleMatcher matcher_; }; +class UniversalStringMatcher : public StringMatcher { +public: + bool match(absl::string_view) const override { return true; } +}; + class StringMatcherImpl : public ValueMatcher, public StringMatcher { public: explicit StringMatcherImpl(const envoy::type::matcher::v3::StringMatcher& matcher); diff --git a/source/common/common/regex.cc b/source/common/common/regex.cc index deeef41aa12bf..6b2e0050b487b 100644 --- a/source/common/common/regex.cc +++ b/source/common/common/regex.cc @@ -15,32 +15,6 @@ namespace Envoy { namespace Regex { namespace { -class CompiledStdMatcher : public CompiledMatcher { -public: - CompiledStdMatcher(std::regex&& regex) : regex_(std::move(regex)) {} - - 
// CompiledMatcher - bool match(absl::string_view value) const override { - try { - return std::regex_match(value.begin(), value.end(), regex_); - } catch (const std::regex_error& e) { - return false; - } - } - - // CompiledMatcher - std::string replaceAll(absl::string_view value, absl::string_view substitution) const override { - try { - return std::regex_replace(std::string(value), regex_, std::string(substitution)); - } catch (const std::regex_error& e) { - return std::string(value); - } - } - -private: - const std::regex regex_; -}; - class CompiledGoogleReMatcher : public CompiledMatcher { public: CompiledGoogleReMatcher(const envoy::type::matcher::v3::RegexMatcher& config) @@ -127,11 +101,6 @@ CompiledMatcherPtr Utility::parseRegex(const envoy::type::matcher::v3::RegexMatc return std::make_unique(matcher); } -CompiledMatcherPtr Utility::parseStdRegexAsCompiledMatcher(const std::string& regex, - std::regex::flag_type flags) { - return std::make_unique(parseStdRegex(regex, flags)); -} - std::regex Utility::parseStdRegex(const std::string& regex, std::regex::flag_type flags) { // TODO(zuercher): In the future, PGV (https://github.com/envoyproxy/protoc-gen-validate) // annotations may allow us to remove this in favor of direct validation of regular diff --git a/source/common/common/regex.h b/source/common/common/regex.h index 2fdcd52ebc1ce..e360cab01b96b 100644 --- a/source/common/common/regex.h +++ b/source/common/common/regex.h @@ -26,16 +26,6 @@ class Utility { static std::regex parseStdRegex(const std::string& regex, std::regex::flag_type flags = std::regex::optimize); - /** - * Construct an std::regex compiled regex matcher. - * - * TODO(mattklein123): In general this is only currently used in deprecated code paths and can be - * removed once all of those code paths are removed. 
- */ - static CompiledMatcherPtr - parseStdRegexAsCompiledMatcher(const std::string& regex, - std::regex::flag_type flags = std::regex::optimize); - /** * Construct a compiled regex matcher from a match config. */ diff --git a/source/common/common/thread.cc b/source/common/common/thread.cc index e6ee78c8e5208..282858399000b 100644 --- a/source/common/common/thread.cc +++ b/source/common/common/thread.cc @@ -13,6 +13,16 @@ bool MainThread::isMainThread() { return main_thread_singleton->inMainThread() || main_thread_singleton->inTestThread(); } +bool MainThread::isWorkerThread() { + auto main_thread_singleton = MainThreadSingleton::getExisting(); + // Allow worker thread code to be executed in test thread. + if (main_thread_singleton == nullptr) { + return true; + } + // When threading is on, compare thread id with main thread id. + return !main_thread_singleton->inMainThread(); +} + void MainThread::clear() { delete MainThreadSingleton::getExisting(); MainThreadSingleton::clear(); diff --git a/source/common/common/thread.h b/source/common/common/thread.h index 1ade49a3d1b93..347df89c9fab1 100644 --- a/source/common/common/thread.h +++ b/source/common/common/thread.h @@ -194,6 +194,7 @@ struct MainThread { */ static void clear(); static bool isMainThread(); + static bool isWorkerThread(); private: std::thread::id main_thread_id_; diff --git a/source/common/config/config_provider_impl.cc b/source/common/config/config_provider_impl.cc index 172c5305ee57d..b25115b40314d 100644 --- a/source/common/config/config_provider_impl.cc +++ b/source/common/config/config_provider_impl.cc @@ -52,8 +52,9 @@ bool ConfigSubscriptionInstance::checkAndApplyConfigUpdate(const Protobuf::Messa ConfigProviderManagerImplBase::ConfigProviderManagerImplBase(Server::Admin& admin, const std::string& config_name) { - config_tracker_entry_ = - admin.getConfigTracker().add(config_name, [this] { return dumpConfigs(); }); + config_tracker_entry_ = admin.getConfigTracker().add( + config_name, + 
[this](const Matchers::StringMatcher& name_matcher) { return dumpConfigs(name_matcher); }); // ConfigTracker keys must be unique. We are asserting that no one has stolen the key // from us, since the returned entry will be nullptr if the key already exists. RELEASE_ASSERT(config_tracker_entry_, ""); diff --git a/source/common/config/config_provider_impl.h b/source/common/config/config_provider_impl.h index 8db998ebab884..7cbdfd4955128 100644 --- a/source/common/config/config_provider_impl.h +++ b/source/common/config/config_provider_impl.h @@ -383,7 +383,8 @@ class ConfigProviderManagerImplBase : public ConfigProviderManager, public Singl * @return ProtobufTypes::MessagePtr the config dump proto corresponding to the associated * config providers. */ - virtual ProtobufTypes::MessagePtr dumpConfigs() const PURE; + virtual ProtobufTypes::MessagePtr + dumpConfigs(const Matchers::StringMatcher& name_matcher) const PURE; protected: // Ordered set for deterministic config dump output. diff --git a/source/common/config/subscription_factory_impl.cc b/source/common/config/subscription_factory_impl.cc index eadf0dd3bfda0..2da2fc83032b8 100644 --- a/source/common/config/subscription_factory_impl.cc +++ b/source/common/config/subscription_factory_impl.cc @@ -29,7 +29,6 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( Stats::Scope& scope, SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder, const SubscriptionOptions& options) { Config::Utility::checkLocalInfo(type_url, local_info_); - std::unique_ptr result; SubscriptionStats stats = Utility::generateStats(scope); switch (config.config_source_specifier_case()) { @@ -103,7 +102,6 @@ SubscriptionPtr SubscriptionFactoryImpl::collectionSubscriptionFromUrl( const envoy::config::core::v3::ConfigSource& config, absl::string_view resource_type, Stats::Scope& scope, SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder) { - std::unique_ptr result; SubscriptionStats stats 
= Utility::generateStats(scope); switch (collection_locator.scheme()) { diff --git a/source/common/config/utility.cc b/source/common/config/utility.cc index d1743deeb042c..d6d526b792be1 100644 --- a/source/common/config/utility.cc +++ b/source/common/config/utility.cc @@ -44,12 +44,8 @@ void Utility::translateApiConfigSource( envoy::config::core::v3::GrpcService* grpc_service = api_config_source.add_grpc_services(); grpc_service->mutable_envoy_grpc()->set_cluster_name(cluster); } else { - if (api_type == ApiType::get().UnsupportedRestLegacy) { - api_config_source.set_api_type(envoy::config::core::v3::ApiConfigSource:: - hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY); - } else if (api_type == ApiType::get().Rest) { - api_config_source.set_api_type(envoy::config::core::v3::ApiConfigSource::REST); - } + ASSERT(api_type == ApiType::get().Rest); + api_config_source.set_api_type(envoy::config::core::v3::ApiConfigSource::REST); api_config_source.add_cluster_names(cluster); } diff --git a/source/common/conn_pool/conn_pool_base.cc b/source/common/conn_pool/conn_pool_base.cc index 198c08c33648d..c6276446f4a9d 100644 --- a/source/common/conn_pool/conn_pool_base.cc +++ b/source/common/conn_pool/conn_pool_base.cc @@ -21,7 +21,7 @@ namespace { ConnPoolImplBase::ConnPoolImplBase( Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, Upstream::ClusterConnectivityState& state) : state_(state), host_(host), priority_(priority), dispatcher_(dispatcher), socket_options_(options), transport_socket_options_(transport_socket_options), diff --git a/source/common/conn_pool/conn_pool_base.h b/source/common/conn_pool/conn_pool_base.h index 322c7967912c3..f4822b7e77f64 100644 --- a/source/common/conn_pool/conn_pool_base.h +++ 
b/source/common/conn_pool/conn_pool_base.h @@ -140,7 +140,7 @@ class ConnPoolImplBase : protected Logger::Loggable { ConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, Upstream::ClusterConnectivityState& state); virtual ~ConnPoolImplBase(); @@ -216,7 +216,7 @@ class ConnPoolImplBase : protected Logger::Loggable { Event::Dispatcher& dispatcher() { return dispatcher_; } Upstream::ResourcePriority priority() const { return priority_; } const Network::ConnectionSocket::OptionsSharedPtr& socketOptions() { return socket_options_; } - const Network::TransportSocketOptionsSharedPtr& transportSocketOptions() { + const Network::TransportSocketOptionsConstSharedPtr& transportSocketOptions() { return transport_socket_options_; } bool hasPendingStreams() const { return !pending_streams_.empty(); } @@ -297,7 +297,7 @@ class ConnPoolImplBase : protected Logger::Loggable { Event::Dispatcher& dispatcher_; const Network::ConnectionSocket::OptionsSharedPtr socket_options_; - const Network::TransportSocketOptionsSharedPtr transport_socket_options_; + const Network::TransportSocketOptionsConstSharedPtr transport_socket_options_; std::list drained_callbacks_; diff --git a/source/common/formatter/substitution_formatter.cc b/source/common/formatter/substitution_formatter.cc index 76261d9f882ac..c8419263f367c 100644 --- a/source/common/formatter/substitution_formatter.cc +++ b/source/common/formatter/substitution_formatter.cc @@ -820,8 +820,8 @@ StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) { field_extractor_ = std::make_unique( [](const StreamInfo::StreamInfo& stream_info) { absl::optional result; - if (!stream_info.requestedServerName().empty()) { - result = 
stream_info.requestedServerName(); + if (!stream_info.downstreamAddressProvider().requestedServerName().empty()) { + result = std::string(stream_info.downstreamAddressProvider().requestedServerName()); } return result; }); diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 12c29a1e0cbda..e792b2735d269 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -687,9 +687,6 @@ ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connect max_stream_duration_timer_->enableTimer(connection_manager_.config_.maxStreamDuration().value(), this); } - - filter_manager_.streamInfo().setRequestedServerName( - connection_manager_.read_callbacks_->connection().requestedServerName()); } void ConnectionManagerImpl::ActiveStream::completeRequest() { diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index e071614787b34..601295b8f9665 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -10,8 +10,8 @@ namespace Envoy { namespace Http { -Network::TransportSocketOptionsSharedPtr -wrapTransportSocketOptions(Network::TransportSocketOptionsSharedPtr transport_socket_options, +Network::TransportSocketOptionsConstSharedPtr +wrapTransportSocketOptions(Network::TransportSocketOptionsConstSharedPtr transport_socket_options, std::vector protocols) { std::vector fallbacks; for (auto protocol : protocols) { @@ -44,7 +44,7 @@ wrapTransportSocketOptions(Network::TransportSocketOptionsSharedPtr transport_so HttpConnPoolImplBase::HttpConnPoolImplBase( Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, Random::RandomGenerator& random_generator, 
Upstream::ClusterConnectivityState& state, std::vector protocols) : Envoy::ConnectionPool::ConnPoolImplBase( @@ -154,6 +154,7 @@ void MultiplexedActiveClientBase::onStreamReset(Http::StreamResetReason reason) break; case StreamResetReason::LocalReset: case StreamResetReason::ProtocolError: + case StreamResetReason::OverloadManager: parent_.host()->cluster().stats().upstream_rq_tx_reset_.inc(); break; case StreamResetReason::RemoteReset: diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index 0be7cf2fc7c19..0d72454786204 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -47,13 +47,12 @@ class ActiveClient; class HttpConnPoolImplBase : public Envoy::ConnectionPool::ConnPoolImplBase, public Http::ConnectionPool::Instance { public: - HttpConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, - Event::Dispatcher& dispatcher, - const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, - Random::RandomGenerator& random_generator, - Upstream::ClusterConnectivityState& state, - std::vector protocols); + HttpConnPoolImplBase( + Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, + Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, + Random::RandomGenerator& random_generator, Upstream::ClusterConnectivityState& state, + std::vector protocols); ~HttpConnPoolImplBase() override; // ConnectionPool::Instance @@ -143,13 +142,12 @@ class FixedHttpConnPoolImpl : public HttpConnPoolImplBase { using CreateCodecFn = std::function; - FixedHttpConnPoolImpl(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, - Event::Dispatcher& dispatcher, - const Network::ConnectionSocket::OptionsSharedPtr& options, - const 
Network::TransportSocketOptionsSharedPtr& transport_socket_options, - Random::RandomGenerator& random_generator, - Upstream::ClusterConnectivityState& state, CreateClientFn client_fn, - CreateCodecFn codec_fn, std::vector protocols) + FixedHttpConnPoolImpl( + Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, + Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, + Random::RandomGenerator& random_generator, Upstream::ClusterConnectivityState& state, + CreateClientFn client_fn, CreateCodecFn codec_fn, std::vector protocols) : HttpConnPoolImplBase(host, priority, dispatcher, options, transport_socket_options, random_generator, state, protocols), codec_fn_(codec_fn), client_fn_(client_fn), protocol_(protocols[0]) { diff --git a/source/common/http/conn_pool_grid.cc b/source/common/http/conn_pool_grid.cc index 5938c0d1ed342..28ce8d1c89c30 100644 --- a/source/common/http/conn_pool_grid.cc +++ b/source/common/http/conn_pool_grid.cc @@ -190,7 +190,7 @@ ConnectivityGrid::ConnectivityGrid( Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, Upstream::ClusterConnectivityState& state, TimeSource& time_source, AlternateProtocolsCacheSharedPtr alternate_protocols, std::chrono::milliseconds next_attempt_duration, ConnectivityOptions connectivity_options) diff --git a/source/common/http/conn_pool_grid.h b/source/common/http/conn_pool_grid.h index 3c1b74ed74ea0..5adf47dd4f7c3 100644 --- a/source/common/http/conn_pool_grid.h +++ b/source/common/http/conn_pool_grid.h @@ -130,7 +130,7 @@ class ConnectivityGrid : public 
ConnectionPool::Instance, ConnectivityGrid(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, Upstream::ClusterConnectivityState& state, TimeSource& time_source, AlternateProtocolsCacheSharedPtr alternate_protocols, std::chrono::milliseconds next_attempt_duration, @@ -186,7 +186,7 @@ class ConnectivityGrid : public ConnectionPool::Instance, Upstream::HostConstSharedPtr host_; Upstream::ResourcePriority priority_; const Network::ConnectionSocket::OptionsSharedPtr options_; - const Network::TransportSocketOptionsSharedPtr transport_socket_options_; + const Network::TransportSocketOptionsConstSharedPtr transport_socket_options_; Upstream::ClusterConnectivityState& state_; std::chrono::milliseconds next_attempt_duration_; TimeSource& time_source_; diff --git a/source/common/http/filter_manager.cc b/source/common/http/filter_manager.cc index a68e3207c7a3e..8f855eba2495d 100644 --- a/source/common/http/filter_manager.cc +++ b/source/common/http/filter_manager.cc @@ -298,7 +298,7 @@ bool ActiveStreamDecoderFilter::canContinue() { } Buffer::InstancePtr ActiveStreamDecoderFilter::createBuffer() { - auto buffer = dispatcher().getWatermarkFactory().create( + auto buffer = dispatcher().getWatermarkFactory().createBuffer( [this]() -> void { this->requestDataDrained(); }, [this]() -> void { this->requestDataTooLarge(); }, []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }); @@ -1459,7 +1459,7 @@ absl::optional ActiveStreamDecoderFilter::routeCon } Buffer::InstancePtr ActiveStreamEncoderFilter::createBuffer() { - auto buffer = dispatcher().getWatermarkFactory().create( + auto buffer = dispatcher().getWatermarkFactory().createBuffer( [this]() -> void { 
this->responseDataDrained(); }, [this]() -> void { this->responseDataTooLarge(); }, []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }); diff --git a/source/common/http/filter_manager.h b/source/common/http/filter_manager.h index 1e826f1df5d1c..008baa50bcd81 100644 --- a/source/common/http/filter_manager.h +++ b/source/common/http/filter_manager.h @@ -618,7 +618,9 @@ class OverridableRemoteSocketAddressSetterStreamInfo : public StreamInfo::Stream const Network::Address::InstanceConstSharedPtr& directRemoteAddress() const override { return StreamInfoImpl::downstreamAddressProvider().directRemoteAddress(); } - + absl::string_view requestedServerName() const override { + return StreamInfoImpl::downstreamAddressProvider().requestedServerName(); + } void dumpState(std::ostream& os, int indent_level) const override { StreamInfoImpl::dumpState(os, indent_level); diff --git a/source/common/http/header_map_impl.cc b/source/common/http/header_map_impl.cc index ed84303497cc1..ee6956d049d9b 100644 --- a/source/common/http/header_map_impl.cc +++ b/source/common/http/header_map_impl.cc @@ -40,6 +40,12 @@ InlineHeaderVector& getInVec(VariantHeader& buffer) { const InlineHeaderVector& getInVec(const VariantHeader& buffer) { return absl::get(buffer); } + +bool validatedLowerCaseString(absl::string_view str) { + auto lower_case_str = LowerCaseString(str); + return lower_case_str == str; +} + } // namespace // Initialize as a Type::Inline @@ -469,13 +475,13 @@ HeaderMap::GetResult HeaderMapImpl::get(const LowerCaseString& key) const { return HeaderMap::GetResult(const_cast(this)->getExisting(key)); } -HeaderMap::NonConstGetResult HeaderMapImpl::getExisting(const LowerCaseString& key) { +HeaderMap::NonConstGetResult HeaderMapImpl::getExisting(absl::string_view key) { // Attempt a trie lookup first to see if the user is requesting an O(1) header. This may be // relatively common in certain header matching / routing patterns. 
// TODO(mattklein123): Add inline handle support directly to the header matcher code to support // this use case more directly. HeaderMap::NonConstGetResult ret; - auto lookup = staticLookup(key.get()); + auto lookup = staticLookup(key); if (lookup.has_value()) { if (*lookup.value().entry_ != nullptr) { ret.push_back(*lookup.value().entry_); @@ -486,7 +492,7 @@ HeaderMap::NonConstGetResult HeaderMapImpl::getExisting(const LowerCaseString& k // If the requested header is not an O(1) header try using the lazy map to // search for it instead of iterating the headers list. if (headers_.maybeMakeMap()) { - HeaderList::HeaderLazyMap::iterator iter = headers_.mapFind(key.get()); + HeaderList::HeaderLazyMap::iterator iter = headers_.mapFind(key); if (iter != headers_.mapEnd()) { const HeaderList::HeaderNodeVector& v = iter->second; ASSERT(!v.empty()); // It's impossible to have a map entry with an empty vector as its value. @@ -502,7 +508,7 @@ HeaderMap::NonConstGetResult HeaderMapImpl::getExisting(const LowerCaseString& k // scan. Doing the trie lookup is wasteful in the miss case, but is present for code consistency // with other functions that do similar things. 
for (HeaderEntryImpl& header : headers_) { - if (header.key() == key.get().c_str()) { + if (header.key() == key) { ret.push_back(&header); } } @@ -556,16 +562,7 @@ size_t HeaderMapImpl::removeIf(const HeaderMap::HeaderMatchPredicate& predicate) return old_size - headers_.size(); } -size_t HeaderMapImpl::remove(const LowerCaseString& key) { - const size_t old_size = headers_.size(); - auto lookup = staticLookup(key.get()); - if (lookup.has_value()) { - removeInline(lookup.value().entry_); - } else { - subtractSize(headers_.remove(key.get())); - } - return old_size - headers_.size(); -} +size_t HeaderMapImpl::remove(const LowerCaseString& key) { return removeExisting(key); } size_t HeaderMapImpl::removePrefix(const LowerCaseString& prefix) { return HeaderMapImpl::removeIf([&prefix](const HeaderEntry& entry) -> bool { @@ -610,6 +607,17 @@ HeaderMapImpl::HeaderEntryImpl& HeaderMapImpl::maybeCreateInline(HeaderEntryImpl return **entry; } +size_t HeaderMapImpl::removeExisting(absl::string_view key) { + const size_t old_size = headers_.size(); + auto lookup = staticLookup(key); + if (lookup.has_value()) { + removeInline(lookup.value().entry_); + } else { + subtractSize(headers_.remove(key)); + } + return old_size - headers_.size(); +} + size_t HeaderMapImpl::removeInline(HeaderEntryImpl** ptr_to_entry) { if (!*ptr_to_entry) { return 0; @@ -650,5 +658,46 @@ HeaderMapImplUtility::getAllHeaderMapImplInfo() { return ret; } +absl::optional +RequestHeaderMapImpl::getTraceContext(absl::string_view key) const { + ASSERT(validatedLowerCaseString(key)); + auto result = const_cast(this)->getExisting(key); + + if (result.empty()) { + return absl::nullopt; + } + return result[0]->value().getStringView(); +} + +void RequestHeaderMapImpl::setTraceContext(absl::string_view key, absl::string_view val) { + ASSERT(validatedLowerCaseString(key)); + HeaderMapImpl::removeExisting(key); + + HeaderString new_key; + new_key.setCopy(key); + HeaderString new_val; + new_val.setCopy(val); + + 
HeaderMapImpl::insertByKey(std::move(new_key), std::move(new_val)); +} + +void RequestHeaderMapImpl::setTraceContextReferenceKey(absl::string_view key, + absl::string_view val) { + ASSERT(validatedLowerCaseString(key)); + HeaderMapImpl::removeExisting(key); + + HeaderString new_val; + new_val.setCopy(val); + + HeaderMapImpl::insertByKey(HeaderString(key), std::move(new_val)); +} + +void RequestHeaderMapImpl::setTraceContextReference(absl::string_view key, absl::string_view val) { + ASSERT(validatedLowerCaseString(key)); + HeaderMapImpl::removeExisting(key); + + HeaderMapImpl::insertByKey(HeaderString(key), HeaderString(val)); +} + } // namespace Http } // namespace Envoy diff --git a/source/common/http/header_map_impl.h b/source/common/http/header_map_impl.h index e146e1b4c2bfc..42a9bfde4a548 100644 --- a/source/common/http/header_map_impl.h +++ b/source/common/http/header_map_impl.h @@ -322,7 +322,10 @@ class HeaderMapImpl : NonCopyable { HeaderEntryImpl& maybeCreateInline(HeaderEntryImpl** entry, const LowerCaseString& key); HeaderEntryImpl& maybeCreateInline(HeaderEntryImpl** entry, const LowerCaseString& key, HeaderString&& value); - HeaderMap::NonConstGetResult getExisting(const LowerCaseString& key); + + HeaderMap::NonConstGetResult getExisting(absl::string_view key); + size_t removeExisting(absl::string_view key); + size_t removeInline(HeaderEntryImpl** entry); void updateSize(uint64_t from_size, uint64_t to_size); void addSize(uint64_t size); @@ -480,6 +483,12 @@ class RequestHeaderMapImpl final : public TypedHeaderMapImpl, INLINE_REQ_RESP_STRING_HEADERS(DEFINE_INLINE_HEADER_STRING_FUNCS) INLINE_REQ_RESP_NUMERIC_HEADERS(DEFINE_INLINE_HEADER_NUMERIC_FUNCS) + // Tracing::TraceContext + absl::optional getTraceContext(absl::string_view key) const override; + void setTraceContext(absl::string_view key, absl::string_view val) override; + void setTraceContextReferenceKey(absl::string_view key, absl::string_view val) override; + void 
setTraceContextReference(absl::string_view key, absl::string_view val) override; + protected: // NOTE: Because inline_headers_ is a variable size member, it must be the last member in the // most derived class. This forces the definition of the following three functions to also be diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 01ff8b69f4399..d26328488f4d5 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -469,7 +469,7 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stat "envoy.reloadable_features.send_strict_1xx_and_204_response_headers")), dispatching_(false), no_chunked_encoding_header_for_304_(Runtime::runtimeFeatureEnabled( "envoy.reloadable_features.no_chunked_encoding_header_for_304")), - output_buffer_(connection.dispatcher().getWatermarkFactory().create( + output_buffer_(connection.dispatcher().getWatermarkFactory().createBuffer( [&]() -> void { this->onBelowLowWatermark(); }, [&]() -> void { this->onAboveHighWatermark(); }, []() -> void { /* TODO(adisuissa): Handle overflow watermark */ })), diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index 9928b08819f4b..c7e9af0970108 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -103,7 +103,7 @@ ConnectionPool::InstancePtr allocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, Upstream::ClusterConnectivityState& state) { return std::make_unique( std::move(host), std::move(priority), dispatcher, options, transport_socket_options, diff --git 
a/source/common/http/http1/conn_pool.h b/source/common/http/http1/conn_pool.h index e6c975ecb39a9..235f98e94e44c 100644 --- a/source/common/http/http1/conn_pool.h +++ b/source/common/http/http1/conn_pool.h @@ -73,7 +73,7 @@ ConnectionPool::InstancePtr allocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, Upstream::ClusterConnectivityState& state); } // namespace Http1 diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index 3e46ce24206b8..0ae50eb547d27 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -131,11 +131,11 @@ template static T* removeConst(const void* object) { ConnectionImpl::StreamImpl::StreamImpl(ConnectionImpl& parent, uint32_t buffer_limit) : parent_(parent), - pending_recv_data_(parent_.connection_.dispatcher().getWatermarkFactory().create( + pending_recv_data_(parent_.connection_.dispatcher().getWatermarkFactory().createBuffer( [this]() -> void { this->pendingRecvBufferLowWatermark(); }, [this]() -> void { this->pendingRecvBufferHighWatermark(); }, []() -> void { /* TODO(adisuissa): Handle overflow watermark */ })), - pending_send_data_(parent_.connection_.dispatcher().getWatermarkFactory().create( + pending_send_data_(parent_.connection_.dispatcher().getWatermarkFactory().createBuffer( [this]() -> void { this->pendingSendBufferLowWatermark(); }, [this]() -> void { this->pendingSendBufferHighWatermark(); }, []() -> void { /* TODO(adisuissa): Handle overflow watermark */ })), @@ -1545,8 +1545,9 @@ void ConnectionImpl::StreamImpl::dumpState(std::ostream& os, int indent_level) c const char* spaces = spacesForLevel(indent_level); os << 
spaces << "ConnectionImpl::StreamImpl " << this << DUMP_MEMBER(stream_id_) << DUMP_MEMBER(unconsumed_bytes_) << DUMP_MEMBER(read_disable_count_) - << DUMP_MEMBER(local_end_stream_sent_) << DUMP_MEMBER(remote_end_stream_) - << DUMP_MEMBER(data_deferred_) << DUMP_MEMBER(received_noninformational_headers_) + << DUMP_MEMBER(local_end_stream_) << DUMP_MEMBER(local_end_stream_sent_) + << DUMP_MEMBER(remote_end_stream_) << DUMP_MEMBER(data_deferred_) + << DUMP_MEMBER(received_noninformational_headers_) << DUMP_MEMBER(pending_receive_buffer_high_watermark_called_) << DUMP_MEMBER(pending_send_buffer_high_watermark_called_) << DUMP_MEMBER(reset_due_to_messaging_error_) diff --git a/source/common/http/http2/conn_pool.cc b/source/common/http/http2/conn_pool.cc index 50cb7778b8bfc..3985c6bfe7527 100644 --- a/source/common/http/http2/conn_pool.cc +++ b/source/common/http/http2/conn_pool.cc @@ -27,7 +27,7 @@ ConnectionPool::InstancePtr allocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, Upstream::ClusterConnectivityState& state) { return std::make_unique( host, priority, dispatcher, options, transport_socket_options, random_generator, state, diff --git a/source/common/http/http2/conn_pool.h b/source/common/http/http2/conn_pool.h index 9d96c494873ca..f3cc7cb027704 100644 --- a/source/common/http/http2/conn_pool.h +++ b/source/common/http/http2/conn_pool.h @@ -26,7 +26,7 @@ ConnectionPool::InstancePtr allocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - const 
Network::TransportSocketOptionsSharedPtr& transport_socket_options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, Upstream::ClusterConnectivityState& state); } // namespace Http2 diff --git a/source/common/http/http3/codec_stats.h b/source/common/http/http3/codec_stats.h index d8044922cbdbf..47b69965896e3 100644 --- a/source/common/http/http3/codec_stats.h +++ b/source/common/http/http3/codec_stats.h @@ -17,7 +17,13 @@ namespace Http3 { COUNTER(requests_rejected_with_underscores_in_headers) \ COUNTER(rx_reset) \ COUNTER(tx_reset) \ - COUNTER(metadata_not_supported_error) + COUNTER(metadata_not_supported_error) \ + COUNTER(quic_version_43) \ + COUNTER(quic_version_46) \ + COUNTER(quic_version_50) \ + COUNTER(quic_version_51) \ + COUNTER(quic_version_h3_29) \ + COUNTER(quic_version_rfc_v1) /** * Wrapper struct for the HTTP/3 codec stats. @see stats_macros.h diff --git a/source/common/http/http3/conn_pool.cc b/source/common/http/http3/conn_pool.cc index 0c361b7a45886..ff7f39f21feeb 100644 --- a/source/common/http/http3/conn_pool.cc +++ b/source/common/http/http3/conn_pool.cc @@ -32,7 +32,7 @@ void Http3ConnPoolImpl::setQuicConfigFromClusterConfig(const Upstream::ClusterIn Http3ConnPoolImpl::Http3ConnPoolImpl( Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, Random::RandomGenerator& random_generator, Upstream::ClusterConnectivityState& state, CreateClientFn client_fn, CreateCodecFn codec_fn, std::vector protocol, TimeSource& time_source) @@ -58,7 +58,7 @@ ConnectionPool::InstancePtr allocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const 
Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, Upstream::ClusterConnectivityState& state, TimeSource& time_source) { return std::make_unique( host, priority, dispatcher, options, transport_socket_options, random_generator, state, diff --git a/source/common/http/http3/conn_pool.h b/source/common/http/http3/conn_pool.h index 7ab19f1f4a663..3eaf625609db6 100644 --- a/source/common/http/http3/conn_pool.h +++ b/source/common/http/http3/conn_pool.h @@ -40,7 +40,7 @@ class Http3ConnPoolImpl : public FixedHttpConnPoolImpl { Http3ConnPoolImpl(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, Random::RandomGenerator& random_generator, Upstream::ClusterConnectivityState& state, CreateClientFn client_fn, CreateCodecFn codec_fn, std::vector protocol, @@ -65,7 +65,7 @@ ConnectionPool::InstancePtr allocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, Upstream::ClusterConnectivityState& state, TimeSource& time_source); } // namespace Http3 diff --git a/source/common/http/mixed_conn_pool.h b/source/common/http/mixed_conn_pool.h index 4528aeb6eacd6..db0b44cbd4770 100644 --- a/source/common/http/mixed_conn_pool.h +++ b/source/common/http/mixed_conn_pool.h @@ -8,11 +8,12 @@ namespace Http { // An HTTP connection pool which supports both 
HTTP/1 and HTTP/2 based on ALPN class HttpConnPoolImplMixed : public HttpConnPoolImplBase { public: - HttpConnPoolImplMixed(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator, - Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, - const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, - Upstream::ClusterConnectivityState& state) + HttpConnPoolImplMixed( + Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator, + Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, + Upstream::ClusterConnectivityState& state) : HttpConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options, transport_socket_options, random_generator, state, {Protocol::Http2, Protocol::Http11}) {} diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index b8b3d9f0e1b16..50d2123b5a835 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -847,6 +847,8 @@ const std::string Utility::resetReasonToString(const Http::StreamResetReason res return "remote error with CONNECT request"; case Http::StreamResetReason::ProtocolError: return "protocol error"; + case Http::StreamResetReason::OverloadManager: + return "overload manager reset"; } NOT_REACHED_GCOVR_EXCL_LINE; diff --git a/source/common/network/BUILD b/source/common/network/BUILD index 650edfa192ea4..529e25ad6c05d 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -18,6 +18,8 @@ envoy_cc_library( "//source/common/api:os_sys_calls_lib", "//source/common/common:assert_lib", "//source/common/common:safe_memcpy_lib", + "//source/common/common:statusor_lib", + "//source/common/common:thread_lib", "//source/common/common:utility_lib", ], ) diff --git 
a/source/common/network/address_impl.cc b/source/common/network/address_impl.cc index be1d06a491148..e612505d83369 100644 --- a/source/common/network/address_impl.cc +++ b/source/common/network/address_impl.cc @@ -10,6 +10,7 @@ #include "source/common/common/assert.h" #include "source/common/common/fmt.h" #include "source/common/common/safe_memcpy.h" +#include "source/common/common/thread.h" #include "source/common/common/utility.h" #include "source/common/network/socket_interface.h" @@ -19,26 +20,6 @@ namespace Address { namespace { -// Validate that IPv4 is supported on this platform, raise an exception for the -// given address if not. -void validateIpv4Supported(const std::string& address) { - static const bool supported = SocketInterfaceSingleton::get().ipFamilySupported(AF_INET); - if (!supported) { - throw EnvoyException( - fmt::format("IPv4 addresses are not supported on this machine: {}", address)); - } -} - -// Validate that IPv6 is supported on this platform, raise an exception for the -// given address if not. -void validateIpv6Supported(const std::string& address) { - static const bool supported = SocketInterfaceSingleton::get().ipFamilySupported(AF_INET6); - if (!supported) { - throw EnvoyException( - fmt::format("IPv6 addresses are not supported on this machine: {}", address)); - } -} - // Constructs a readable string with the embedded nulls in the abstract path replaced with '@'. std::string friendlyNameFromAbstractPath(absl::string_view path) { std::string friendly_name(path.data(), path.size()); @@ -50,17 +31,30 @@ const SocketInterface* sockInterfaceOrDefault(const SocketInterface* sock_interf return sock_interface == nullptr ? 
&SocketInterfaceSingleton::get() : sock_interface; } +void throwOnError(absl::Status status) { + if (!status.ok()) { + throw EnvoyException(status.ToString()); + } +} + +InstanceConstSharedPtr throwOnError(StatusOr address) { + if (!address.ok()) { + throwOnError(address.status()); + } + return *address; +} + } // namespace -Address::InstanceConstSharedPtr addressFromSockAddr(const sockaddr_storage& ss, socklen_t ss_len, - bool v6only) { +StatusOr addressFromSockAddr(const sockaddr_storage& ss, + socklen_t ss_len, bool v6only) { RELEASE_ASSERT(ss_len == 0 || static_cast(ss_len) >= sizeof(sa_family_t), ""); switch (ss.ss_family) { case AF_INET: { RELEASE_ASSERT(ss_len == 0 || static_cast(ss_len) == sizeof(sockaddr_in), ""); const struct sockaddr_in* sin = reinterpret_cast(&ss); ASSERT(AF_INET == sin->sin_family); - return std::make_shared(sin); + return Address::InstanceFactory::createInstancePtr(sin); } case AF_INET6: { RELEASE_ASSERT(ss_len == 0 || static_cast(ss_len) == sizeof(sockaddr_in6), ""); @@ -77,9 +71,9 @@ Address::InstanceConstSharedPtr addressFromSockAddr(const sockaddr_storage& ss, #else struct sockaddr_in sin = {AF_INET, sin6->sin6_port, {sin6->sin6_addr.s6_addr32[3]}, {}}; #endif - return std::make_shared(&sin); + return Address::InstanceFactory::createInstancePtr(&sin); } else { - return std::make_shared(*sin6, v6only); + return Address::InstanceFactory::createInstancePtr(*sin6, v6only); } } case AF_UNIX: { @@ -88,27 +82,47 @@ Address::InstanceConstSharedPtr addressFromSockAddr(const sockaddr_storage& ss, RELEASE_ASSERT(ss_len == 0 || static_cast(ss_len) >= offsetof(struct sockaddr_un, sun_path) + 1, ""); - return std::make_shared(sun, ss_len); + return Address::InstanceFactory::createInstancePtr(sun, ss_len); } default: - throw EnvoyException(fmt::format("Unexpected sockaddr family: {}", ss.ss_family)); + return absl::InvalidArgumentError(fmt::format("Unexpected sockaddr family: {}", ss.ss_family)); } NOT_REACHED_GCOVR_EXCL_LINE; } 
+Address::InstanceConstSharedPtr addressFromSockAddrOrThrow(const sockaddr_storage& ss, + socklen_t ss_len, bool v6only) { + // Though we don't have any test coverage where address validation in addressFromSockAddr() fails, + // this code is called in worker thread and can throw in theory. In that case, the program will + // crash due to uncaught exception. In practice, we don't expect any address validation in + // addressFromSockAddr() to fail in worker thread. + StatusOr address = addressFromSockAddr(ss, ss_len, v6only); + return throwOnError(address); +} + +Address::InstanceConstSharedPtr +addressFromSockAddrOrDie(const sockaddr_storage& ss, socklen_t ss_len, os_fd_t fd, bool v6only) { + // Set v6only to false so that mapped-v6 address can be normalize to v4 + // address. Though dual stack may be disabled, it's still okay to assume the + // address is from a dual stack socket. This is because mapped-v6 address + // must come from a dual stack socket. An actual v6 address can come from + // both dual stack socket and v6 only socket. If |peer_addr| is an actual v6 + // address and the socket is actually v6 only, the returned address will be + // regarded as a v6 address from dual stack socket. However, this address is not going to be + // used to create socket. Wrong knowledge of dual stack support won't hurt. 
+ ASSERT(Thread::MainThread::isWorkerThread()); + StatusOr address = + Address::addressFromSockAddr(ss, ss_len, v6only); + if (!address.ok()) { + PANIC(fmt::format("Invalid address for fd: {}, error: {}", fd, address.status().ToString())); + } + return *address; +} + Ipv4Instance::Ipv4Instance(const sockaddr_in* address, const SocketInterface* sock_interface) : InstanceBase(Type::Ip, sockInterfaceOrDefault(sock_interface)) { - memset(&ip_.ipv4_.address_, 0, sizeof(ip_.ipv4_.address_)); - ip_.ipv4_.address_ = *address; - ip_.friendly_address_ = sockaddrToString(*address); - - // Based on benchmark testing, this reserve+append implementation runs faster than absl::StrCat. - fmt::format_int port(ntohs(address->sin_port)); - friendly_name_.reserve(ip_.friendly_address_.size() + 1 + port.size()); - friendly_name_.append(ip_.friendly_address_); - friendly_name_.push_back(':'); - friendly_name_.append(port.data(), port.size()); - validateIpv4Supported(friendly_name_); + throwOnError(validateProtocolSupported()); + initHelper(address); } Ipv4Instance::Ipv4Instance(const std::string& address, const SocketInterface* sock_interface) @@ -117,6 +131,7 @@ Ipv4Instance::Ipv4Instance(const std::string& address, const SocketInterface* so Ipv4Instance::Ipv4Instance(const std::string& address, uint32_t port, const SocketInterface* sock_interface) : InstanceBase(Type::Ip, sockInterfaceOrDefault(sock_interface)) { + throwOnError(validateProtocolSupported()); memset(&ip_.ipv4_.address_, 0, sizeof(ip_.ipv4_.address_)); ip_.ipv4_.address_.sin_family = AF_INET; ip_.ipv4_.address_.sin_port = htons(port); @@ -126,21 +141,30 @@ Ipv4Instance::Ipv4Instance(const std::string& address, uint32_t port, } friendly_name_ = absl::StrCat(address, ":", port); - validateIpv4Supported(friendly_name_); ip_.friendly_address_ = address; } Ipv4Instance::Ipv4Instance(uint32_t port, const SocketInterface* sock_interface) : InstanceBase(Type::Ip, sockInterfaceOrDefault(sock_interface)) { + 
throwOnError(validateProtocolSupported()); memset(&ip_.ipv4_.address_, 0, sizeof(ip_.ipv4_.address_)); ip_.ipv4_.address_.sin_family = AF_INET; ip_.ipv4_.address_.sin_port = htons(port); ip_.ipv4_.address_.sin_addr.s_addr = INADDR_ANY; friendly_name_ = absl::StrCat("0.0.0.0:", port); - validateIpv4Supported(friendly_name_); ip_.friendly_address_ = "0.0.0.0"; } +Ipv4Instance::Ipv4Instance(absl::Status& status, const sockaddr_in* address, + const SocketInterface* sock_interface) + : InstanceBase(Type::Ip, sockInterfaceOrDefault(sock_interface)) { + status = validateProtocolSupported(); + if (!status.ok()) { + return; + } + initHelper(address); +} + bool Ipv4Instance::operator==(const Instance& rhs) const { const Ipv4Instance* rhs_casted = dynamic_cast(&rhs); return (rhs_casted && (ip_.ipv4_.address() == rhs_casted->ip_.ipv4_.address()) && @@ -173,6 +197,27 @@ std::string Ipv4Instance::sockaddrToString(const sockaddr_in& addr) { return std::string(start, str + BufferSize - start); } +absl::Status Ipv4Instance::validateProtocolSupported() { + static const bool supported = SocketInterfaceSingleton::get().ipFamilySupported(AF_INET); + if (supported) { + return absl::OkStatus(); + } + return absl::FailedPreconditionError("IPv4 addresses are not supported on this machine"); +} + +void Ipv4Instance::initHelper(const sockaddr_in* address) { + memset(&ip_.ipv4_.address_, 0, sizeof(ip_.ipv4_.address_)); + ip_.ipv4_.address_ = *address; + ip_.friendly_address_ = sockaddrToString(*address); + + // Based on benchmark testing, this reserve+append implementation runs faster than absl::StrCat. 
+ fmt::format_int port(ntohs(address->sin_port)); + friendly_name_.reserve(ip_.friendly_address_.size() + 1 + port.size()); + friendly_name_.append(ip_.friendly_address_); + friendly_name_.push_back(':'); + friendly_name_.append(port.data(), port.size()); +} + absl::uint128 Ipv6Instance::Ipv6Helper::address() const { absl::uint128 result{0}; static_assert(sizeof(absl::uint128) == 16, "The size of asbl::uint128 is not 16."); @@ -194,11 +239,8 @@ std::string Ipv6Instance::Ipv6Helper::makeFriendlyAddress() const { Ipv6Instance::Ipv6Instance(const sockaddr_in6& address, bool v6only, const SocketInterface* sock_interface) : InstanceBase(Type::Ip, sockInterfaceOrDefault(sock_interface)) { - ip_.ipv6_.address_ = address; - ip_.friendly_address_ = ip_.ipv6_.makeFriendlyAddress(); - ip_.ipv6_.v6only_ = v6only; - friendly_name_ = fmt::format("[{}]:{}", ip_.friendly_address_, ip_.port()); - validateIpv6Supported(friendly_name_); + throwOnError(validateProtocolSupported()); + initHelper(address, v6only); } Ipv6Instance::Ipv6Instance(const std::string& address, const SocketInterface* sock_interface) @@ -207,6 +249,7 @@ Ipv6Instance::Ipv6Instance(const std::string& address, const SocketInterface* so Ipv6Instance::Ipv6Instance(const std::string& address, uint32_t port, const SocketInterface* sock_interface) : InstanceBase(Type::Ip, sockInterfaceOrDefault(sock_interface)) { + throwOnError(validateProtocolSupported()); ip_.ipv6_.address_.sin6_family = AF_INET6; ip_.ipv6_.address_.sin6_port = htons(port); if (!address.empty()) { @@ -219,7 +262,6 @@ Ipv6Instance::Ipv6Instance(const std::string& address, uint32_t port, // Just in case address is in a non-canonical format, format from network address. 
ip_.friendly_address_ = ip_.ipv6_.makeFriendlyAddress(); friendly_name_ = fmt::format("[{}]:{}", ip_.friendly_address_, ip_.port()); - validateIpv6Supported(friendly_name_); } Ipv6Instance::Ipv6Instance(uint32_t port, const SocketInterface* sock_interface) @@ -231,6 +273,31 @@ bool Ipv6Instance::operator==(const Instance& rhs) const { (ip_.port() == rhs_casted->ip_.port())); } +Ipv6Instance::Ipv6Instance(absl::Status& status, const sockaddr_in6& address, bool v6only, + const SocketInterface* sock_interface) + : InstanceBase(Type::Ip, sockInterfaceOrDefault(sock_interface)) { + status = validateProtocolSupported(); + if (!status.ok()) { + return; + } + initHelper(address, v6only); +} + +absl::Status Ipv6Instance::validateProtocolSupported() { + static const bool supported = SocketInterfaceSingleton::get().ipFamilySupported(AF_INET6); + if (supported) { + return absl::OkStatus(); + } + return absl::FailedPreconditionError("IPv6 addresses are not supported on this machine"); +} + +void Ipv6Instance::initHelper(const sockaddr_in6& address, bool v6only) { + ip_.ipv6_.address_ = address; + ip_.friendly_address_ = ip_.ipv6_.makeFriendlyAddress(); + ip_.ipv6_.v6only_ = v6only; + friendly_name_ = fmt::format("[{}]:{}", ip_.friendly_address_, ip_.port()); +} + PipeInstance::PipeInstance(const sockaddr_un* address, socklen_t ss_len, mode_t mode, const SocketInterface* sock_interface) : InstanceBase(Type::Pipe, sockInterfaceOrDefault(sock_interface)) { @@ -243,18 +310,8 @@ PipeInstance::PipeInstance(const sockaddr_un* address, socklen_t ss_len, mode_t pipe_.abstract_namespace_ = true; pipe_.address_length_ = ss_len - offsetof(struct sockaddr_un, sun_path); } - pipe_.address_ = *address; - if (pipe_.abstract_namespace_) { - if (mode != 0) { - throw EnvoyException("Cannot set mode for Abstract AF_UNIX sockets"); - } - // Replace all null characters with '@' in friendly_name_. 
- friendly_name_ = friendlyNameFromAbstractPath( - absl::string_view(pipe_.address_.sun_path, pipe_.address_length_)); - } else { - friendly_name_ = address->sun_path; - } - pipe_.mode_ = mode; + absl::Status status = initHelper(address, mode); + throwOnError(status); } PipeInstance::PipeInstance(const std::string& pipe_path, mode_t mode, @@ -300,8 +357,40 @@ PipeInstance::PipeInstance(const std::string& pipe_path, mode_t mode, pipe_.mode_ = mode; } +PipeInstance::PipeInstance(absl::Status& error, const sockaddr_un* address, socklen_t ss_len, + mode_t mode, const SocketInterface* sock_interface) + : InstanceBase(Type::Pipe, sockInterfaceOrDefault(sock_interface)) { + if (address->sun_path[0] == '\0') { +#if !defined(__linux__) + error = absl::FailedPreconditionError("Abstract AF_UNIX sockets are only supported on linux."); + return; +#endif + RELEASE_ASSERT(static_cast(ss_len) >= offsetof(struct sockaddr_un, sun_path) + 1, + ""); + pipe_.abstract_namespace_ = true; + pipe_.address_length_ = ss_len - offsetof(struct sockaddr_un, sun_path); + } + error = initHelper(address, mode); +} + bool PipeInstance::operator==(const Instance& rhs) const { return asString() == rhs.asString(); } +absl::Status PipeInstance::initHelper(const sockaddr_un* address, mode_t mode) { + pipe_.address_ = *address; + if (pipe_.abstract_namespace_) { + if (mode != 0) { + return absl::FailedPreconditionError("Cannot set mode for Abstract AF_UNIX sockets"); + } + // Replace all null characters with '@' in friendly_name_. 
+ friendly_name_ = friendlyNameFromAbstractPath( + absl::string_view(pipe_.address_.sun_path, pipe_.address_length_)); + } else { + friendly_name_ = address->sun_path; + } + pipe_.mode_ = mode; + return absl::OkStatus(); +} + EnvoyInternalInstance::EnvoyInternalInstance(const std::string& address_id, const SocketInterface* sock_interface) : InstanceBase(Type::EnvoyInternal, sockInterfaceOrDefault(sock_interface)), diff --git a/source/common/network/address_impl.h b/source/common/network/address_impl.h index a158ca63e4fca..2ade85fc89984 100644 --- a/source/common/network/address_impl.h +++ b/source/common/network/address_impl.h @@ -11,6 +11,7 @@ #include "envoy/network/socket.h" #include "source/common/common/assert.h" +#include "source/common/common/statusor.h" namespace Envoy { namespace Network { @@ -25,8 +26,23 @@ namespace Address { * @param v6only disable IPv4-IPv6 mapping for IPv6 addresses? * @return InstanceConstSharedPtr the address. */ -InstanceConstSharedPtr addressFromSockAddr(const sockaddr_storage& ss, socklen_t len, - bool v6only = true); +StatusOr addressFromSockAddr(const sockaddr_storage& ss, socklen_t len, + bool v6only = true); +InstanceConstSharedPtr addressFromSockAddrOrThrow(const sockaddr_storage& ss, socklen_t len, + bool v6only = true); + +/** + * Convert an address in the form of the socket address struct defined by Posix, Linux, etc. into + * a Network::Address::Instance and return a pointer to it. Die on failure. + * @param ss a valid address with family AF_INET, AF_INET6 or AF_UNIX. + * @param len length of the address (e.g. from accept, getsockname or getpeername). If len > 0, + * it is used to validate the structure contents; else if len == 0, it is ignored. + * @param fd the file descriptor for the created address instance. + * @param v6only disable IPv4-IPv6 mapping for IPv6 addresses? + * @return InstanceConstSharedPtr the address. 
+ */ +InstanceConstSharedPtr addressFromSockAddrOrDie(const sockaddr_storage& ss, socklen_t ss_len, + os_fd_t fd, bool v6only = true); /** * Base class for all address types. @@ -53,6 +69,22 @@ class InstanceBase : public Instance { const Type type_; }; +// Create an address instance. Upon failure, return an error status without throwing. +class InstanceFactory { +public: + template + static StatusOr createInstancePtr(Args&&... args) { + absl::Status status; + // Use new instead of make_shared here because the instance constructors are private and must be + // called directly here. + std::shared_ptr instance(new InstanceType(status, std::forward(args)...)); + if (!status.ok()) { + return status; + } + return instance; + } +}; + /** * Implementation of an IPv4 address. */ @@ -100,7 +132,20 @@ class Ipv4Instance : public InstanceBase { */ static std::string sockaddrToString(const sockaddr_in& addr); + // Validate that IPv4 is supported on this platform, raise an exception for the + // given address if not. + static absl::Status validateProtocolSupported(); + private: + /** + * Construct from an existing unix IPv4 socket address (IP v4 address and port). + * Store the status code in passed in parameter instead of throwing. + * It is called by the factory method and the partially constructed instance will be discarded + * upon error. 
+ */ + explicit Ipv4Instance(absl::Status& error, const sockaddr_in* address, + const SocketInterface* sock_interface = nullptr); + struct Ipv4Helper : public Ipv4 { uint32_t address() const override { return address_.sin_addr.s_addr; } @@ -124,7 +169,10 @@ class Ipv4Instance : public InstanceBase { std::string friendly_address_; }; + void initHelper(const sockaddr_in* address); + IpHelper ip_; + friend class InstanceFactory; }; /** @@ -166,7 +214,19 @@ class Ipv6Instance : public InstanceBase { } socklen_t sockAddrLen() const override { return sizeof(sockaddr_in6); } + // Validate that IPv6 is supported on this platform + static absl::Status validateProtocolSupported(); + private: + /** + * Construct from an existing unix IPv6 socket address (IP v6 address and port). + * Store the status code in passed in parameter instead of throwing. + * It is called by the factory method and the partially constructed instance will be discarded + * upon error. + */ + Ipv6Instance(absl::Status& error, const sockaddr_in6& address, bool v6only = true, + const SocketInterface* sock_interface = nullptr); + struct Ipv6Helper : public Ipv6 { Ipv6Helper() { memset(&address_, 0, sizeof(address_)); } absl::uint128 address() const override; @@ -199,7 +259,10 @@ class Ipv6Instance : public InstanceBase { std::string friendly_address_; }; + void initHelper(const sockaddr_in6& address, bool v6only); + IpHelper ip_; + friend class InstanceFactory; }; /** @@ -219,6 +282,8 @@ class PipeInstance : public InstanceBase { explicit PipeInstance(const std::string& pipe_path, mode_t mode = 0, const SocketInterface* sock_interface = nullptr); + static absl::Status validateProtocolSupported() { return absl::OkStatus(); } + // Network::Address::Instance bool operator==(const Instance& rhs) const override; const Ip* ip() const override { return nullptr; } @@ -236,6 +301,15 @@ class PipeInstance : public InstanceBase { } private: + /** + * Construct from an existing unix address. 
+ * Store the error status code in passed in parameter instead of throwing. + * It is called by the factory method and the partially constructed instance will be discarded + * upon error. + */ + PipeInstance(absl::Status& error, const sockaddr_un* address, socklen_t ss_len, mode_t mode = 0, + const SocketInterface* sock_interface = nullptr); + struct PipeHelper : public Pipe { bool abstractNamespace() const override { return abstract_namespace_; } @@ -248,7 +322,10 @@ class PipeInstance : public InstanceBase { mode_t mode_{0}; }; + absl::Status initHelper(const sockaddr_un* address, mode_t mode); + PipeHelper pipe_; + friend class InstanceFactory; }; class EnvoyInternalInstance : public InstanceBase { diff --git a/source/common/network/apple_dns_impl.cc b/source/common/network/apple_dns_impl.cc index f542c2424f7f8..1325914a3dfb0 100644 --- a/source/common/network/apple_dns_impl.cc +++ b/source/common/network/apple_dns_impl.cc @@ -182,8 +182,8 @@ ActiveDnsQuery* AppleDnsResolverImpl::resolve(const std::string& dns_name, } // Proceed with resolution after establishing that the resolver has a live main_sd_ref_. 
- std::unique_ptr pending_resolution( - new PendingResolution(*this, callback, dispatcher_, main_sd_ref_, dns_name)); + auto pending_resolution = + std::make_unique(*this, callback, dispatcher_, main_sd_ref_, dns_name); DNSServiceErrorType error = pending_resolution->dnsServiceGetAddrInfo(dns_lookup_family); if (error != kDNSServiceErr_NoError) { diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index 9f37d4b5b70da..ec8748241e0c1 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -69,11 +69,11 @@ ConnectionImpl::ConnectionImpl(Event::Dispatcher& dispatcher, ConnectionSocketPt : ConnectionImplBase(dispatcher, next_global_id_++), transport_socket_(std::move(transport_socket)), socket_(std::move(socket)), stream_info_(stream_info), filter_manager_(*this, *socket_), - write_buffer_(dispatcher.getWatermarkFactory().create( + write_buffer_(dispatcher.getWatermarkFactory().createBuffer( [this]() -> void { this->onWriteBufferLowWatermark(); }, [this]() -> void { this->onWriteBufferHighWatermark(); }, []() -> void { /* TODO(adisuissa): Handle overflow watermark */ })), - read_buffer_(dispatcher.getWatermarkFactory().create( + read_buffer_(dispatcher.getWatermarkFactory().createBuffer( [this]() -> void { this->onReadBufferLowWatermark(); }, [this]() -> void { this->onReadBufferHighWatermark(); }, []() -> void { /* TODO(adisuissa): Handle overflow watermark */ })), diff --git a/source/common/network/dns_impl.cc b/source/common/network/dns_impl.cc index 5e42755061c72..341f5400c3ccd 100644 --- a/source/common/network/dns_impl.cc +++ b/source/common/network/dns_impl.cc @@ -174,10 +174,12 @@ void DnsResolverImpl::PendingResolution::onAresGetAddrInfoCallback(int status, i if (completed_) { if (!cancelled_) { - // TODO(chaoqin-li1123): remove this exception catching by refactoring. 
- // We can't add a main thread assertion here because both this code is reused by dns filter - // and executed in both main thread and worker thread. Maybe split the code for filter and - // main thread. + // Use a raw try here because it is used in both main thread and filter. + // Can not convert to use status code as there may be unexpected exceptions in server fuzz + // tests, which must be handled. Potential exception may come from getAddressWithPort() or + // portFromTcpUrl(). + // TODO(chaoqin-li1123): remove try catch pattern here once we figure how to handle unexpected + // exception in fuzz tests. TRY_NEEDS_AUDIT { callback_(resolution_status, std::move(address_list)); } catch (const EnvoyException& e) { ENVOY_LOG(critical, "EnvoyException in c-ares callback: {}", e.what()); @@ -262,8 +264,8 @@ ActiveDnsQuery* DnsResolverImpl::resolve(const std::string& dns_name, initializeChannel(&options.options_, options.optmask_); } - std::unique_ptr pending_resolution( - new PendingResolution(*this, callback, dispatcher_, channel_, dns_name)); + auto pending_resolution = + std::make_unique(*this, callback, dispatcher_, channel_, dns_name); if (dns_lookup_family == DnsLookupFamily::Auto) { pending_resolution->fallback_if_failed_ = true; } diff --git a/source/common/network/filter_manager_impl.cc b/source/common/network/filter_manager_impl.cc index f5984f2db43ce..40ca7b5a8c530 100644 --- a/source/common/network/filter_manager_impl.cc +++ b/source/common/network/filter_manager_impl.cc @@ -11,7 +11,7 @@ namespace Network { void FilterManagerImpl::addWriteFilter(WriteFilterSharedPtr filter) { ASSERT(connection_.state() == Connection::State::Open); - ActiveWriteFilterPtr new_filter(new ActiveWriteFilter{*this, filter}); + ActiveWriteFilterPtr new_filter = std::make_unique(*this, filter); filter->initializeWriteFilterCallbacks(*new_filter); LinkedList::moveIntoList(std::move(new_filter), downstream_filters_); } @@ -23,7 +23,7 @@ void 
FilterManagerImpl::addFilter(FilterSharedPtr filter) { void FilterManagerImpl::addReadFilter(ReadFilterSharedPtr filter) { ASSERT(connection_.state() == Connection::State::Open); - ActiveReadFilterPtr new_filter(new ActiveReadFilter{*this, filter}); + ActiveReadFilterPtr new_filter = std::make_unique(*this, filter); filter->initializeReadFilterCallbacks(*new_filter); LinkedList::moveIntoListBack(std::move(new_filter), upstream_filters_); } diff --git a/source/common/network/io_socket_handle_impl.cc b/source/common/network/io_socket_handle_impl.cc index 7e2f3a5b789db..8f2d2e0b56872 100644 --- a/source/common/network/io_socket_handle_impl.cc +++ b/source/common/network/io_socket_handle_impl.cc @@ -275,26 +275,6 @@ Api::IoCallUint64Result IoSocketHandleImpl::sendmsg(const Buffer::RawSlice* slic } } -Address::InstanceConstSharedPtr getAddressFromSockAddrOrDie(const sockaddr_storage& ss, - socklen_t ss_len, os_fd_t fd) { - // TODO(chaoqin-li1123): remove exception catching and make Address::addressFromSockAddr return - // null on error. - TRY_NEEDS_AUDIT { - // Set v6only to false so that mapped-v6 address can be normalize to v4 - // address. Though dual stack may be disabled, it's still okay to assume the - // address is from a dual stack socket. This is because mapped-v6 address - // must come from a dual stack socket. An actual v6 address can come from - // both dual stack socket and v6 only socket. If |peer_addr| is an actual v6 - // address and the socket is actually v6 only, the returned address will be - // regarded as a v6 address from dual stack socket. However, this address is not going to be - // used to create socket. Wrong knowledge of dual stack support won't hurt. 
- return Address::addressFromSockAddr(ss, ss_len, /*v6only=*/false); - } - catch (const EnvoyException& e) { - PANIC(fmt::format("Invalid address for fd: {}, error: {}", fd, e.what())); - } -} - Address::InstanceConstSharedPtr maybeGetDstAddressFromHeader(const cmsghdr& cmsg, uint32_t self_port, os_fd_t fd) { if (cmsg.cmsg_type == IPV6_PKTINFO) { @@ -305,7 +285,7 @@ Address::InstanceConstSharedPtr maybeGetDstAddressFromHeader(const cmsghdr& cmsg ipv6_addr->sin6_family = AF_INET6; ipv6_addr->sin6_addr = info->ipi6_addr; ipv6_addr->sin6_port = htons(self_port); - return getAddressFromSockAddrOrDie(ss, sizeof(sockaddr_in6), fd); + return Address::addressFromSockAddrOrDie(ss, sizeof(sockaddr_in6), fd); } if (cmsg.cmsg_type == messageTypeContainsIP()) { @@ -315,7 +295,7 @@ Address::InstanceConstSharedPtr maybeGetDstAddressFromHeader(const cmsghdr& cmsg ipv4_addr->sin_family = AF_INET; ipv4_addr->sin_addr = addressFromMessage(cmsg); ipv4_addr->sin_port = htons(self_port); - return getAddressFromSockAddrOrDie(ss, sizeof(sockaddr_in), fd); + return Address::addressFromSockAddrOrDie(ss, sizeof(sockaddr_in), fd); } return nullptr; @@ -385,7 +365,7 @@ Api::IoCallUint64Result IoSocketHandleImpl::recvmsg(Buffer::RawSlice* slices, fmt::format("Incorrectly set control message length: {}", hdr.msg_controllen)); RELEASE_ASSERT(hdr.msg_namelen > 0, fmt::format("Unable to get remote address from recvmsg() for fd: {}", fd_)); - output.msg_[0].peer_address_ = getAddressFromSockAddrOrDie(peer_addr, hdr.msg_namelen, fd_); + output.msg_[0].peer_address_ = Address::addressFromSockAddrOrDie(peer_addr, hdr.msg_namelen, fd_); output.msg_[0].gso_size_ = 0; if (hdr.msg_controllen > 0) { @@ -492,7 +472,7 @@ Api::IoCallUint64Result IoSocketHandleImpl::recvmmsg(RawSliceArrays& slices, uin output.msg_[i].msg_len_ = mmsg_hdr[i].msg_len; // Get local and peer addresses for each packet. 
output.msg_[i].peer_address_ = - getAddressFromSockAddrOrDie(raw_addresses[i], hdr.msg_namelen, fd_); + Address::addressFromSockAddrOrDie(raw_addresses[i], hdr.msg_namelen, fd_); if (hdr.msg_controllen > 0) { struct cmsghdr* cmsg; for (cmsg = CMSG_FIRSTHDR(&hdr); cmsg != nullptr; cmsg = CMSG_NXTHDR(&hdr, cmsg)) { @@ -605,7 +585,7 @@ Address::InstanceConstSharedPtr IoSocketHandleImpl::localAddress() { throw EnvoyException(fmt::format("getsockname failed for '{}': ({}) {}", fd_, result.errno_, errorDetails(result.errno_))); } - return Address::addressFromSockAddr(ss, ss_len, socket_v6only_); + return Address::addressFromSockAddrOrThrow(ss, ss_len, socket_v6only_); } Address::InstanceConstSharedPtr IoSocketHandleImpl::peerAddress() { @@ -616,7 +596,7 @@ Address::InstanceConstSharedPtr IoSocketHandleImpl::peerAddress() { os_sys_calls.getpeername(fd_, reinterpret_cast(&ss), &ss_len); if (result.rc_ != 0) { throw EnvoyException( - fmt::format("getpeername failed for '{}': {}", fd_, errorDetails(result.errno_))); + fmt::format("getpeername failed for '{}': {}", errorDetails(result.errno_))); } if (ss_len == udsAddressLength() && ss.ss_family == AF_UNIX) { @@ -630,7 +610,7 @@ Address::InstanceConstSharedPtr IoSocketHandleImpl::peerAddress() { fmt::format("getsockname failed for '{}': {}", fd_, errorDetails(result.errno_))); } } - return Address::addressFromSockAddr(ss, ss_len); + return Address::addressFromSockAddrOrThrow(ss, ss_len, socket_v6only_); } void IoSocketHandleImpl::initializeFileEvent(Event::Dispatcher& dispatcher, Event::FileReadyCb cb, diff --git a/source/common/network/listen_socket_impl.h b/source/common/network/listen_socket_impl.h index d42e8cadd683a..65e09b2b461ae 100644 --- a/source/common/network/listen_socket_impl.h +++ b/source/common/network/listen_socket_impl.h @@ -152,9 +152,11 @@ class ConnectionSocketImpl : public SocketImpl, public ConnectionSocket { void setRequestedServerName(absl::string_view server_name) override { // Always keep the 
server_name_ as lower case. - server_name_ = absl::AsciiStrToLower(server_name); + addressProvider().setRequestedServerName(absl::AsciiStrToLower(server_name)); + } + absl::string_view requestedServerName() const override { + return addressProvider().requestedServerName(); } - absl::string_view requestedServerName() const override { return server_name_; } absl::optional lastRoundTripTime() override { return ioHandle().lastRoundTripTime(); @@ -162,15 +164,13 @@ class ConnectionSocketImpl : public SocketImpl, public ConnectionSocket { void dumpState(std::ostream& os, int indent_level) const override { const char* spaces = spacesForLevel(indent_level); - os << spaces << "ListenSocketImpl " << this << DUMP_MEMBER(transport_protocol_) - << DUMP_MEMBER(server_name_) << "\n"; + os << spaces << "ListenSocketImpl " << this << DUMP_MEMBER(transport_protocol_) << "\n"; DUMP_DETAILS(address_provider_); } protected: std::string transport_protocol_; std::vector application_protocols_; - std::string server_name_; }; // ConnectionSocket used with server connections. 
diff --git a/source/common/network/raw_buffer_socket.cc b/source/common/network/raw_buffer_socket.cc index ad72a2fb434de..788cdc7b1d38e 100644 --- a/source/common/network/raw_buffer_socket.cc +++ b/source/common/network/raw_buffer_socket.cc @@ -87,7 +87,7 @@ absl::string_view RawBufferSocket::failureReason() const { return EMPTY_STRING; void RawBufferSocket::onConnected() { callbacks_->raiseEvent(ConnectionEvent::Connected); } TransportSocketPtr -RawBufferSocketFactory::createTransportSocket(TransportSocketOptionsSharedPtr) const { +RawBufferSocketFactory::createTransportSocket(TransportSocketOptionsConstSharedPtr) const { return std::make_unique(); } diff --git a/source/common/network/raw_buffer_socket.h b/source/common/network/raw_buffer_socket.h index 06462d86f886d..bad90ef9bbfa1 100644 --- a/source/common/network/raw_buffer_socket.h +++ b/source/common/network/raw_buffer_socket.h @@ -31,7 +31,8 @@ class RawBufferSocket : public TransportSocket, protected Logger::LoggableasStringView()) << DUMP_NULLABLE_MEMBER(direct_remote_address_, direct_remote_address_->asStringView()) - << DUMP_NULLABLE_MEMBER(local_address_, local_address_->asStringView()) << "\n"; + << DUMP_NULLABLE_MEMBER(local_address_, local_address_->asStringView()) + << DUMP_MEMBER(server_name_) << "\n"; } // SocketAddressSetter @@ -44,12 +45,17 @@ class SocketAddressSetterImpl : public SocketAddressSetter { const Address::InstanceConstSharedPtr& directRemoteAddress() const override { return direct_remote_address_; } + absl::string_view requestedServerName() const override { return server_name_; } + void setRequestedServerName(const absl::string_view requested_server_name) override { + server_name_ = std::string(requested_server_name); + } private: Address::InstanceConstSharedPtr local_address_; bool local_address_restored_{false}; Address::InstanceConstSharedPtr remote_address_; Address::InstanceConstSharedPtr direct_remote_address_; + std::string server_name_; }; class SocketImpl : public virtual 
Socket { diff --git a/source/common/network/tcp_listener_impl.cc b/source/common/network/tcp_listener_impl.cc index ad8eb0e5c310f..8760e5f65eb84 100644 --- a/source/common/network/tcp_listener_impl.cc +++ b/source/common/network/tcp_listener_impl.cc @@ -83,12 +83,13 @@ void TcpListenerImpl::onSocketEvent(short flags) { // Pass the 'v6only' parameter as true if the local_address is an IPv6 address. This has no // effect if the socket is a v4 socket, but for v6 sockets this will create an IPv4 remote // address if an IPv4 local_address was created from an IPv6 mapped IPv4 address. - const Address::InstanceConstSharedPtr& remote_address = + + const Address::InstanceConstSharedPtr remote_address = (remote_addr.ss_family == AF_UNIX) ? io_handle->peerAddress() - : Address::addressFromSockAddr(remote_addr, remote_addr_len, - local_address->ip()->version() == - Address::IpVersion::v6); + : Address::addressFromSockAddrOrThrow(remote_addr, remote_addr_len, + local_address->ip()->version() == + Address::IpVersion::v6); cb_.onAccept( std::make_unique(std::move(io_handle), local_address, remote_address)); diff --git a/source/common/network/transport_socket_options_impl.cc b/source/common/network/transport_socket_options_impl.cc index 2950a918cd22a..9990584c39fd9 100644 --- a/source/common/network/transport_socket_options_impl.cc +++ b/source/common/network/transport_socket_options_impl.cc @@ -58,7 +58,7 @@ void TransportSocketOptionsImpl::hashKey(std::vector& key, commonHashKey(*this, key, factory); } -TransportSocketOptionsSharedPtr +TransportSocketOptionsConstSharedPtr TransportSocketOptionsUtility::fromFilterState(const StreamInfo::FilterState& filter_state) { absl::string_view server_name; std::vector application_protocols; diff --git a/source/common/network/transport_socket_options_impl.h b/source/common/network/transport_socket_options_impl.h index 8a961e4fa33d5..6512a29c03c5c 100644 --- a/source/common/network/transport_socket_options_impl.h +++ 
b/source/common/network/transport_socket_options_impl.h @@ -11,7 +11,7 @@ namespace Network { class AlpnDecoratingTransportSocketOptions : public TransportSocketOptions { public: AlpnDecoratingTransportSocketOptions(std::vector&& alpn, - TransportSocketOptionsSharedPtr inner_options) + TransportSocketOptionsConstSharedPtr inner_options) : alpn_fallback_(std::move(alpn)), inner_options_(std::move(inner_options)) {} // Network::TransportSocketOptions const absl::optional& serverNameOverride() const override { @@ -34,7 +34,7 @@ class AlpnDecoratingTransportSocketOptions : public TransportSocketOptions { private: const std::vector alpn_fallback_; - const TransportSocketOptionsSharedPtr inner_options_; + const TransportSocketOptionsConstSharedPtr inner_options_; }; class TransportSocketOptionsImpl : public TransportSocketOptions { @@ -83,10 +83,10 @@ class TransportSocketOptionsUtility { /** * Construct TransportSocketOptions from StreamInfo::FilterState, using UpstreamServerName * and ApplicationProtocols key in the filter state. - * @returns TransportSocketOptionsSharedPtr a shared pointer to the transport socket options, + * @returns TransportSocketOptionsConstSharedPtr a shared pointer to the transport socket options, * nullptr if nothing is in the filter state. 
*/ - static TransportSocketOptionsSharedPtr + static TransportSocketOptionsConstSharedPtr fromFilterState(const StreamInfo::FilterState& stream_info); }; diff --git a/source/common/network/utility.cc b/source/common/network/utility.cc index 8bed3c875fae3..08b830fd799ca 100644 --- a/source/common/network/utility.cc +++ b/source/common/network/utility.cc @@ -11,6 +11,7 @@ #include "envoy/common/exception.h" #include "envoy/common/platform.h" #include "envoy/config/core/v3/address.pb.h" +#include "envoy/network/address.h" #include "envoy/network/connection.h" #include "source/common/api/os_sys_calls_impl.h" @@ -32,14 +33,20 @@ namespace Envoy { namespace Network { +Address::InstanceConstSharedPtr instanceOrNull(StatusOr address) { + if (address.ok()) { + return *address; + } + return nullptr; +} + Address::InstanceConstSharedPtr Utility::resolveUrl(const std::string& url) { if (urlIsTcpScheme(url)) { return parseInternetAddressAndPort(url.substr(TCP_SCHEME.size())); } else if (urlIsUdpScheme(url)) { return parseInternetAddressAndPort(url.substr(UDP_SCHEME.size())); } else if (urlIsUnixScheme(url)) { - return Address::InstanceConstSharedPtr{ - new Address::PipeInstance(url.substr(UNIX_SCHEME.size()))}; + return std::make_shared(url.substr(UNIX_SCHEME.size())); } else { throw EnvoyException(absl::StrCat("unknown protocol scheme: ", url)); } @@ -133,14 +140,15 @@ Address::InstanceConstSharedPtr Utility::parseInternetAddressNoThrow(const std:: if (inet_pton(AF_INET, ip_address.c_str(), &sa4.sin_addr) == 1) { sa4.sin_family = AF_INET; sa4.sin_port = htons(port); - return std::make_shared(&sa4); + return instanceOrNull(Address::InstanceFactory::createInstancePtr(&sa4)); } sockaddr_in6 sa6; memset(&sa6, 0, sizeof(sa6)); if (inet_pton(AF_INET6, ip_address.c_str(), &sa6.sin6_addr) == 1) { sa6.sin6_family = AF_INET6; sa6.sin6_port = htons(port); - return std::make_shared(sa6, v6only); + return instanceOrNull( + Address::InstanceFactory::createInstancePtr(sa6, v6only)); } return 
nullptr; } @@ -179,7 +187,8 @@ Utility::parseInternetAddressAndPortNoThrow(const std::string& ip_address, bool } sa6.sin6_family = AF_INET6; sa6.sin6_port = htons(port64); - return std::make_shared(sa6, v6only); + return instanceOrNull( + Address::InstanceFactory::createInstancePtr(sa6, v6only)); } // Treat it as an IPv4 address followed by a port. const auto pos = ip_address.rfind(':'); @@ -199,7 +208,7 @@ Utility::parseInternetAddressAndPortNoThrow(const std::string& ip_address, bool } sa4.sin_family = AF_INET; sa4.sin_port = htons(port64); - return std::make_shared(&sa4); + return instanceOrNull(Address::InstanceFactory::createInstancePtr(&sa4)); } Address::InstanceConstSharedPtr Utility::parseInternetAddressAndPort(const std::string& ip_address, @@ -247,7 +256,7 @@ Address::InstanceConstSharedPtr Utility::getLocalAddress(const Address::IpVersio (ifa->ifa_addr->sa_family == AF_INET6 && version == Address::IpVersion::v6)) { const struct sockaddr_storage* addr = reinterpret_cast(ifa->ifa_addr); - ret = Address::addressFromSockAddr( + ret = Address::addressFromSockAddrOrThrow( *addr, (version == Address::IpVersion::v4) ? sizeof(sockaddr_in) : sizeof(sockaddr_in6)); if (!isLoopbackAddress(*ret)) { break; @@ -405,7 +414,8 @@ Address::InstanceConstSharedPtr Utility::getOriginalDst(Socket& sock) { return nullptr; } - return Address::addressFromSockAddr(orig_addr, 0, true /* default for v6 constructor */); + return Address::addressFromSockAddrOrDie(orig_addr, 0, -1, true /* default for v6 constructor */); + #else // TODO(zuercher): determine if connection redirection is possible under macOS (c.f. pfctl and // divert), and whether it's possible to find the learn destination address. 
diff --git a/source/common/quic/envoy_quic_client_session.cc b/source/common/quic/envoy_quic_client_session.cc index 4de2584e78e10..6525b085bca1d 100644 --- a/source/common/quic/envoy_quic_client_session.cc +++ b/source/common/quic/envoy_quic_client_session.cc @@ -35,7 +35,7 @@ void EnvoyQuicClientSession::connect() { void EnvoyQuicClientSession::OnConnectionClosed(const quic::QuicConnectionCloseFrame& frame, quic::ConnectionCloseSource source) { quic::QuicSpdyClientSession::OnConnectionClosed(frame, source); - onConnectionCloseEvent(frame, source); + onConnectionCloseEvent(frame, source, version()); } void EnvoyQuicClientSession::Initialize() { diff --git a/source/common/quic/envoy_quic_crypto_stream_factory.h b/source/common/quic/envoy_quic_crypto_stream_factory.h index 30ba1710751e5..fd8151984ee2f 100644 --- a/source/common/quic/envoy_quic_crypto_stream_factory.h +++ b/source/common/quic/envoy_quic_crypto_stream_factory.h @@ -1,6 +1,8 @@ #pragma once +#include "envoy/common/optref.h" #include "envoy/config/typed_config.h" +#include "envoy/network/transport_socket.h" #if defined(__GNUC__) #pragma GCC diagnostic push @@ -26,11 +28,12 @@ class EnvoyQuicCryptoServerStreamFactoryInterface : public Config::TypedFactory std::string category() const override { return "envoy.quic.server.crypto_stream"; } // Return an Envoy specific quic crypto server stream object. 
- virtual std::unique_ptr - createEnvoyQuicCryptoServerStream(const quic::QuicCryptoServerConfig* crypto_config, - quic::QuicCompressedCertsCache* compressed_certs_cache, - quic::QuicSession* session, - quic::QuicCryptoServerStreamBase::Helper* helper) PURE; + virtual std::unique_ptr createEnvoyQuicCryptoServerStream( + const quic::QuicCryptoServerConfig* crypto_config, + quic::QuicCompressedCertsCache* compressed_certs_cache, quic::QuicSession* session, + quic::QuicCryptoServerStreamBase::Helper* helper, + OptRef transport_socket_factory, + Event::Dispatcher& dispatcher) PURE; }; class EnvoyQuicCryptoClientStreamFactoryInterface { diff --git a/source/common/quic/envoy_quic_dispatcher.cc b/source/common/quic/envoy_quic_dispatcher.cc index b85e2e0d5844e..217e561e2b65d 100644 --- a/source/common/quic/envoy_quic_dispatcher.cc +++ b/source/common/quic/envoy_quic_dispatcher.cc @@ -2,6 +2,8 @@ #include +#include "envoy/common/optref.h" + #include "source/common/common/safe_memcpy.h" #include "source/common/http/utility.h" #include "source/common/quic/envoy_quic_server_connection.h" @@ -71,7 +73,9 @@ std::unique_ptr EnvoyQuicDispatcher::CreateQuicSession( quic_config, quic::ParsedQuicVersionVector{version}, std::move(quic_connection), this, session_helper(), crypto_config(), compressed_certs_cache(), dispatcher_, listener_config_.perConnectionBufferLimitBytes(), quic_stat_names_, - listener_config_.listenerScope(), crypto_server_stream_factory_); + listener_config_.listenerScope(), crypto_server_stream_factory_, + makeOptRefFromPtr(filter_chain == nullptr ? 
nullptr + : &filter_chain->transportSocketFactory())); if (filter_chain != nullptr) { const bool has_filter_initialized = listener_config_.filterChainFactory().createNetworkFilterChain( diff --git a/source/common/quic/envoy_quic_server_session.cc b/source/common/quic/envoy_quic_server_session.cc index 7fb668a639c79..ca373f22547a1 100644 --- a/source/common/quic/envoy_quic_server_session.cc +++ b/source/common/quic/envoy_quic_server_session.cc @@ -15,14 +15,15 @@ EnvoyQuicServerSession::EnvoyQuicServerSession( quic::QuicCryptoServerStream::Helper* helper, const quic::QuicCryptoServerConfig* crypto_config, quic::QuicCompressedCertsCache* compressed_certs_cache, Event::Dispatcher& dispatcher, uint32_t send_buffer_limit, QuicStatNames& quic_stat_names, Stats::Scope& listener_scope, - EnvoyQuicCryptoServerStreamFactoryInterface& crypto_server_stream_factory) + EnvoyQuicCryptoServerStreamFactoryInterface& crypto_server_stream_factory, + OptRef transport_socket_factory) : quic::QuicServerSessionBase(config, supported_versions, connection.get(), visitor, helper, crypto_config, compressed_certs_cache), QuicFilterManagerConnectionImpl(*connection, connection->connection_id(), dispatcher, send_buffer_limit), quic_connection_(std::move(connection)), quic_stat_names_(quic_stat_names), - listener_scope_(listener_scope), crypto_server_stream_factory_(crypto_server_stream_factory) { -} + listener_scope_(listener_scope), crypto_server_stream_factory_(crypto_server_stream_factory), + transport_socket_factory_(transport_socket_factory) {} EnvoyQuicServerSession::~EnvoyQuicServerSession() { ASSERT(!quic_connection_->connected()); @@ -38,7 +39,8 @@ EnvoyQuicServerSession::CreateQuicCryptoServerStream( const quic::QuicCryptoServerConfig* crypto_config, quic::QuicCompressedCertsCache* compressed_certs_cache) { return crypto_server_stream_factory_.createEnvoyQuicCryptoServerStream( - crypto_config, compressed_certs_cache, this, stream_helper()); + crypto_config, compressed_certs_cache, 
this, stream_helper(), transport_socket_factory_, + dispatcher()); } quic::QuicSpdyStream* EnvoyQuicServerSession::CreateIncomingStream(quic::QuicStreamId id) { @@ -86,7 +88,7 @@ void EnvoyQuicServerSession::setUpRequestDecoder(EnvoyQuicServerStream& stream) void EnvoyQuicServerSession::OnConnectionClosed(const quic::QuicConnectionCloseFrame& frame, quic::ConnectionCloseSource source) { quic::QuicServerSessionBase::OnConnectionClosed(frame, source); - onConnectionCloseEvent(frame, source); + onConnectionCloseEvent(frame, source, version()); } void EnvoyQuicServerSession::Initialize() { diff --git a/source/common/quic/envoy_quic_server_session.h b/source/common/quic/envoy_quic_server_session.h index 7bdb705ac764e..9e6bedb7aac21 100644 --- a/source/common/quic/envoy_quic_server_session.h +++ b/source/common/quic/envoy_quic_server_session.h @@ -45,7 +45,8 @@ class EnvoyQuicServerSession : public quic::QuicServerSessionBase, quic::QuicCompressedCertsCache* compressed_certs_cache, Event::Dispatcher& dispatcher, uint32_t send_buffer_limit, QuicStatNames& quic_stat_names, Stats::Scope& listener_scope, - EnvoyQuicCryptoServerStreamFactoryInterface& crypto_server_stream_factory); + EnvoyQuicCryptoServerStreamFactoryInterface& crypto_server_stream_factory, + OptRef transport_socket_factory); ~EnvoyQuicServerSession() override; @@ -118,6 +119,7 @@ class EnvoyQuicServerSession : public quic::QuicServerSessionBase, Stats::Scope& listener_scope_; EnvoyQuicCryptoServerStreamFactoryInterface& crypto_server_stream_factory_; + OptRef transport_socket_factory_; }; } // namespace Quic diff --git a/source/common/quic/envoy_quic_utils.cc b/source/common/quic/envoy_quic_utils.cc index 0a86ecfced215..769f8cf086497 100644 --- a/source/common/quic/envoy_quic_utils.cc +++ b/source/common/quic/envoy_quic_utils.cc @@ -17,12 +17,12 @@ namespace Quic { Network::Address::InstanceConstSharedPtr quicAddressToEnvoyAddressInstance(const quic::QuicSocketAddress& quic_address) { return 
quic_address.IsInitialized() - ? Network::Address::addressFromSockAddr(quic_address.generic_address(), - quic_address.host().address_family() == - quic::IpAddressFamily::IP_V4 - ? sizeof(sockaddr_in) - : sizeof(sockaddr_in6), - false) + ? Network::Address::addressFromSockAddrOrDie(quic_address.generic_address(), + quic_address.host().address_family() == + quic::IpAddressFamily::IP_V4 + ? sizeof(sockaddr_in) + : sizeof(sockaddr_in6), + -1, false) : nullptr; } diff --git a/source/common/quic/quic_filter_manager_connection_impl.cc b/source/common/quic/quic_filter_manager_connection_impl.cc index fe3c229ab895a..1532078e2e50d 100644 --- a/source/common/quic/quic_filter_manager_connection_impl.cc +++ b/source/common/quic/quic_filter_manager_connection_impl.cc @@ -158,7 +158,8 @@ void QuicFilterManagerConnectionImpl::maybeApplyDelayClosePolicy() { } void QuicFilterManagerConnectionImpl::onConnectionCloseEvent( - const quic::QuicConnectionCloseFrame& frame, quic::ConnectionCloseSource source) { + const quic::QuicConnectionCloseFrame& frame, quic::ConnectionCloseSource source, + const quic::ParsedQuicVersion& version) { transport_failure_reason_ = absl::StrCat(quic::QuicErrorCodeToString(frame.quic_error_code), " with details: ", frame.error_details); if (network_connection_ != nullptr) { @@ -169,6 +170,33 @@ void QuicFilterManagerConnectionImpl::onConnectionCloseEvent( ASSERT(network_connection_ != nullptr); network_connection_ = nullptr; } + + if (!codec_stats_.has_value()) { + // The connection was closed before it could be used. Stats are not recorded. 
+ return; + } + switch (version.transport_version) { + case quic::QUIC_VERSION_43: + codec_stats_->quic_version_43_.inc(); + return; + case quic::QUIC_VERSION_46: + codec_stats_->quic_version_46_.inc(); + return; + case quic::QUIC_VERSION_50: + codec_stats_->quic_version_50_.inc(); + return; + case quic::QUIC_VERSION_51: + codec_stats_->quic_version_51_.inc(); + return; + case quic::QUIC_VERSION_IETF_DRAFT_29: + codec_stats_->quic_version_h3_29_.inc(); + return; + case quic::QUIC_VERSION_IETF_RFC_V1: + codec_stats_->quic_version_rfc_v1_.inc(); + return; + default: + return; + } } void QuicFilterManagerConnectionImpl::closeConnectionImmediately() { diff --git a/source/common/quic/quic_filter_manager_connection_impl.h b/source/common/quic/quic_filter_manager_connection_impl.h index 59d1bf8375b5a..e1a96ea42b90a 100644 --- a/source/common/quic/quic_filter_manager_connection_impl.h +++ b/source/common/quic/quic_filter_manager_connection_impl.h @@ -137,13 +137,10 @@ class QuicFilterManagerConnectionImpl : public Network::ConnectionImplBase, uint32_t bytesToSend() { return bytes_to_send_; } void setHttp3Options(const envoy::config::core::v3::Http3ProtocolOptions& http3_options) { - http3_options_ = - std::reference_wrapper(http3_options); + http3_options_ = http3_options; } - void setCodecStats(Http::Http3::CodecStats& stats) { - codec_stats_ = std::reference_wrapper(stats); - } + void setCodecStats(Http::Http3::CodecStats& stats) { codec_stats_ = stats; } uint32_t maxIncomingHeadersCount() { return max_headers_count_; } @@ -154,7 +151,8 @@ class QuicFilterManagerConnectionImpl : public Network::ConnectionImplBase, protected: // Propagate connection close to network_connection_callbacks_. 
void onConnectionCloseEvent(const quic::QuicConnectionCloseFrame& frame, - quic::ConnectionCloseSource source); + quic::ConnectionCloseSource source, + const quic::ParsedQuicVersion& version); void closeConnectionImmediately() override; @@ -166,9 +164,8 @@ class QuicFilterManagerConnectionImpl : public Network::ConnectionImplBase, QuicNetworkConnection* network_connection_{nullptr}; - absl::optional> codec_stats_; - absl::optional> - http3_options_; + OptRef codec_stats_; + OptRef http3_options_; bool initialized_{false}; private: diff --git a/source/common/quic/quic_stat_names.cc b/source/common/quic/quic_stat_names.cc index 760d40c4c265e..d613c48391539 100644 --- a/source/common/quic/quic_stat_names.cc +++ b/source/common/quic/quic_stat_names.cc @@ -3,6 +3,8 @@ namespace Envoy { namespace Quic { +#ifdef ENVOY_ENABLE_QUIC + // TODO(renjietang): Currently these stats are only available in downstream. Wire it up to upstream // QUIC also. QuicStatNames::QuicStatNames(Stats::SymbolTable& symbol_table) @@ -76,5 +78,10 @@ Stats::StatName QuicStatNames::resetStreamErrorStatName(quic::QuicRstStreamError })); } +#else +QuicStatNames::QuicStatNames(Stats::SymbolTable& /*symbol_table*/) {} + +#endif + } // namespace Quic } // namespace Envoy diff --git a/source/common/quic/quic_stat_names.h b/source/common/quic/quic_stat_names.h index 2e0acbe2de825..f9eba684a76b8 100644 --- a/source/common/quic/quic_stat_names.h +++ b/source/common/quic/quic_stat_names.h @@ -1,5 +1,7 @@ #pragma once +#ifdef ENVOY_ENABLE_QUIC + #include "envoy/stats/scope.h" #include "source/common/common/thread.h" @@ -46,5 +48,18 @@ class QuicStatNames { reset_stream_error_stat_names_; }; +#else + +#include "source/common/stats/symbol_table_impl.h" +namespace Envoy { +namespace Quic { + +class QuicStatNames { +public: + explicit QuicStatNames(Stats::SymbolTable& symbol_table); +}; + +#endif + } // namespace Quic } // namespace Envoy diff --git a/source/common/quic/quic_transport_socket_factory.h 
b/source/common/quic/quic_transport_socket_factory.h index 53fede86f8112..7c98e1ef29e69 100644 --- a/source/common/quic/quic_transport_socket_factory.h +++ b/source/common/quic/quic_transport_socket_factory.h @@ -45,7 +45,7 @@ class QuicTransportSocketFactoryBase : public Network::TransportSocketFactory, // Network::TransportSocketFactory Network::TransportSocketPtr - createTransportSocket(Network::TransportSocketOptionsSharedPtr /*options*/) const override { + createTransportSocket(Network::TransportSocketOptionsConstSharedPtr /*options*/) const override { NOT_REACHED_GCOVR_EXCL_LINE; } bool implementsSecureTransport() const override { return true; } @@ -104,7 +104,7 @@ class QuicClientTransportSocketFactory : public QuicTransportSocketFactoryBase { // is needed. In this case the QuicClientTransportSocketFactory falls over to // using the fallback factory. Network::TransportSocketPtr - createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override { + createTransportSocket(Network::TransportSocketOptionsConstSharedPtr options) const override { return fallback_factory_->createTransportSocket(options); } diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index 325b77ef4c0b9..b1432fa02bbf7 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -1021,7 +1021,8 @@ RouteEntryImplBase::WeightedClusterEntry::WeightedClusterEntry( cluster.response_headers_to_remove())), per_filter_configs_(cluster.typed_per_filter_config(), cluster.hidden_envoy_deprecated_per_filter_config(), - optional_http_filters, factory_context, validator) { + optional_http_filters, factory_context, validator), + host_rewrite_(cluster.host_rewrite_literal()) { if (cluster.has_metadata_match()) { const auto filter_it = cluster.metadata_match().filter_metadata().find( Envoy::Config::MetadataFilters::get().ENVOY_LB); diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index 
e367683e092ec..a6add7e18e3d3 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -771,6 +771,9 @@ class RouteEntryImplBase : public RouteEntry, const StreamInfo::StreamInfo& stream_info, bool insert_envoy_original_path) const override { request_headers_parser_->evaluateHeaders(headers, stream_info); + if (!host_rewrite_.empty()) { + headers.setHost(host_rewrite_); + } DynamicRouteEntry::finalizeRequestHeaders(headers, stream_info, insert_envoy_original_path); } void finalizeResponseHeaders(Http::ResponseHeaderMap& headers, @@ -791,6 +794,7 @@ class RouteEntryImplBase : public RouteEntry, HeaderParserPtr request_headers_parser_; HeaderParserPtr response_headers_parser_; PerFilterConfigs per_filter_configs_; + const std::string host_rewrite_; }; using WeightedClusterEntrySharedPtr = std::shared_ptr; diff --git a/source/common/router/rds_impl.cc b/source/common/router/rds_impl.cc index b07c7b9bb02b6..670b5e1268cc0 100644 --- a/source/common/router/rds_impl.cc +++ b/source/common/router/rds_impl.cc @@ -88,7 +88,8 @@ RdsRouteConfigSubscription::RdsRouteConfigSubscription( fmt::format("RdsRouteConfigSubscription local-init-target {}", route_config_name_), [this]() { subscription_->start({route_config_name_}); }), local_init_manager_(fmt::format("RDS local-init-manager {}", route_config_name_)), - stat_prefix_(stat_prefix), stats_({ALL_RDS_STATS(POOL_COUNTER(*scope_))}), + stat_prefix_(stat_prefix), + stats_({ALL_RDS_STATS(POOL_COUNTER(*scope_), POOL_GAUGE(*scope_))}), route_config_provider_manager_(route_config_provider_manager), manager_identifier_(manager_identifier), optional_http_filters_(optional_http_filters) { const auto resource_name = getResourceName(); @@ -131,6 +132,7 @@ void RdsRouteConfigSubscription::onConfigUpdate( std::unique_ptr resume_rds; if (config_update_info_->onRdsUpdate(route_config, version_info)) { stats_.config_reload_.inc(); + 
stats_.config_reload_time_ms_.set(DateUtil::nowToMilliseconds(factory_context_.timeSource())); if (config_update_info_->protobufConfiguration().has_vhds() && config_update_info_->vhdsConfigurationChanged()) { ENVOY_LOG( @@ -326,7 +328,9 @@ void RdsRouteConfigProviderImpl::requestVirtualHostsUpdate( RouteConfigProviderManagerImpl::RouteConfigProviderManagerImpl(Server::Admin& admin) { config_tracker_entry_ = - admin.getConfigTracker().add("routes", [this] { return dumpRouteConfigs(); }); + admin.getConfigTracker().add("routes", [this](const Matchers::StringMatcher& matcher) { + return dumpRouteConfigs(matcher); + }); // ConfigTracker keys must be unique. We are asserting that no one has stolen the "routes" key // from us, since the returned entry will be nullptr if the key already exists. RELEASE_ASSERT(config_tracker_entry_, ""); @@ -377,7 +381,8 @@ RouteConfigProviderPtr RouteConfigProviderManagerImpl::createStaticRouteConfigPr } std::unique_ptr -RouteConfigProviderManagerImpl::dumpRouteConfigs() const { +RouteConfigProviderManagerImpl::dumpRouteConfigs( + const Matchers::StringMatcher& name_matcher) const { auto config_dump = std::make_unique(); for (const auto& element : dynamic_route_config_providers_) { @@ -389,6 +394,9 @@ RouteConfigProviderManagerImpl::dumpRouteConfigs() const { ASSERT(subscription->route_config_provider_opt_.has_value()); if (subscription->routeConfigUpdate()->configInfo()) { + if (!name_matcher.match(subscription->routeConfigUpdate()->protobufConfiguration().name())) { + continue; + } auto* dynamic_config = config_dump->mutable_dynamic_route_configs()->Add(); dynamic_config->set_version_info(subscription->routeConfigUpdate()->configVersion()); dynamic_config->mutable_route_config()->PackFrom( @@ -400,6 +408,9 @@ RouteConfigProviderManagerImpl::dumpRouteConfigs() const { for (const auto& provider : static_route_config_providers_) { ASSERT(provider->configInfo()); + if (!name_matcher.match(provider->configInfo().value().config_.name())) { + 
continue; + } auto* static_config = config_dump->mutable_static_route_configs()->Add(); static_config->mutable_route_config()->PackFrom( API_RECOVER_ORIGINAL(provider->configInfo().value().config_)); diff --git a/source/common/router/rds_impl.h b/source/common/router/rds_impl.h index 6ea491b88d253..4c4df57d4482b 100644 --- a/source/common/router/rds_impl.h +++ b/source/common/router/rds_impl.h @@ -97,15 +97,16 @@ class StaticRouteConfigProviderImpl : public RouteConfigProvider { /** * All RDS stats. @see stats_macros.h */ -#define ALL_RDS_STATS(COUNTER) \ +#define ALL_RDS_STATS(COUNTER, GAUGE) \ COUNTER(config_reload) \ - COUNTER(update_empty) + COUNTER(update_empty) \ + GAUGE(config_reload_time_ms, NeverImport) /** * Struct definition for all RDS stats. @see stats_macros.h */ struct RdsStats { - ALL_RDS_STATS(GENERATE_COUNTER_STRUCT) + ALL_RDS_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) }; class RdsRouteConfigProviderImpl; @@ -241,7 +242,8 @@ class RouteConfigProviderManagerImpl : public RouteConfigProviderManager, public: RouteConfigProviderManagerImpl(Server::Admin& admin); - std::unique_ptr dumpRouteConfigs() const; + std::unique_ptr + dumpRouteConfigs(const Matchers::StringMatcher& name_matcher) const; // RouteConfigProviderManager RouteConfigProviderSharedPtr createRdsRouteConfigProvider( diff --git a/source/common/router/router.cc b/source/common/router/router.cc index b6f478620560a..259e105a1af90 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -1144,6 +1144,8 @@ Filter::streamResetReasonToResponseFlag(Http::StreamResetReason reset_reason) { return StreamInfo::ResponseFlag::UpstreamRemoteReset; case Http::StreamResetReason::ProtocolError: return StreamInfo::ResponseFlag::UpstreamProtocolError; + case Http::StreamResetReason::OverloadManager: + return StreamInfo::ResponseFlag::OverloadManager; } NOT_REACHED_GCOVR_EXCL_LINE; diff --git a/source/common/router/router.h b/source/common/router/router.h index 
df71f4007d132..1a4ab2ae47940 100644 --- a/source/common/router/router.h +++ b/source/common/router/router.h @@ -388,7 +388,7 @@ class Filter : Logger::Loggable, : callbacks_->getUpstreamSocketOptions(); } - Network::TransportSocketOptionsSharedPtr upstreamTransportSocketOptions() const override { + Network::TransportSocketOptionsConstSharedPtr upstreamTransportSocketOptions() const override { return transport_socket_options_; } @@ -549,7 +549,7 @@ class Filter : Logger::Loggable, uint32_t attempt_count_{1}; uint32_t pending_retries_{0}; - Network::TransportSocketOptionsSharedPtr transport_socket_options_; + Network::TransportSocketOptionsConstSharedPtr transport_socket_options_; Network::Socket::OptionsSharedPtr upstream_options_; }; diff --git a/source/common/router/scoped_rds.cc b/source/common/router/scoped_rds.cc index 949c260a1975b..13cee6e2449e0 100644 --- a/source/common/router/scoped_rds.cc +++ b/source/common/router/scoped_rds.cc @@ -393,6 +393,7 @@ void ScopedRdsConfigSubscription::onConfigUpdate( } stats_.all_scopes_.set(scoped_route_map_.size()); stats_.config_reload_.inc(); + stats_.config_reload_time_ms_.set(DateUtil::nowToMilliseconds(factory_context_.timeSource())); } void ScopedRdsConfigSubscription::onRdsConfigUpdate(const std::string& scope_name, @@ -520,7 +521,8 @@ ScopedRdsConfigProvider::ScopedRdsConfigProvider( ScopedRdsConfigSubscriptionSharedPtr&& subscription) : MutableConfigProviderCommonBase(std::move(subscription), ConfigProvider::ApiType::Delta) {} -ProtobufTypes::MessagePtr ScopedRoutesConfigProviderManager::dumpConfigs() const { +ProtobufTypes::MessagePtr +ScopedRoutesConfigProviderManager::dumpConfigs(const Matchers::StringMatcher& name_matcher) const { auto config_dump = std::make_unique(); for (const auto& element : configSubscriptions()) { auto subscription = element.second.lock(); @@ -534,6 +536,9 @@ ProtobufTypes::MessagePtr ScopedRoutesConfigProviderManager::dumpConfigs() const 
dynamic_config->set_name(typed_subscription->name()); const ScopedRouteMap& scoped_route_map = typed_subscription->scopedRouteMap(); for (const auto& it : scoped_route_map) { + if (!name_matcher.match(it.second->configProto().name())) { + continue; + } dynamic_config->mutable_scoped_route_configs()->Add()->PackFrom( API_RECOVER_ORIGINAL(it.second->configProto())); } @@ -549,6 +554,9 @@ ProtobufTypes::MessagePtr ScopedRoutesConfigProviderManager::dumpConfigs() const auto* inline_config = config_dump->mutable_inline_scoped_route_configs()->Add(); inline_config->set_name(static_cast(provider)->name()); for (const auto& config_proto : protos_info.value().config_protos_) { + if (!name_matcher.match(config_proto->name())) { + continue; + } inline_config->mutable_scoped_route_configs()->Add()->PackFrom( API_RECOVER_ORIGINAL(*config_proto)); } diff --git a/source/common/router/scoped_rds.h b/source/common/router/scoped_rds.h index df24117c83ead..b0e5690bee8e8 100644 --- a/source/common/router/scoped_rds.h +++ b/source/common/router/scoped_rds.h @@ -86,6 +86,7 @@ class InlineScopedRoutesConfigProvider : public Envoy::Config::ImmutableConfigPr COUNTER(config_reload) \ COUNTER(update_empty) \ GAUGE(all_scopes, Accumulate) \ + GAUGE(config_reload_time_ms, NeverImport) \ GAUGE(on_demand_scopes, Accumulate) \ GAUGE(active_scopes, Accumulate) @@ -274,7 +275,7 @@ class ScopedRoutesConfigProviderManager : public Envoy::Config::ConfigProviderMa ~ScopedRoutesConfigProviderManager() override = default; // Envoy::Config::ConfigProviderManagerImplBase - ProtobufTypes::MessagePtr dumpConfigs() const override; + ProtobufTypes::MessagePtr dumpConfigs(const Matchers::StringMatcher& name_matcher) const override; // Envoy::Config::ConfigProviderManager Envoy::Config::ConfigProviderPtr diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index 4a81e24d58354..3dd9cf0394129 100644 --- a/source/common/router/upstream_request.cc +++ 
b/source/common/router/upstream_request.cc @@ -234,7 +234,7 @@ void UpstreamRequest::encodeData(Buffer::Instance& data, bool end_stream) { if (!upstream_ || paused_for_connect_) { ENVOY_STREAM_LOG(trace, "buffering {} bytes", *parent_.callbacks(), data.length()); if (!buffered_request_body_) { - buffered_request_body_ = parent_.callbacks()->dispatcher().getWatermarkFactory().create( + buffered_request_body_ = parent_.callbacks()->dispatcher().getWatermarkFactory().createBuffer( [this]() -> void { this->enableDataFromDownstreamForFlowControl(); }, [this]() -> void { this->disableDataFromDownstreamForFlowControl(); }, []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }); diff --git a/source/common/secret/secret_manager_impl.cc b/source/common/secret/secret_manager_impl.cc index 0d583128c6c43..5238ff77ae9e8 100644 --- a/source/common/secret/secret_manager_impl.cc +++ b/source/common/secret/secret_manager_impl.cc @@ -19,8 +19,10 @@ namespace Envoy { namespace Secret { SecretManagerImpl::SecretManagerImpl(Server::ConfigTracker& config_tracker) - : config_tracker_entry_(config_tracker.add("secrets", [this] { return dumpSecretConfigs(); })) { -} + : config_tracker_entry_( + config_tracker.add("secrets", [this](const Matchers::StringMatcher& name_matcher) { + return dumpSecretConfigs(name_matcher); + })) {} void SecretManagerImpl::addStaticSecret( const envoy::extensions::transport_sockets::tls::v3::Secret& secret) { switch (secret.type_case()) { @@ -151,7 +153,8 @@ GenericSecretConfigProviderSharedPtr SecretManagerImpl::findOrCreateGenericSecre secret_provider_context); } -ProtobufTypes::MessagePtr SecretManagerImpl::dumpSecretConfigs() { +ProtobufTypes::MessagePtr +SecretManagerImpl::dumpSecretConfigs(const Matchers::StringMatcher& name_matcher) { // TODO(htuch): unlike other config providers, we're recreating the original // Secrets below. This makes it hard to support API_RECOVER_ORIGINAL()-style // recovery of the original config message. 
As a result, for now we're @@ -161,52 +164,64 @@ ProtobufTypes::MessagePtr SecretManagerImpl::dumpSecretConfigs() { // Handle static tls key/cert providers. for (const auto& cert_iter : static_tls_certificate_providers_) { const auto& tls_cert = cert_iter.second; - auto static_secret = config_dump->mutable_static_secrets()->Add(); - static_secret->set_name(cert_iter.first); ASSERT(tls_cert != nullptr); envoy::extensions::transport_sockets::tls::v3::Secret dump_secret; dump_secret.set_name(cert_iter.first); dump_secret.mutable_tls_certificate()->MergeFrom(*tls_cert->secret()); + if (!name_matcher.match(dump_secret.name())) { + continue; + } MessageUtil::redact(dump_secret); + auto static_secret = config_dump->mutable_static_secrets()->Add(); + static_secret->set_name(cert_iter.first); static_secret->mutable_secret()->PackFrom(dump_secret); } // Handle static certificate validation context providers. for (const auto& context_iter : static_certificate_validation_context_providers_) { const auto& validation_context = context_iter.second; - auto static_secret = config_dump->mutable_static_secrets()->Add(); - static_secret->set_name(context_iter.first); ASSERT(validation_context != nullptr); envoy::extensions::transport_sockets::tls::v3::Secret dump_secret; dump_secret.set_name(context_iter.first); dump_secret.mutable_validation_context()->MergeFrom(*validation_context->secret()); + if (!name_matcher.match(dump_secret.name())) { + continue; + } + auto static_secret = config_dump->mutable_static_secrets()->Add(); + static_secret->set_name(context_iter.first); static_secret->mutable_secret()->PackFrom(dump_secret); } // Handle static session keys providers. 
for (const auto& context_iter : static_session_ticket_keys_providers_) { const auto& session_ticket_keys = context_iter.second; - auto static_secret = config_dump->mutable_static_secrets()->Add(); - static_secret->set_name(context_iter.first); ASSERT(session_ticket_keys != nullptr); envoy::extensions::transport_sockets::tls::v3::Secret dump_secret; dump_secret.set_name(context_iter.first); for (const auto& key : session_ticket_keys->secret()->keys()) { dump_secret.mutable_session_ticket_keys()->add_keys()->MergeFrom(key); } + if (!name_matcher.match(dump_secret.name())) { + continue; + } MessageUtil::redact(dump_secret); + auto static_secret = config_dump->mutable_static_secrets()->Add(); + static_secret->set_name(context_iter.first); static_secret->mutable_secret()->PackFrom(dump_secret); } // Handle static generic secret providers. for (const auto& secret_iter : static_generic_secret_providers_) { const auto& generic_secret = secret_iter.second; - auto static_secret = config_dump->mutable_static_secrets()->Add(); - static_secret->set_name(secret_iter.first); ASSERT(generic_secret != nullptr); envoy::extensions::transport_sockets::tls::v3::Secret dump_secret; dump_secret.set_name(secret_iter.first); dump_secret.mutable_generic_secret()->MergeFrom(*generic_secret->secret()); + if (!name_matcher.match(dump_secret.name())) { + continue; + } + auto static_secret = config_dump->mutable_static_secrets()->Add(); + static_secret->set_name(secret_iter.first); MessageUtil::redact(dump_secret); static_secret->mutable_secret()->PackFrom(dump_secret); } @@ -216,25 +231,28 @@ ProtobufTypes::MessagePtr SecretManagerImpl::dumpSecretConfigs() { for (const auto& cert_secrets : providers) { const auto& secret_data = cert_secrets->secretData(); const auto& tls_cert = cert_secrets->secret(); - envoy::admin::v3::SecretsConfigDump::DynamicSecret* dump_secret; const bool secret_ready = tls_cert != nullptr; - if (secret_ready) { - dump_secret = 
config_dump->mutable_dynamic_active_secrets()->Add(); - } else { - dump_secret = config_dump->mutable_dynamic_warming_secrets()->Add(); - } - dump_secret->set_name(secret_data.resource_name_); envoy::extensions::transport_sockets::tls::v3::Secret secret; secret.set_name(secret_data.resource_name_); ProtobufWkt::Timestamp last_updated_ts; TimestampUtil::systemClockToTimestamp(secret_data.last_updated_, last_updated_ts); - dump_secret->set_version_info(secret_data.version_info_); - *dump_secret->mutable_last_updated() = last_updated_ts; secret.set_name(secret_data.resource_name_); if (secret_ready) { secret.mutable_tls_certificate()->MergeFrom(*tls_cert); } + if (!name_matcher.match(secret.name())) { + continue; + } MessageUtil::redact(secret); + envoy::admin::v3::SecretsConfigDump::DynamicSecret* dump_secret; + if (secret_ready) { + dump_secret = config_dump->mutable_dynamic_active_secrets()->Add(); + } else { + dump_secret = config_dump->mutable_dynamic_warming_secrets()->Add(); + } + dump_secret->set_name(secret_data.resource_name_); + dump_secret->set_version_info(secret_data.version_info_); + *dump_secret->mutable_last_updated() = last_updated_ts; dump_secret->mutable_secret()->PackFrom(secret); } @@ -243,23 +261,28 @@ ProtobufTypes::MessagePtr SecretManagerImpl::dumpSecretConfigs() { for (const auto& validation_context_secret : context_secret_provider) { const auto& secret_data = validation_context_secret->secretData(); const auto& validation_context = validation_context_secret->secret(); - envoy::admin::v3::SecretsConfigDump::DynamicSecret* dump_secret; const bool secret_ready = validation_context != nullptr; + + envoy::extensions::transport_sockets::tls::v3::Secret secret; + if (secret_ready) { - dump_secret = config_dump->mutable_dynamic_active_secrets()->Add(); - } else { - dump_secret = config_dump->mutable_dynamic_warming_secrets()->Add(); + secret.mutable_validation_context()->MergeFrom(*validation_context); } - 
dump_secret->set_name(secret_data.resource_name_); - envoy::extensions::transport_sockets::tls::v3::Secret secret; secret.set_name(secret_data.resource_name_); + if (!name_matcher.match(secret.name())) { + continue; + } ProtobufWkt::Timestamp last_updated_ts; + envoy::admin::v3::SecretsConfigDump::DynamicSecret* dump_secret; TimestampUtil::systemClockToTimestamp(secret_data.last_updated_, last_updated_ts); - dump_secret->set_version_info(secret_data.version_info_); - *dump_secret->mutable_last_updated() = last_updated_ts; if (secret_ready) { - secret.mutable_validation_context()->MergeFrom(*validation_context); + dump_secret = config_dump->mutable_dynamic_active_secrets()->Add(); + } else { + dump_secret = config_dump->mutable_dynamic_warming_secrets()->Add(); } + dump_secret->set_version_info(secret_data.version_info_); + *dump_secret->mutable_last_updated() = last_updated_ts; + dump_secret->set_name(secret_data.resource_name_); dump_secret->mutable_secret()->PackFrom(secret); } @@ -268,23 +291,26 @@ ProtobufTypes::MessagePtr SecretManagerImpl::dumpSecretConfigs() { for (const auto& stek_secrets : stek_providers) { const auto& secret_data = stek_secrets->secretData(); const auto& tls_stek = stek_secrets->secret(); - envoy::admin::v3::SecretsConfigDump::DynamicSecret* dump_secret; const bool secret_ready = tls_stek != nullptr; + envoy::extensions::transport_sockets::tls::v3::Secret secret; + secret.set_name(secret_data.resource_name_); + if (secret_ready) { + secret.mutable_session_ticket_keys()->MergeFrom(*tls_stek); + } + if (!name_matcher.match(secret.name())) { + continue; + } + ProtobufWkt::Timestamp last_updated_ts; + TimestampUtil::systemClockToTimestamp(secret_data.last_updated_, last_updated_ts); + envoy::admin::v3::SecretsConfigDump::DynamicSecret* dump_secret; if (secret_ready) { dump_secret = config_dump->mutable_dynamic_active_secrets()->Add(); } else { dump_secret = config_dump->mutable_dynamic_warming_secrets()->Add(); } 
dump_secret->set_name(secret_data.resource_name_); - envoy::extensions::transport_sockets::tls::v3::Secret secret; - secret.set_name(secret_data.resource_name_); - ProtobufWkt::Timestamp last_updated_ts; - TimestampUtil::systemClockToTimestamp(secret_data.last_updated_, last_updated_ts); dump_secret->set_version_info(secret_data.version_info_); *dump_secret->mutable_last_updated() = last_updated_ts; - if (secret_ready) { - secret.mutable_session_ticket_keys()->MergeFrom(*tls_stek); - } MessageUtil::redact(secret); dump_secret->mutable_secret()->PackFrom(secret); } @@ -294,23 +320,26 @@ ProtobufTypes::MessagePtr SecretManagerImpl::dumpSecretConfigs() { for (const auto& provider : generic_secret_providers) { const auto& secret_data = provider->secretData(); const auto& generic_secret = provider->secret(); - envoy::admin::v3::SecretsConfigDump::DynamicSecret* dump_secret; const bool secret_ready = generic_secret != nullptr; + envoy::extensions::transport_sockets::tls::v3::Secret secret; + secret.set_name(secret_data.resource_name_); + if (secret_ready) { + secret.mutable_generic_secret()->MergeFrom(*generic_secret); + } + if (!name_matcher.match(secret.name())) { + continue; + } + ProtobufWkt::Timestamp last_updated_ts; + TimestampUtil::systemClockToTimestamp(secret_data.last_updated_, last_updated_ts); + envoy::admin::v3::SecretsConfigDump::DynamicSecret* dump_secret; if (secret_ready) { dump_secret = config_dump->mutable_dynamic_active_secrets()->Add(); } else { dump_secret = config_dump->mutable_dynamic_warming_secrets()->Add(); } dump_secret->set_name(secret_data.resource_name_); - envoy::extensions::transport_sockets::tls::v3::Secret secret; - secret.set_name(secret_data.resource_name_); - ProtobufWkt::Timestamp last_updated_ts; - TimestampUtil::systemClockToTimestamp(secret_data.last_updated_, last_updated_ts); dump_secret->set_version_info(secret_data.version_info_); *dump_secret->mutable_last_updated() = last_updated_ts; - if (secret_ready) { - 
secret.mutable_generic_secret()->MergeFrom(*generic_secret); - } MessageUtil::redact(secret); dump_secret->mutable_secret()->PackFrom(secret); } diff --git a/source/common/secret/secret_manager_impl.h b/source/common/secret/secret_manager_impl.h index 66ca9e4435818..4537c80705c36 100644 --- a/source/common/secret/secret_manager_impl.h +++ b/source/common/secret/secret_manager_impl.h @@ -68,7 +68,7 @@ class SecretManagerImpl : public SecretManager { Server::Configuration::TransportSocketFactoryContext& secret_provider_context) override; private: - ProtobufTypes::MessagePtr dumpSecretConfigs(); + ProtobufTypes::MessagePtr dumpSecretConfigs(const Matchers::StringMatcher& name_matcher); template class DynamicSecretProviders : public Logger::Loggable { diff --git a/source/common/ssl/certificate_validation_context_config_impl.cc b/source/common/ssl/certificate_validation_context_config_impl.cc index 4782127e99ecc..40dc20f6ef3a3 100644 --- a/source/common/ssl/certificate_validation_context_config_impl.cc +++ b/source/common/ssl/certificate_validation_context_config_impl.cc @@ -22,9 +22,6 @@ CertificateValidationContextConfigImpl::CertificateValidationContextConfigImpl( certificate_revocation_list_path_( Config::DataSource::getPath(config.crl()) .value_or(certificate_revocation_list_.empty() ? 
EMPTY_STRING : INLINE_STRING)), - verify_subject_alt_name_list_( - config.hidden_envoy_deprecated_verify_subject_alt_name().begin(), - config.hidden_envoy_deprecated_verify_subject_alt_name().end()), subject_alt_name_matchers_(config.match_subject_alt_names().begin(), config.match_subject_alt_names().end()), verify_certificate_hash_list_(config.verify_certificate_hash().begin(), @@ -44,7 +41,7 @@ CertificateValidationContextConfigImpl::CertificateValidationContextConfigImpl( throw EnvoyException(fmt::format("Failed to load CRL from {} without trusted CA", certificateRevocationListPath())); } - if (!subject_alt_name_matchers_.empty() || !verify_subject_alt_name_list_.empty()) { + if (!subject_alt_name_matchers_.empty()) { throw EnvoyException("SAN-based verification of peer certificates without " "trusted CA is insecure and not allowed"); } diff --git a/source/common/ssl/certificate_validation_context_config_impl.h b/source/common/ssl/certificate_validation_context_config_impl.h index 56765baa74342..7cf045a351841 100644 --- a/source/common/ssl/certificate_validation_context_config_impl.h +++ b/source/common/ssl/certificate_validation_context_config_impl.h @@ -24,9 +24,6 @@ class CertificateValidationContextConfigImpl : public CertificateValidationConte const std::string& certificateRevocationListPath() const final { return certificate_revocation_list_path_; } - const std::vector& verifySubjectAltNameList() const override { - return verify_subject_alt_name_list_; - } const std::vector& subjectAltNameMatchers() const override { return subject_alt_name_matchers_; @@ -56,7 +53,6 @@ class CertificateValidationContextConfigImpl : public CertificateValidationConte const std::string ca_cert_path_; const std::string certificate_revocation_list_; const std::string certificate_revocation_list_path_; - const std::vector verify_subject_alt_name_list_; const std::vector subject_alt_name_matchers_; const std::vector verify_certificate_hash_list_; const std::vector 
verify_certificate_spki_list_; diff --git a/source/common/stream_info/stream_info_impl.h b/source/common/stream_info/stream_info_impl.h index 633178d2c361b..1a41097e51420 100644 --- a/source/common/stream_info/stream_info_impl.h +++ b/source/common/stream_info/stream_info_impl.h @@ -233,12 +233,6 @@ struct StreamInfoImpl : public StreamInfo { upstream_filter_state_ = filter_state; } - void setRequestedServerName(absl::string_view requested_server_name) override { - requested_server_name_ = std::string(requested_server_name); - } - - const std::string& requestedServerName() const override { return requested_server_name_; } - void setUpstreamTransportFailureReason(absl::string_view failure_reason) override { upstream_transport_failure_reason_ = std::string(failure_reason); } diff --git a/source/common/stream_info/utility.cc b/source/common/stream_info/utility.cc index 8ea8da5158b48..bda0687672900 100644 --- a/source/common/stream_info/utility.cc +++ b/source/common/stream_info/utility.cc @@ -21,7 +21,7 @@ const std::string ResponseFlagUtils::toShortString(const StreamInfo& stream_info } absl::flat_hash_map ResponseFlagUtils::getFlagMap() { - static_assert(ResponseFlag::LastFlag == 0x1000000, + static_assert(ResponseFlag::LastFlag == 0x2000000, "A flag has been added. 
Add the new flag to ALL_RESPONSE_STRING_FLAGS."); absl::flat_hash_map res; for (auto [str, flag] : ResponseFlagUtils::ALL_RESPONSE_STRING_FLAGS) { diff --git a/source/common/stream_info/utility.h b/source/common/stream_info/utility.h index 9e946af380986..f6c39b8bd25a5 100644 --- a/source/common/stream_info/utility.h +++ b/source/common/stream_info/utility.h @@ -44,6 +44,7 @@ class ResponseFlagUtils { constexpr static absl::string_view DURATION_TIMEOUT = "DT"; constexpr static absl::string_view UPSTREAM_PROTOCOL_ERROR = "UPE"; constexpr static absl::string_view NO_CLUSTER_FOUND = "NC"; + constexpr static absl::string_view OVERLOAD_MANAGER = "OM"; static constexpr std::array ALL_RESPONSE_STRING_FLAGS{ FlagStringAndEnum{FAILED_LOCAL_HEALTH_CHECK, ResponseFlag::FailedLocalHealthCheck}, @@ -74,6 +75,7 @@ class ResponseFlagUtils { FlagStringAndEnum{DURATION_TIMEOUT, ResponseFlag::DurationTimeout}, FlagStringAndEnum{UPSTREAM_PROTOCOL_ERROR, ResponseFlag::UpstreamProtocolError}, FlagStringAndEnum{NO_CLUSTER_FOUND, ResponseFlag::NoClusterFound}, + FlagStringAndEnum{OVERLOAD_MANAGER, ResponseFlag::OverloadManager}, }; private: diff --git a/source/common/tcp/conn_pool.cc b/source/common/tcp/conn_pool.cc index dd0ae46994ff1..a4f33086956c6 100644 --- a/source/common/tcp/conn_pool.cc +++ b/source/common/tcp/conn_pool.cc @@ -64,15 +64,14 @@ void ActiveTcpClient::onEvent(Network::ConnectionEvent event) { // This is also necessary for prefetch to be used with such protocols. if (event == Network::ConnectionEvent::Connected) { connection_->readDisable(true); + connection_->streamInfo().setDownstreamSslConnection(connection_->ssl()); } Envoy::ConnectionPool::ActiveClient::onEvent(event); if (callbacks_) { // Do not pass the Connected event to any session which registered during onEvent above. // Consumers of connection pool connections assume they are receiving already connected // connections. 
- if (event == Network::ConnectionEvent::Connected) { - connection_->streamInfo().setDownstreamSslConnection(connection_->ssl()); - } else { + if (event != Network::ConnectionEvent::Connected) { if (tcp_connection_data_) { Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(parent_.host(), event); } diff --git a/source/common/tcp/conn_pool.h b/source/common/tcp/conn_pool.h index e1208f7205359..e8c2f24c026e4 100644 --- a/source/common/tcp/conn_pool.h +++ b/source/common/tcp/conn_pool.h @@ -139,7 +139,7 @@ class ConnPoolImpl : public Envoy::ConnectionPool::ConnPoolImplBase, ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsSharedPtr transport_socket_options, + Network::TransportSocketOptionsConstSharedPtr transport_socket_options, Upstream::ClusterConnectivityState& state) : Envoy::ConnectionPool::ConnPoolImplBase(host, priority, dispatcher, options, transport_socket_options, state) {} @@ -204,10 +204,10 @@ class ConnPoolImpl : public Envoy::ConnectionPool::ConnPoolImplBase, } void onPoolFailure(const Upstream::HostDescriptionConstSharedPtr& host_description, - absl::string_view, ConnectionPool::PoolFailureReason reason, + absl::string_view failure_reason, ConnectionPool::PoolFailureReason reason, Envoy::ConnectionPool::AttachContext& context) override { auto* callbacks = typedContext(context).callbacks_; - callbacks->onPoolFailure(reason, host_description); + callbacks->onPoolFailure(reason, failure_reason, host_description); } // These two functions exist for testing parity between old and new Tcp Connection Pools. 
diff --git a/source/common/tcp/original_conn_pool.cc b/source/common/tcp/original_conn_pool.cc index 33796607c4489..4f4da573b940d 100644 --- a/source/common/tcp/original_conn_pool.cc +++ b/source/common/tcp/original_conn_pool.cc @@ -15,7 +15,7 @@ namespace Tcp { OriginalConnPoolImpl::OriginalConnPoolImpl( Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsSharedPtr transport_socket_options) + Network::TransportSocketOptionsConstSharedPtr transport_socket_options) : dispatcher_(dispatcher), host_(host), priority_(priority), socket_options_(options), transport_socket_options_(transport_socket_options), upstream_ready_cb_(dispatcher_.createSchedulableCallback([this]() { onUpstreamReady(); })) {} @@ -128,7 +128,7 @@ OriginalConnPoolImpl::newConnection(ConnectionPool::Callbacks& callbacks) { return pending_requests_.front().get(); } else { ENVOY_LOG(debug, "max pending requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, nullptr); + callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, "", nullptr); host_->cluster().stats().upstream_rq_pending_overflow_.inc(); return nullptr; } @@ -184,7 +184,7 @@ void OriginalConnPoolImpl::onConnectionEvent(ActiveConn& conn, Network::Connecti PendingRequestPtr request = pending_requests_to_purge.front()->removeFromList(pending_requests_to_purge); host_->cluster().stats().upstream_rq_pending_failure_eject_.inc(); - request->callbacks_.onPoolFailure(reason, conn.real_host_description_); + request->callbacks_.onPoolFailure(reason, "", conn.real_host_description_); } } diff --git a/source/common/tcp/original_conn_pool.h b/source/common/tcp/original_conn_pool.h index d6b021bcf3e29..b4c5f4bca5fd5 100644 --- a/source/common/tcp/original_conn_pool.h +++ b/source/common/tcp/original_conn_pool.h @@ -24,7 +24,7 @@ class OriginalConnPoolImpl : 
Logger::Loggable, public Connecti OriginalConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsSharedPtr transport_socket_options); + Network::TransportSocketOptionsConstSharedPtr transport_socket_options); ~OriginalConnPoolImpl() override; @@ -154,7 +154,7 @@ class OriginalConnPoolImpl : Logger::Loggable, public Connecti Upstream::HostConstSharedPtr host_; Upstream::ResourcePriority priority_; const Network::ConnectionSocket::OptionsSharedPtr socket_options_; - Network::TransportSocketOptionsSharedPtr transport_socket_options_; + Network::TransportSocketOptionsConstSharedPtr transport_socket_options_; std::list pending_conns_; // conns awaiting connected event std::list ready_conns_; // conns ready for assignment diff --git a/source/common/tcp_proxy/tcp_proxy.cc b/source/common/tcp_proxy/tcp_proxy.cc index 37f71070dd9d2..ce3f18d7d65c2 100644 --- a/source/common/tcp_proxy/tcp_proxy.cc +++ b/source/common/tcp_proxy/tcp_proxy.cc @@ -655,9 +655,9 @@ void Filter::onUpstreamConnection() { read_callbacks_->upstreamHost()->outlierDetector().putResult( Upstream::Outlier::Result::LocalOriginConnectSuccessFinal); - getStreamInfo().setRequestedServerName(read_callbacks_->connection().requestedServerName()); ENVOY_CONN_LOG(debug, "TCP:onUpstreamEvent(), requestedServerName: {}", - read_callbacks_->connection(), getStreamInfo().requestedServerName()); + read_callbacks_->connection(), + getStreamInfo().downstreamAddressProvider().requestedServerName()); if (config_->idleTimeout()) { // The idle_timer_ can be moved to a Drainer, so related callbacks call into diff --git a/source/common/tcp_proxy/tcp_proxy.h b/source/common/tcp_proxy/tcp_proxy.h index e51d3e7ffbcdf..06c5572a313f2 100644 --- a/source/common/tcp_proxy/tcp_proxy.h +++ b/source/common/tcp_proxy/tcp_proxy.h @@ -278,7 +278,7 @@ class Filter : public 
Network::ReadFilter, return &read_callbacks_->connection(); } - Network::TransportSocketOptionsSharedPtr upstreamTransportSocketOptions() const override { + Network::TransportSocketOptionsConstSharedPtr upstreamTransportSocketOptions() const override { return transport_socket_options_; } @@ -380,7 +380,7 @@ class Filter : public Network::ReadFilter, std::unique_ptr generic_conn_pool_; RouteConstSharedPtr route_; Router::MetadataMatchCriteriaConstPtr metadata_match_criteria_; - Network::TransportSocketOptionsSharedPtr transport_socket_options_; + Network::TransportSocketOptionsConstSharedPtr transport_socket_options_; Network::Socket::OptionsSharedPtr upstream_options_; uint32_t connect_attempts_{}; bool connecting_{}; diff --git a/source/common/tcp_proxy/upstream.cc b/source/common/tcp_proxy/upstream.cc index 3e866bf94f830..c20d818b36323 100644 --- a/source/common/tcp_proxy/upstream.cc +++ b/source/common/tcp_proxy/upstream.cc @@ -172,7 +172,7 @@ void TcpConnPool::newStream(GenericConnectionPoolCallbacks& callbacks) { } } -void TcpConnPool::onPoolFailure(ConnectionPool::PoolFailureReason reason, +void TcpConnPool::onPoolFailure(ConnectionPool::PoolFailureReason reason, absl::string_view, Upstream::HostDescriptionConstSharedPtr host) { upstream_handle_ = nullptr; callbacks_->onGenericPoolFailure(reason, host); diff --git a/source/common/tcp_proxy/upstream.h b/source/common/tcp_proxy/upstream.h index 4da9830209d96..3f2bfd9c67785 100644 --- a/source/common/tcp_proxy/upstream.h +++ b/source/common/tcp_proxy/upstream.h @@ -29,6 +29,7 @@ class TcpConnPool : public GenericConnPool, public Tcp::ConnectionPool::Callback // Tcp::ConnectionPool::Callbacks void onPoolFailure(ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, Upstream::HostDescriptionConstSharedPtr host) override; void onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, Upstream::HostDescriptionConstSharedPtr host) override; diff --git 
a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 4ffb69b48f545..b62c3261076d1 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -272,7 +272,10 @@ ClusterManagerImpl::ClusterManagerImpl( cm_stats_(generateStats(stats)), init_helper_(*this, [this](ClusterManagerCluster& cluster) { onClusterInit(cluster); }), config_tracker_entry_( - admin.getConfigTracker().add("clusters", [this] { return dumpClusterConfigs(); })), + admin.getConfigTracker().add("clusters", + [this](const Matchers::StringMatcher& name_matcher) { + return dumpClusterConfigs(name_matcher); + })), time_source_(main_thread_dispatcher.timeSource()), dispatcher_(main_thread_dispatcher), http_context_(http_context), router_context_(router_context), cluster_stat_names_(stats.symbolTable()), @@ -1045,11 +1048,15 @@ ClusterManagerImpl::addThreadLocalClusterUpdateCallbacks(ClusterUpdateCallbacks& return std::make_unique(cb, cluster_manager.update_callbacks_); } -ProtobufTypes::MessagePtr ClusterManagerImpl::dumpClusterConfigs() { +ProtobufTypes::MessagePtr +ClusterManagerImpl::dumpClusterConfigs(const Matchers::StringMatcher& name_matcher) { auto config_dump = std::make_unique(); config_dump->set_version_info(cds_api_ != nullptr ? 
cds_api_->versionInfo() : ""); for (const auto& active_cluster_pair : active_clusters_) { const auto& cluster = *active_cluster_pair.second; + if (!name_matcher.match(cluster.cluster_config_.name())) { + continue; + } if (!cluster.added_via_api_) { auto& static_cluster = *config_dump->mutable_static_clusters()->Add(); static_cluster.mutable_cluster()->PackFrom(API_RECOVER_ORIGINAL(cluster.cluster_config_)); @@ -1066,6 +1073,9 @@ ProtobufTypes::MessagePtr ClusterManagerImpl::dumpClusterConfigs() { for (const auto& warming_cluster_pair : warming_clusters_) { const auto& cluster = *warming_cluster_pair.second; + if (!name_matcher.match(cluster.cluster_config_.name())) { + continue; + } auto& dynamic_cluster = *config_dump->mutable_dynamic_warming_clusters()->Add(); dynamic_cluster.set_version_info(cluster.version_info_); dynamic_cluster.mutable_cluster()->PackFrom(API_RECOVER_ORIGINAL(cluster.cluster_config_)); @@ -1525,8 +1535,8 @@ Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( const absl::optional& alternate_protocol_options, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, TimeSource& source, - ClusterConnectivityState& state) { + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, + TimeSource& source, ClusterConnectivityState& state) { if (protocols.size() == 3 && runtime_.snapshot().featureEnabled("upstream.use_http3", 100)) { ASSERT(contains(protocols, {Http::Protocol::Http11, Http::Protocol::Http2, Http::Protocol::Http3})); @@ -1576,7 +1586,7 @@ Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( Tcp::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateTcpConnPool( Event::Dispatcher& dispatcher, HostConstSharedPtr host, ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsSharedPtr transport_socket_options, + 
Network::TransportSocketOptionsConstSharedPtr transport_socket_options, ClusterConnectivityState& state) { if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_tcp_connection_pool")) { return std::make_unique(dispatcher, host, priority, options, diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index 18073d43a00df..c3d436eb3f9ac 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -74,13 +74,13 @@ class ProdClusterManagerFactory : public ClusterManagerFactory { const absl::optional& alternate_protocol_options, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, TimeSource& time_source, ClusterConnectivityState& state) override; Tcp::ConnectionPool::InstancePtr allocateTcpConnPool(Event::Dispatcher& dispatcher, HostConstSharedPtr host, ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsSharedPtr transport_socket_options, + Network::TransportSocketOptionsConstSharedPtr transport_socket_options, ClusterConnectivityState& state) override; std::pair clusterFromProto(const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cm, @@ -565,7 +565,7 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable(lb_endpoint.metadata()), @@ -47,7 +48,7 @@ class LogicalHost : public HostImpl { // Upstream::Host CreateConnectionData createConnection( Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsSharedPtr transport_socket_options) const override; + Network::TransportSocketOptionsConstSharedPtr transport_socket_options) const override; // Upstream::HostDescription Network::Address::InstanceConstSharedPtr address() const override 
{ @@ -60,7 +61,7 @@ class LogicalHost : public HostImpl { } private: - const Network::TransportSocketOptionsSharedPtr override_transport_socket_options_; + const Network::TransportSocketOptionsConstSharedPtr override_transport_socket_options_; mutable absl::Mutex address_lock_; }; diff --git a/source/common/upstream/strict_dns_cluster.cc b/source/common/upstream/strict_dns_cluster.cc index 6663f63cd9e5a..c776e070a0085 100644 --- a/source/common/upstream/strict_dns_cluster.cc +++ b/source/common/upstream/strict_dns_cluster.cc @@ -54,7 +54,7 @@ void StrictDnsClusterImpl::startPreInit() { } // If the config provides no endpoints, the cluster is initialized immediately as if all hosts are // resolved in failure. - if (resolve_targets_.empty()) { + if (resolve_targets_.empty() || !wait_for_warm_on_init_) { onPreInitComplete(); } } diff --git a/source/common/upstream/subset_lb.h b/source/common/upstream/subset_lb.h index c84c572d78baf..d64f2f54839e4 100644 --- a/source/common/upstream/subset_lb.h +++ b/source/common/upstream/subset_lb.h @@ -158,7 +158,7 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::LoggableupstreamSocketOptions(); } - Network::TransportSocketOptionsSharedPtr upstreamTransportSocketOptions() const override { + Network::TransportSocketOptionsConstSharedPtr upstreamTransportSocketOptions() const override { return wrapped_->upstreamTransportSocketOptions(); } diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index d710972e00311..83169b162d354 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -280,7 +280,7 @@ Network::TransportSocketFactory& HostDescriptionImpl::resolveTransportSocketFact Host::CreateConnectionData HostImpl::createConnection( Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsSharedPtr transport_socket_options) const { + 
Network::TransportSocketOptionsConstSharedPtr transport_socket_options) const { return {createConnection(dispatcher, cluster(), address(), transportSocketFactory(), options, transport_socket_options), shared_from_this()}; @@ -306,7 +306,7 @@ void HostImpl::setEdsHealthFlag(envoy::config::core::v3::HealthStatus health_sta Host::CreateConnectionData HostImpl::createHealthCheckConnection( Event::Dispatcher& dispatcher, - Network::TransportSocketOptionsSharedPtr transport_socket_options, + Network::TransportSocketOptionsConstSharedPtr transport_socket_options, const envoy::config::core::v3::Metadata* metadata) const { Network::TransportSocketFactory& factory = @@ -322,7 +322,7 @@ HostImpl::createConnection(Event::Dispatcher& dispatcher, const ClusterInfo& clu const Network::Address::InstanceConstSharedPtr& address, Network::TransportSocketFactory& socket_factory, const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsSharedPtr transport_socket_options) { + Network::TransportSocketOptionsConstSharedPtr transport_socket_options) { Network::ConnectionSocket::OptionsSharedPtr connection_options; if (cluster.clusterSocketOptions() != nullptr) { if (options) { @@ -944,6 +944,7 @@ ClusterImplBase::ClusterImplBase( Stats::ScopePtr&& stats_scope, bool added_via_api, TimeSource& time_source) : init_manager_(fmt::format("Cluster {}", cluster.name())), init_watcher_("ClusterImplBase", [this]() { onInitDone(); }), runtime_(runtime), + wait_for_warm_on_init_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(cluster, wait_for_warm_on_init, true)), time_source_(time_source), local_cluster_(factory_context.clusterManager().localClusterName().value_or("") == cluster.name()), diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index 6d62a26b6be65..ec2eb3e147392 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -86,6 +86,7 @@ class HostDescriptionImpl : virtual public 
HostDescription, uint32_t priority, TimeSource& time_source); Network::TransportSocketFactory& transportSocketFactory() const override { + absl::ReaderMutexLock lock(&metadata_mutex_); return socket_factory_; } @@ -103,8 +104,13 @@ class HostDescriptionImpl : virtual public HostDescription, return metadata_; } void metadata(MetadataConstSharedPtr new_metadata) override { - absl::WriterMutexLock lock(&metadata_mutex_); - metadata_ = new_metadata; + auto& new_socket_factory = resolveTransportSocketFactory(address_, new_metadata.get()); + { + absl::WriterMutexLock lock(&metadata_mutex_); + metadata_ = new_metadata; + // Update data members dependent on metadata. + socket_factory_ = new_socket_factory; + } } const ClusterInfo& cluster() const override { return *cluster_; } @@ -178,7 +184,8 @@ class HostDescriptionImpl : virtual public HostDescription, Outlier::DetectorHostMonitorPtr outlier_detector_; HealthCheckHostMonitorPtr health_checker_; std::atomic priority_; - Network::TransportSocketFactory& socket_factory_; + std::reference_wrapper + socket_factory_ ABSL_GUARDED_BY(metadata_mutex_); const MonotonicTime creation_time_; }; @@ -209,11 +216,11 @@ class HostImpl : public HostDescriptionImpl, } CreateConnectionData createConnection( Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsSharedPtr transport_socket_options) const override; - CreateConnectionData - createHealthCheckConnection(Event::Dispatcher& dispatcher, - Network::TransportSocketOptionsSharedPtr transport_socket_options, - const envoy::config::core::v3::Metadata* metadata) const override; + Network::TransportSocketOptionsConstSharedPtr transport_socket_options) const override; + CreateConnectionData createHealthCheckConnection( + Event::Dispatcher& dispatcher, + Network::TransportSocketOptionsConstSharedPtr transport_socket_options, + const envoy::config::core::v3::Metadata* metadata) const override; std::vector> gauges() const 
override { @@ -260,7 +267,7 @@ class HostImpl : public HostDescriptionImpl, const Network::Address::InstanceConstSharedPtr& address, Network::TransportSocketFactory& socket_factory, const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsSharedPtr transport_socket_options); + Network::TransportSocketOptionsConstSharedPtr transport_socket_options); private: void setEdsHealthFlag(envoy::config::core::v3::HealthStatus health_status); @@ -864,6 +871,7 @@ class ClusterImplBase : public Cluster, protected Logger::Loggable( - server, envoy::config::listener::v3::Listener::MODIFY_ONLY); + server, envoy::config::listener::v3::Listener::MODIFY_ONLY, server.dispatcher()); } Runtime::LoaderPtr ProdComponentFactory::createRuntime(Server::Instance& server, diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc b/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc index 3d924c0cd47cb..64e9923aaf450 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc +++ b/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc @@ -37,7 +37,7 @@ void Utility::responseFlagsToAccessLogResponseFlags( envoy::data::accesslog::v3::AccessLogCommon& common_access_log, const StreamInfo::StreamInfo& stream_info) { - static_assert(StreamInfo::ResponseFlag::LastFlag == 0x1000000, + static_assert(StreamInfo::ResponseFlag::LastFlag == 0x2000000, "A flag has been added. 
Fix this code."); if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::FailedLocalHealthCheck)) { @@ -140,6 +140,10 @@ void Utility::responseFlagsToAccessLogResponseFlags( if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::NoClusterFound)) { common_access_log.mutable_response_flags()->set_no_cluster_found(true); } + + if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::OverloadManager)) { + common_access_log.mutable_response_flags()->set_overload_manager(true); + } } void Utility::extractCommonAccessLogProperties( @@ -167,7 +171,8 @@ void Utility::extractCommonAccessLogProperties( const Ssl::ConnectionInfoConstSharedPtr downstream_ssl_connection = stream_info.downstreamSslConnection(); - tls_properties->set_tls_sni_hostname(stream_info.requestedServerName()); + tls_properties->set_tls_sni_hostname( + std::string(stream_info.downstreamAddressProvider().requestedServerName())); auto* local_properties = tls_properties->mutable_local_certificate_properties(); for (const auto& uri_san : downstream_ssl_connection->uriSanLocalCertificate()) { diff --git a/source/extensions/access_loggers/wasm/config.cc b/source/extensions/access_loggers/wasm/config.cc index 0c8fb063abb04..d200351dc227d 100644 --- a/source/extensions/access_loggers/wasm/config.cc +++ b/source/extensions/access_loggers/wasm/config.cc @@ -14,6 +14,8 @@ namespace Extensions { namespace AccessLoggers { namespace Wasm { +using Common::Wasm::PluginHandleSharedPtrThreadLocal; + AccessLog::InstanceSharedPtr WasmAccessLogFactory::createAccessLogInstance( const Protobuf::Message& proto_config, AccessLog::FilterPtr&& filter, Server::Configuration::CommonFactoryContext& context) { @@ -30,9 +32,10 @@ AccessLog::InstanceSharedPtr WasmAccessLogFactory::createAccessLogInstance( auto callback = [access_log, &context, plugin](Common::Wasm::WasmHandleSharedPtr base_wasm) { // NB: the Slot set() call doesn't complete inline, so all arguments must outlive this call. 
auto tls_slot = - ThreadLocal::TypedSlot::makeUnique(context.threadLocal()); + ThreadLocal::TypedSlot::makeUnique(context.threadLocal()); tls_slot->set([base_wasm, plugin](Event::Dispatcher& dispatcher) { - return Common::Wasm::getOrCreateThreadLocalPlugin(base_wasm, plugin, dispatcher); + return std::make_shared( + Common::Wasm::getOrCreateThreadLocalPlugin(base_wasm, plugin, dispatcher)); }); access_log->setTlsSlot(std::move(tls_slot)); }; diff --git a/source/extensions/access_loggers/wasm/wasm_access_log_impl.h b/source/extensions/access_loggers/wasm/wasm_access_log_impl.h index ec7fb29797dea..64962aaff52ab 100644 --- a/source/extensions/access_loggers/wasm/wasm_access_log_impl.h +++ b/source/extensions/access_loggers/wasm/wasm_access_log_impl.h @@ -10,12 +10,13 @@ namespace Extensions { namespace AccessLoggers { namespace Wasm { -using Envoy::Extensions::Common::Wasm::PluginHandle; +using Common::Wasm::PluginHandleSharedPtrThreadLocal; using Envoy::Extensions::Common::Wasm::PluginSharedPtr; class WasmAccessLog : public AccessLog::Instance { public: - WasmAccessLog(const PluginSharedPtr& plugin, ThreadLocal::TypedSlotPtr&& tls_slot, + WasmAccessLog(const PluginSharedPtr& plugin, + ThreadLocal::TypedSlotPtr&& tls_slot, AccessLog::FilterPtr filter) : plugin_(plugin), tls_slot_(std::move(tls_slot)), filter_(std::move(filter)) {} @@ -30,21 +31,21 @@ class WasmAccessLog : public AccessLog::Instance { } } - auto handle = tls_slot_->get(); - if (handle.has_value()) { - handle->wasm()->log(plugin_, request_headers, response_headers, response_trailers, - stream_info); + auto handle = tls_slot_->get()->handle(); + if (handle->wasmHandle()) { + handle->wasmHandle()->wasm()->log(plugin_, request_headers, response_headers, + response_trailers, stream_info); } } - void setTlsSlot(ThreadLocal::TypedSlotPtr&& tls_slot) { + void setTlsSlot(ThreadLocal::TypedSlotPtr&& tls_slot) { ASSERT(tls_slot_ == nullptr); tls_slot_ = std::move(tls_slot); } private: PluginSharedPtr plugin_; - 
ThreadLocal::TypedSlotPtr tls_slot_; + ThreadLocal::TypedSlotPtr tls_slot_; AccessLog::FilterPtr filter_; }; diff --git a/source/extensions/bootstrap/wasm/config.cc b/source/extensions/bootstrap/wasm/config.cc index ae8e733d57d60..f2771f596814c 100644 --- a/source/extensions/bootstrap/wasm/config.cc +++ b/source/extensions/bootstrap/wasm/config.cc @@ -39,9 +39,11 @@ void WasmServiceExtension::createWasm(Server::Configuration::ServerFactoryContex // Per-thread WASM VM. // NB: the Slot set() call doesn't complete inline, so all arguments must outlive this call. auto tls_slot = - ThreadLocal::TypedSlot::makeUnique(context.threadLocal()); + ThreadLocal::TypedSlot::makeUnique( + context.threadLocal()); tls_slot->set([base_wasm, plugin](Event::Dispatcher& dispatcher) { - return Common::Wasm::getOrCreateThreadLocalPlugin(base_wasm, plugin, dispatcher); + return std::make_shared( + Common::Wasm::getOrCreateThreadLocalPlugin(base_wasm, plugin, dispatcher)); }); wasm_service_ = std::make_unique(plugin, std::move(tls_slot)); }; diff --git a/source/extensions/bootstrap/wasm/config.h b/source/extensions/bootstrap/wasm/config.h index db415cb14f14a..6839792d71d8c 100644 --- a/source/extensions/bootstrap/wasm/config.h +++ b/source/extensions/bootstrap/wasm/config.h @@ -15,7 +15,7 @@ namespace Extensions { namespace Bootstrap { namespace Wasm { -using Envoy::Extensions::Common::Wasm::PluginHandle; +using Common::Wasm::PluginHandleSharedPtrThreadLocal; using Envoy::Extensions::Common::Wasm::PluginHandleSharedPtr; using Envoy::Extensions::Common::Wasm::PluginSharedPtr; @@ -23,13 +23,14 @@ class WasmService { public: WasmService(PluginSharedPtr plugin, PluginHandleSharedPtr singleton) : plugin_(plugin), singleton_(std::move(singleton)) {} - WasmService(PluginSharedPtr plugin, ThreadLocal::TypedSlotPtr&& tls_slot) + WasmService(PluginSharedPtr plugin, + ThreadLocal::TypedSlotPtr&& tls_slot) : plugin_(plugin), tls_slot_(std::move(tls_slot)) {} private: PluginSharedPtr plugin_; 
PluginHandleSharedPtr singleton_; - ThreadLocal::TypedSlotPtr tls_slot_; + ThreadLocal::TypedSlotPtr tls_slot_; }; using WasmServicePtr = std::unique_ptr; diff --git a/source/extensions/clusters/aggregate/lb_context.h b/source/extensions/clusters/aggregate/lb_context.h index 8add803c63c29..65eb3a5b0db6b 100644 --- a/source/extensions/clusters/aggregate/lb_context.h +++ b/source/extensions/clusters/aggregate/lb_context.h @@ -62,7 +62,7 @@ class AggregateLoadBalancerContext : public Upstream::LoadBalancerContext { Network::Socket::OptionsSharedPtr upstreamSocketOptions() const override { return context_->upstreamSocketOptions(); } - Network::TransportSocketOptionsSharedPtr upstreamTransportSocketOptions() const override { + Network::TransportSocketOptionsConstSharedPtr upstreamTransportSocketOptions() const override { return context_->upstreamTransportSocketOptions(); } diff --git a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc index 335bb2a8c9a86..6650a021ebaf8 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc +++ b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc @@ -29,14 +29,8 @@ Cluster::Cluster( // support these parameters dynamically in the future. This is not an exhaustive list of // parameters that don't make sense but should be the most obvious ones that a user might set // in error. 
- if (!cluster.hidden_envoy_deprecated_tls_context().sni().empty() || - !cluster.hidden_envoy_deprecated_tls_context() - .common_tls_context() - .validation_context() - .hidden_envoy_deprecated_verify_subject_alt_name() - .empty()) { - throw EnvoyException( - "dynamic_forward_proxy cluster cannot configure 'sni' or 'verify_subject_alt_name'"); + if (!cluster.hidden_envoy_deprecated_tls_context().sni().empty()) { + throw EnvoyException("dynamic_forward_proxy cluster cannot configure 'sni'"); } } diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc index 15e3adb4d715b..312c920c27bb6 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc @@ -33,6 +33,25 @@ DnsCacheImpl::DnsCacheImpl( host_ttl_(PROTOBUF_GET_MS_OR_DEFAULT(config, host_ttl, 300000)), max_hosts_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, max_hosts, 1024)) { tls_slot_.set([&](Event::Dispatcher&) { return std::make_shared(*this); }); + + if (static_cast(config.preresolve_hostnames().size()) > max_hosts_) { + throw EnvoyException(fmt::format( + "DNS Cache [{}] configured with preresolve_hostnames={} larger than max_hosts={}", + config.name(), config.preresolve_hostnames().size(), max_hosts_)); + } + + // Preresolved hostnames are resolved without a read lock on primary hosts because it is done + // during object construction. + for (const auto& hostname : config.preresolve_hostnames()) { + // No need to get a resolution handle on this resolution as the only outcome needed is for the + // cache to load an entry. Further if this particular resolution fails all the is lost is the + // potential optimization of having the entry be preresolved the first time a true consumer of + // this DNS cache asks for it. 
+ main_thread_dispatcher_.post( + [this, host = hostname.address(), default_port = hostname.port_value()]() { + startCacheLoad(host, default_port); + }); + } } DnsCacheImpl::~DnsCacheImpl() { diff --git a/source/extensions/common/wasm/context.cc b/source/extensions/common/wasm/context.cc index 4591f9d2a177b..bc08b0b261871 100644 --- a/source/extensions/common/wasm/context.cc +++ b/source/extensions/common/wasm/context.cc @@ -1,3 +1,5 @@ +#include "source/extensions/common/wasm/context.h" + #include #include #include @@ -148,8 +150,8 @@ Context::Context(Wasm* wasm) : ContextBase(wasm) {} Context::Context(Wasm* wasm, const PluginSharedPtr& plugin) : ContextBase(wasm, plugin) { root_local_info_ = &std::static_pointer_cast(plugin)->localInfo(); } -Context::Context(Wasm* wasm, uint32_t root_context_id, const PluginSharedPtr& plugin) - : ContextBase(wasm, root_context_id, plugin) {} +Context::Context(Wasm* wasm, uint32_t root_context_id, PluginHandleSharedPtr plugin_handle) + : ContextBase(wasm, root_context_id, plugin_handle), plugin_handle_(plugin_handle) {} Wasm* Context::wasm() const { return static_cast(wasm_); } Plugin* Context::plugin() const { return static_cast(plugin_.get()); } @@ -659,14 +661,16 @@ Http::HeaderMap* Context::getMap(WasmHeaderMapType type) { case WasmHeaderMapType::RequestHeaders: return request_headers_; case WasmHeaderMapType::RequestTrailers: - if (request_trailers_ == nullptr && request_body_buffer_ && end_of_stream_) { + if (request_trailers_ == nullptr && request_body_buffer_ && end_of_stream_ && + decoder_callbacks_) { request_trailers_ = &decoder_callbacks_->addDecodedTrailers(); } return request_trailers_; case WasmHeaderMapType::ResponseHeaders: return response_headers_; case WasmHeaderMapType::ResponseTrailers: - if (response_trailers_ == nullptr && response_body_buffer_ && end_of_stream_) { + if (response_trailers_ == nullptr && response_body_buffer_ && end_of_stream_ && + encoder_callbacks_) { response_trailers_ = 
&encoder_callbacks_->addEncodedTrailers(); } return response_trailers_; @@ -727,7 +731,7 @@ WasmResult Context::addHeaderMapValue(WasmHeaderMapType type, std::string_view k } const Http::LowerCaseString lower_key{std::string(key)}; map->addCopy(lower_key, std::string(value)); - if (type == WasmHeaderMapType::RequestHeaders) { + if (type == WasmHeaderMapType::RequestHeaders && decoder_callbacks_) { decoder_callbacks_->clearRouteCache(); } return WasmResult::Ok; @@ -802,7 +806,7 @@ WasmResult Context::setHeaderMapPairs(WasmHeaderMapType type, const Pairs& pairs const Http::LowerCaseString lower_key{std::string(p.first)}; map->addCopy(lower_key, std::string(p.second)); } - if (type == WasmHeaderMapType::RequestHeaders) { + if (type == WasmHeaderMapType::RequestHeaders && decoder_callbacks_) { decoder_callbacks_->clearRouteCache(); } return WasmResult::Ok; @@ -815,7 +819,7 @@ WasmResult Context::removeHeaderMapValue(WasmHeaderMapType type, std::string_vie } const Http::LowerCaseString lower_key{std::string(key)}; map->remove(lower_key); - if (type == WasmHeaderMapType::RequestHeaders) { + if (type == WasmHeaderMapType::RequestHeaders && decoder_callbacks_) { decoder_callbacks_->clearRouteCache(); } return WasmResult::Ok; @@ -829,7 +833,7 @@ WasmResult Context::replaceHeaderMapValue(WasmHeaderMapType type, std::string_vi } const Http::LowerCaseString lower_key{std::string(key)}; map->setCopy(lower_key, toAbslStringView(value)); - if (type == WasmHeaderMapType::RequestHeaders) { + if (type == WasmHeaderMapType::RequestHeaders && decoder_callbacks_) { decoder_callbacks_->clearRouteCache(); } return WasmResult::Ok; @@ -860,7 +864,7 @@ BufferInterface* Context::getBuffer(WasmBufferType type) { } return nullptr; case WasmBufferType::HttpRequestBody: - if (buffering_request_body_) { + if (buffering_request_body_ && decoder_callbacks_) { // We need the mutable version, so capture it using a callback. // TODO: consider adding a mutableDecodingBuffer() interface. 
::Envoy::Buffer::Instance* buffer_instance{}; @@ -870,7 +874,7 @@ BufferInterface* Context::getBuffer(WasmBufferType type) { } return buffer_.set(request_body_buffer_); case WasmBufferType::HttpResponseBody: - if (buffering_response_body_) { + if (buffering_response_body_ && encoder_callbacks_) { // TODO: consider adding a mutableDecodingBuffer() interface. ::Envoy::Buffer::Instance* buffer_instance{}; encoder_callbacks_->modifyEncodingBuffer( @@ -1630,17 +1634,19 @@ constexpr absl::string_view FailStreamResponseDetails = "wasm_fail_stream"; void Context::failStream(WasmStreamType stream_type) { switch (stream_type) { case WasmStreamType::Request: - if (decoder_callbacks_) { + if (decoder_callbacks_ && !local_reply_sent_) { decoder_callbacks_->sendLocalReply(Envoy::Http::Code::ServiceUnavailable, "", nullptr, Grpc::Status::WellKnownGrpcStatus::Unavailable, FailStreamResponseDetails); + local_reply_sent_ = true; } break; case WasmStreamType::Response: - if (encoder_callbacks_) { + if (encoder_callbacks_ && !local_reply_sent_) { encoder_callbacks_->sendLocalReply(Envoy::Http::Code::ServiceUnavailable, "", nullptr, Grpc::Status::WellKnownGrpcStatus::Unavailable, FailStreamResponseDetails); + local_reply_sent_ = true; } break; case WasmStreamType::Downstream: diff --git a/source/extensions/common/wasm/context.h b/source/extensions/common/wasm/context.h index a834be202a5aa..901b5a4a8c448 100644 --- a/source/extensions/common/wasm/context.h +++ b/source/extensions/common/wasm/context.h @@ -46,10 +46,12 @@ using CapabilityRestrictionConfig = envoy::extensions::wasm::v3::CapabilityRestr using SanitizationConfig = envoy::extensions::wasm::v3::SanitizationConfig; using GrpcService = envoy::config::core::v3::GrpcService; +class PluginHandle; class Wasm; using PluginBaseSharedPtr = std::shared_ptr; using PluginHandleBaseSharedPtr = std::shared_ptr; +using PluginHandleSharedPtr = std::shared_ptr; using WasmHandleBaseSharedPtr = std::shared_ptr; // Opaque context object. 
@@ -110,10 +112,11 @@ class Context : public proxy_wasm::ContextBase, public google::api::expr::runtime::BaseActivation, public std::enable_shared_from_this { public: - Context(); // Testing. - Context(Wasm* wasm); // Vm Context. - Context(Wasm* wasm, const PluginSharedPtr& plugin); // Root Context. - Context(Wasm* wasm, uint32_t root_context_id, const PluginSharedPtr& plugin); // Stream context. + Context(); // Testing. + Context(Wasm* wasm); // Vm Context. + Context(Wasm* wasm, const PluginSharedPtr& plugin); // Root Context. + Context(Wasm* wasm, uint32_t root_context_id, + PluginHandleSharedPtr plugin_handle); // Stream context. ~Context() override; Wasm* wasm() const; @@ -396,6 +399,7 @@ class Context : public proxy_wasm::ContextBase, const Http::HeaderMap* getConstMap(WasmHeaderMapType type); const LocalInfo::LocalInfo* root_local_info_{nullptr}; // set only for root_context. + PluginHandleSharedPtr plugin_handle_{nullptr}; uint32_t next_http_call_token_ = 1; uint32_t next_grpc_token_ = 1; // Odd tokens are for Calls even for Streams. @@ -446,11 +450,12 @@ class Context : public proxy_wasm::ContextBase, const Http::ResponseTrailerMap* access_log_response_trailers_{}; // Temporary state. - ProtobufWkt::Struct temporary_metadata_; - bool end_of_stream_; + Buffer buffer_; bool buffering_request_body_ = false; bool buffering_response_body_ = false; - Buffer buffer_; + bool end_of_stream_ = false; + bool local_reply_sent_ = false; + ProtobufWkt::Struct temporary_metadata_; // MB: must be a node-type map as we take persistent references to the entries. 
std::map http_request_; diff --git a/source/extensions/common/wasm/wasm.cc b/source/extensions/common/wasm/wasm.cc index 8bef9b885485e..22274d07acfdd 100644 --- a/source/extensions/common/wasm/wasm.cc +++ b/source/extensions/common/wasm/wasm.cc @@ -474,7 +474,9 @@ getOrCreateThreadLocalPlugin(const WasmHandleSharedPtr& base_wasm, const PluginS ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::wasm), critical, "Plugin configured to fail closed failed to load"); } - return nullptr; + // To handle the case when failed to create VMs and fail-open/close properly, + // we still create PluginHandle with null WasmBase. + return std::make_shared(nullptr, plugin); } return std::static_pointer_cast(proxy_wasm::getOrCreateThreadLocalPlugin( std::static_pointer_cast(base_wasm), plugin, diff --git a/source/extensions/common/wasm/wasm.h b/source/extensions/common/wasm/wasm.h index b696268ae5b75..ac856463b5a2f 100644 --- a/source/extensions/common/wasm/wasm.h +++ b/source/extensions/common/wasm/wasm.h @@ -137,25 +137,32 @@ class WasmHandle : public WasmHandleBase, public ThreadLocal::ThreadLocalObject using WasmHandleSharedPtr = std::shared_ptr; -class PluginHandle : public PluginHandleBase, public ThreadLocal::ThreadLocalObject { +class PluginHandle : public PluginHandleBase { public: explicit PluginHandle(const WasmHandleSharedPtr& wasm_handle, const PluginSharedPtr& plugin) : PluginHandleBase(std::static_pointer_cast(wasm_handle), std::static_pointer_cast(plugin)), - wasm_handle_(wasm_handle), - root_context_id_(wasm_handle->wasm()->getRootContext(plugin, false)->id()) {} + plugin_(plugin), wasm_handle_(wasm_handle) {} - WasmSharedPtr& wasm() { return wasm_handle_->wasm(); } - WasmHandleSharedPtr& wasmHandleForTest() { return wasm_handle_; } - uint32_t rootContextId() { return root_context_id_; } + WasmHandleSharedPtr& wasmHandle() { return wasm_handle_; } + uint32_t rootContextId() { return wasm_handle_->wasm()->getRootContext(plugin_, false)->id(); } 
private: + PluginSharedPtr plugin_; WasmHandleSharedPtr wasm_handle_; - const uint32_t root_context_id_; }; using PluginHandleSharedPtr = std::shared_ptr; +class PluginHandleSharedPtrThreadLocal : public ThreadLocal::ThreadLocalObject { +public: + PluginHandleSharedPtrThreadLocal(PluginHandleSharedPtr handle) : handle_(handle){}; + PluginHandleSharedPtr& handle() { return handle_; } + +private: + PluginHandleSharedPtr handle_; +}; + using CreateWasmCallback = std::function; // Returns false if createWasm failed synchronously. This is necessary because xDS *MUST* report diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index bfe74f721c120..8ee2ef00e95e7 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -52,6 +52,7 @@ EXTENSIONS = { # "envoy.matching.input_matchers.consistent_hashing": "//source/extensions/matching/input_matchers/consistent_hashing:config", + "envoy.matching.input_matchers.ip": "//source/extensions/matching/input_matchers/ip:config", # # Generic Inputs diff --git a/source/extensions/extensions_metadata.yaml b/source/extensions/extensions_metadata.yaml index 261b50d1d3c9e..31cdfad79124c 100644 --- a/source/extensions/extensions_metadata.yaml +++ b/source/extensions/extensions_metadata.yaml @@ -499,6 +499,11 @@ envoy.matching.input_matchers.consistent_hashing: - envoy.matching.input_matchers security_posture: robust_to_untrusted_downstream status: stable +envoy.matching.input_matchers.ip: + categories: + - envoy.matching.input_matchers + security_posture: robust_to_untrusted_downstream_and_upstream + status: stable envoy.quic.proof_source.filter_chain: categories: - envoy.quic.proof_source diff --git a/source/extensions/filters/common/expr/context.cc b/source/extensions/filters/common/expr/context.cc index 66603460003c6..d073b3226494f 100644 --- a/source/extensions/filters/common/expr/context.cc +++ 
b/source/extensions/filters/common/expr/context.cc @@ -184,7 +184,7 @@ absl::optional ConnectionWrapper::operator[](CelValue key) const { return CelValue::CreateBool(info_.downstreamSslConnection() != nullptr && info_.downstreamSslConnection()->peerCertificatePresented()); } else if (value == RequestedServerName) { - return CelValue::CreateString(&info_.requestedServerName()); + return CelValue::CreateStringView(info_.downstreamAddressProvider().requestedServerName()); } else if (value == ID) { auto id = info_.connectionID(); if (id.has_value()) { diff --git a/source/extensions/filters/http/jwt_authn/authenticator.cc b/source/extensions/filters/http/jwt_authn/authenticator.cc index ee5aa0b1cc007..151d6a7fa1286 100644 --- a/source/extensions/filters/http/jwt_authn/authenticator.cc +++ b/source/extensions/filters/http/jwt_authn/authenticator.cc @@ -3,6 +3,7 @@ #include "envoy/http/async_client.h" #include "source/common/common/assert.h" +#include "source/common/common/base64.h" #include "source/common/common/enum_to_int.h" #include "source/common/common/logger.h" #include "source/common/http/message_impl.h" @@ -252,9 +253,17 @@ void AuthenticatorImpl::verifyKey() { // Forward the payload const auto& provider = jwks_data_->getJwtProvider(); + if (!provider.forward_payload_header().empty()) { - headers_->addCopy(Http::LowerCaseString(provider.forward_payload_header()), - jwt_->payload_str_base64url_); + if (provider.pad_forward_payload_header()) { + std::string payload_with_padding = jwt_->payload_str_base64url_; + Base64::completePadding(payload_with_padding); + headers_->addCopy(Http::LowerCaseString(provider.forward_payload_header()), + payload_with_padding); + } else { + headers_->addCopy(Http::LowerCaseString(provider.forward_payload_header()), + jwt_->payload_str_base64url_); + } } if (!provider.forward()) { diff --git a/source/extensions/filters/http/lua/wrappers.cc b/source/extensions/filters/http/lua/wrappers.cc index 04bd6a7b3eac5..bf70a111e2180 100644 --- 
a/source/extensions/filters/http/lua/wrappers.cc +++ b/source/extensions/filters/http/lua/wrappers.cc @@ -141,7 +141,7 @@ int StreamInfoWrapper::luaDownstreamDirectRemoteAddress(lua_State* state) { } int StreamInfoWrapper::luaRequestedServerName(lua_State* state) { - lua_pushstring(state, stream_info_.requestedServerName().c_str()); + lua_pushstring(state, stream_info_.downstreamAddressProvider().requestedServerName().data()); return 1; } diff --git a/source/extensions/filters/http/wasm/wasm_filter.cc b/source/extensions/filters/http/wasm/wasm_filter.cc index 90cd4c6bacd7b..00cd2c6b715f2 100644 --- a/source/extensions/filters/http/wasm/wasm_filter.cc +++ b/source/extensions/filters/http/wasm/wasm_filter.cc @@ -7,8 +7,8 @@ namespace Wasm { FilterConfig::FilterConfig(const envoy::extensions::filters::http::wasm::v3::Wasm& config, Server::Configuration::FactoryContext& context) - : tls_slot_( - ThreadLocal::TypedSlot::makeUnique(context.threadLocal())) { + : tls_slot_(ThreadLocal::TypedSlot::makeUnique( + context.threadLocal())) { plugin_ = std::make_shared( config.config(), context.direction(), context.localInfo(), &context.listenerMetadata()); @@ -16,7 +16,8 @@ FilterConfig::FilterConfig(const envoy::extensions::filters::http::wasm::v3::Was auto callback = [plugin, this](const Common::Wasm::WasmHandleSharedPtr& base_wasm) { // NB: the Slot set() call doesn't complete inline, so all arguments must outlive this call. 
tls_slot_->set([base_wasm, plugin](Event::Dispatcher& dispatcher) { - return Common::Wasm::getOrCreateThreadLocalPlugin(base_wasm, plugin, dispatcher); + return std::make_shared( + Common::Wasm::getOrCreateThreadLocalPlugin(base_wasm, plugin, dispatcher)); }); }; diff --git a/source/extensions/filters/http/wasm/wasm_filter.h b/source/extensions/filters/http/wasm/wasm_filter.h index 9b4ed541f0702..e3b3fb13892b4 100644 --- a/source/extensions/filters/http/wasm/wasm_filter.h +++ b/source/extensions/filters/http/wasm/wasm_filter.h @@ -16,7 +16,8 @@ namespace HttpFilters { namespace Wasm { using Envoy::Extensions::Common::Wasm::Context; -using Envoy::Extensions::Common::Wasm::PluginHandle; +using Envoy::Extensions::Common::Wasm::PluginHandleSharedPtr; +using Envoy::Extensions::Common::Wasm::PluginHandleSharedPtrThreadLocal; using Envoy::Extensions::Common::Wasm::PluginSharedPtr; using Envoy::Extensions::Common::Wasm::Wasm; @@ -27,9 +28,9 @@ class FilterConfig : Logger::Loggable { std::shared_ptr createFilter() { Wasm* wasm = nullptr; - auto handle = tls_slot_->get(); - if (handle.has_value()) { - wasm = handle->wasm().get(); + PluginHandleSharedPtr handle = tls_slot_->get()->handle(); + if (handle->wasmHandle()) { + wasm = handle->wasmHandle()->wasm().get(); } if (!wasm || wasm->isFailed()) { if (plugin_->fail_open_) { @@ -37,15 +38,15 @@ class FilterConfig : Logger::Loggable { return nullptr; } else { // Fail closed is handled by an empty Context. 
- return std::make_shared(nullptr, 0, plugin_); + return std::make_shared(nullptr, 0, handle); } } - return std::make_shared(wasm, handle->rootContextId(), plugin_); + return std::make_shared(wasm, handle->rootContextId(), handle); } private: PluginSharedPtr plugin_; - ThreadLocal::TypedSlotPtr tls_slot_; + ThreadLocal::TypedSlotPtr tls_slot_; Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider_; }; diff --git a/source/extensions/filters/listener/http_inspector/http_inspector.h b/source/extensions/filters/listener/http_inspector/http_inspector.h index b43422f9e66ed..75d5eb5e76af8 100644 --- a/source/extensions/filters/listener/http_inspector/http_inspector.h +++ b/source/extensions/filters/listener/http_inspector/http_inspector.h @@ -66,6 +66,11 @@ using ConfigSharedPtr = std::shared_ptr; class Filter : public Network::ListenerFilter, Logger::Loggable { public: Filter(const ConfigSharedPtr config); + ~Filter() override { + if (cb_) { + cb_->socket().ioHandle().resetFileEvents(); + } + } // Network::ListenerFilter Network::FilterStatus onAccept(Network::ListenerFilterCallbacks& cb) override; diff --git a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.h b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.h index 469c838f9b83f..3eeb067466f31 100644 --- a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.h +++ b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.h @@ -83,7 +83,11 @@ enum class ReadOrParseState { Done, TryAgainLater, Error }; class Filter : public Network::ListenerFilter, Logger::Loggable { public: Filter(const ConfigSharedPtr& config) : config_(config) {} - + ~Filter() override { + if (cb_) { + cb_->socket().ioHandle().resetFileEvents(); + } + } // Network::ListenerFilter Network::FilterStatus onAccept(Network::ListenerFilterCallbacks& cb) override; diff --git a/source/extensions/filters/listener/tls_inspector/tls_inspector.h 
b/source/extensions/filters/listener/tls_inspector/tls_inspector.h index a31e814ffc0e4..bd64cd2bd3f54 100644 --- a/source/extensions/filters/listener/tls_inspector/tls_inspector.h +++ b/source/extensions/filters/listener/tls_inspector/tls_inspector.h @@ -73,6 +73,11 @@ using ConfigSharedPtr = std::shared_ptr; class Filter : public Network::ListenerFilter, Logger::Loggable { public: Filter(const ConfigSharedPtr config); + ~Filter() override { + if (cb_) { + cb_->socket().ioHandle().resetFileEvents(); + } + } // Network::ListenerFilter Network::FilterStatus onAccept(Network::ListenerFilterCallbacks& cb) override; @@ -85,7 +90,7 @@ class Filter : public Network::ListenerFilter, Logger::Loggable ssl_; uint64_t read_{0}; diff --git a/source/extensions/filters/network/dubbo_proxy/router/router_impl.cc b/source/extensions/filters/network/dubbo_proxy/router/router_impl.cc index 561c9eeda61c6..4c64decc03bdb 100644 --- a/source/extensions/filters/network/dubbo_proxy/router/router_impl.cc +++ b/source/extensions/filters/network/dubbo_proxy/router/router_impl.cc @@ -281,6 +281,7 @@ void Router::UpstreamRequest::encodeData(Buffer::Instance& data) { } void Router::UpstreamRequest::onPoolFailure(ConnectionPool::PoolFailureReason reason, + absl::string_view, Upstream::HostDescriptionConstSharedPtr host) { conn_pool_handle_ = nullptr; diff --git a/source/extensions/filters/network/dubbo_proxy/router/router_impl.h b/source/extensions/filters/network/dubbo_proxy/router/router_impl.h index 7c62e60fdab6a..c67b7ab778714 100644 --- a/source/extensions/filters/network/dubbo_proxy/router/router_impl.h +++ b/source/extensions/filters/network/dubbo_proxy/router/router_impl.h @@ -61,6 +61,7 @@ class Router : public Tcp::ConnectionPool::UpstreamCallbacks, // Tcp::ConnectionPool::Callbacks void onPoolFailure(ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, Upstream::HostDescriptionConstSharedPtr host) override; void 
onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn, Upstream::HostDescriptionConstSharedPtr host) override; diff --git a/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc b/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc index 7af15c8d305b1..793bd96f32d34 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc +++ b/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc @@ -13,13 +13,16 @@ namespace PostgresProxy { []() -> std::unique_ptr { return createMsgBodyReader<__VA_ARGS__>(); } #define NO_BODY BODY_FORMAT() +constexpr absl::string_view FRONTEND = "Frontend"; +constexpr absl::string_view BACKEND = "Backend"; + void DecoderImpl::initialize() { // Special handler for first message of the transaction. first_ = MessageProcessor{"Startup", BODY_FORMAT(Int32, Repeated), {&DecoderImpl::onStartup}}; // Frontend messages. - FE_messages_.direction_ = "Frontend"; + FE_messages_.direction_ = FRONTEND; // Setup handlers for known messages. absl::flat_hash_map& FE_known_msgs = FE_messages_.messages_; @@ -52,7 +55,7 @@ void DecoderImpl::initialize() { MessageProcessor{"Other", BODY_FORMAT(ByteN), {&DecoderImpl::incMessagesUnknown}}; // Backend messages. - BE_messages_.direction_ = "Backend"; + BE_messages_.direction_ = BACKEND; // Setup handlers for known messages. absl::flat_hash_map& BE_known_msgs = BE_messages_.messages_; @@ -176,88 +179,156 @@ void DecoderImpl::initialize() { }; } -Decoder::Result DecoderImpl::parseHeader(Buffer::Instance& data) { - ENVOY_LOG(trace, "postgres_proxy: parsing message, len {}", data.length()); +/* Main handler for incoming messages. Messages are dispatched based on the + current decoder's state. 
+*/ +Decoder::Result DecoderImpl::onData(Buffer::Instance& data, bool frontend) { + switch (state_) { + case State::InitState: + return onDataInit(data, frontend); + case State::OutOfSyncState: + case State::EncryptedState: + return onDataIgnore(data, frontend); + case State::InSyncState: + return onDataInSync(data, frontend); + default: + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } +} - // The minimum size of the message sufficient for parsing is 5 bytes. - if (data.length() < 5) { +/* Handler for messages when decoder is in Init State. There are very few message types which + are allowed in this state. + If the initial message has the correct syntax and indicates that session should be in + clear-text, the decoder will move to InSyncState. If the initial message has the correct syntax + and indicates that session should be encrypted, the decoder stays in InitState, because the + initial message will be received again after transport socket negotiates SSL. If the message + syntax is incorrect, the decoder will move to OutOfSyncState, in which messages are not parsed. +*/ +Decoder::Result DecoderImpl::onDataInit(Buffer::Instance& data, bool) { + ASSERT(state_ == State::InitState); + + // In Init state the minimum size of the message sufficient for parsing is 4 bytes. + if (data.length() < 4) { // not enough data in the buffer. - return Decoder::NeedMoreData; + return Decoder::Result::NeedMoreData; + } + + // Validate the message before processing. + const MsgBodyReader& f = std::get<1>(first_); + const auto msgParser = f(); + // Run the validation. + message_len_ = data.peekBEInt(0); + if (message_len_ > MAX_STARTUP_PACKET_LENGTH) { + // Message does not conform to the expected format. Move to out-of-sync state. 
+ data.drain(data.length()); + state_ = State::OutOfSyncState; + return Decoder::Result::ReadyForNext; } - if (!startup_) { - data.copyOut(0, 1, &command_); - ENVOY_LOG(trace, "postgres_proxy: command is {}", command_); + Message::ValidationResult validationResult = msgParser->validate(data, 4, message_len_ - 4); + + if (validationResult == Message::ValidationNeedMoreData) { + return Decoder::Result::NeedMoreData; } - // The 1 byte message type and message length should be in the buffer - // Check if the entire message has been read. - std::string message; - message_len_ = data.peekBEInt(startup_ ? 0 : 1); - if (data.length() < (message_len_ + (startup_ ? 0 : 1))) { - ENVOY_LOG(trace, "postgres_proxy: cannot parse message. Need {} bytes in buffer", - message_len_ + (startup_ ? 0 : 1)); - // Not enough data in the buffer. - return Decoder::NeedMoreData; + if (validationResult == Message::ValidationFailed) { + // Message does not conform to the expected format. Move to out-of-sync state. + data.drain(data.length()); + state_ = State::OutOfSyncState; + return Decoder::Result::ReadyForNext; } - if (startup_) { - uint32_t code = data.peekBEInt(4); - // Startup message with 1234 in the most significant 16 bits - // indicate request to encrypt. - if (code >= 0x04d20000) { - encrypted_ = true; - // Handler for SSLRequest (Int32(80877103) = 0x04d2162f) - // See details in https://www.postgresql.org/docs/current/protocol-message-formats.html. - if (code == 0x04d2162f) { - // Notify the filter that `SSLRequest` message was decoded. - // If the filter returns true, it means to pass the message upstream - // to the server. If it returns false it means, that filter will try - // to terminate SSL session and SSLRequest should not be passed to the - // server. - encrypted_ = callbacks_->onSSLRequest(); - } - - // Count it as recognized frontend message. 
- callbacks_->incMessagesFrontend(); - if (encrypted_) { - ENVOY_LOG(trace, "postgres_proxy: detected encrypted traffic."); - incSessionsEncrypted(); - startup_ = false; - } - data.drain(data.length()); - return encrypted_ ? Decoder::ReadyForNext : Decoder::Stopped; + Decoder::Result result = Decoder::Result::ReadyForNext; + uint32_t code = data.peekBEInt(4); + data.drain(4); + // Startup message with 1234 in the most significant 16 bits + // indicate request to encrypt. + if (code >= 0x04d20000) { + encrypted_ = true; + // Handler for SSLRequest (Int32(80877103) = 0x04d2162f) + // See details in https://www.postgresql.org/docs/current/protocol-message-formats.html. + if (code == 0x04d2162f) { + // Notify the filter that `SSLRequest` message was decoded. + // If the filter returns true, it means to pass the message upstream + // to the server. If it returns false it means, that filter will try + // to terminate SSL session and SSLRequest should not be passed to the + // server. + encrypted_ = callbacks_->onSSLRequest(); + } + + // Count it as recognized frontend message. + callbacks_->incMessagesFrontend(); + if (encrypted_) { + ENVOY_LOG(trace, "postgres_proxy: detected encrypted traffic."); + incSessionsEncrypted(); + state_ = State::EncryptedState; } else { - ENVOY_LOG(debug, "Detected version {}.{} of Postgres", code >> 16, code & 0x0000FFFF); + result = Decoder::Result::Stopped; + // Stay in InitState. After switch to SSL, another init packet will be sent. } + } else { + ENVOY_LOG(debug, "Detected version {}.{} of Postgres", code >> 16, code & 0x0000FFFF); + state_ = State::InSyncState; } - data.drain(startup_ ? 4 : 5); // Length plus optional 1st byte. 
- - ENVOY_LOG(trace, "postgres_proxy: msg parsed"); - return Decoder::ReadyForNext; + processMessageBody(data, FRONTEND, message_len_ - 4, first_, msgParser); + data.drain(message_len_); + return result; } -Decoder::Result DecoderImpl::onData(Buffer::Instance& data, bool frontend) { - // If encrypted, just drain the traffic. - if (encrypted_) { - ENVOY_LOG(trace, "postgres_proxy: ignoring {} bytes of encrypted data", data.length()); - data.drain(data.length()); - return Decoder::ReadyForNext; - } +/* + Method invokes actions associated with message type and generate debug logs. +*/ +void DecoderImpl::processMessageBody(Buffer::Instance& data, absl::string_view direction, + uint32_t length, MessageProcessor& msg, + const std::unique_ptr& parser) { + uint32_t bytes_to_read = length; - if (!frontend && startup_) { - data.drain(data.length()); - return Decoder::ReadyForNext; + std::vector& actions = std::get<2>(msg); + if (!actions.empty()) { + // Linearize the message for processing. + message_.assign(std::string(static_cast(data.linearize(bytes_to_read)), bytes_to_read)); + + // Invoke actions associated with the type of received message. + for (const auto& action : actions) { + action(this); + } + + // Drop the linearized message. + message_.erase(); } + ENVOY_LOG(debug, "({}) command = {} ({})", direction, command_, std::get<0>(msg)); + ENVOY_LOG(debug, "({}) length = {}", direction, message_len_); + ENVOY_LOG(debug, "({}) message = {}", direction, genDebugMessage(parser, data, bytes_to_read)); + + ENVOY_LOG(trace, "postgres_proxy: {} bytes remaining in buffer", data.length()); + + data.drain(length); +} + +/* + onDataInSync is called when decoder is on-track with decoding messages. + All previous messages has been decoded properly and decoder is able to find + message boundaries. 
+*/ +Decoder::Result DecoderImpl::onDataInSync(Buffer::Instance& data, bool frontend) { ENVOY_LOG(trace, "postgres_proxy: decoding {} bytes", data.length()); - const Decoder::Result result = parseHeader(data); - if (result != Decoder::ReadyForNext || encrypted_) { - return result; + ENVOY_LOG(trace, "postgres_proxy: parsing message, len {}", data.length()); + + // The minimum size of the message sufficient for parsing is 5 bytes. + if (data.length() < 5) { + // not enough data in the buffer. + return Decoder::Result::NeedMoreData; } + data.copyOut(0, 1, &command_); + ENVOY_LOG(trace, "postgres_proxy: command is {}", command_); + + // The 1 byte message type and message length should be in the buffer + // Find the message processor and validate the message syntax. + MsgGroup& msg_processor = std::ref(frontend ? FE_messages_ : BE_messages_); frontend ? callbacks_->incMessagesFrontend() : callbacks_->incMessagesBackend(); @@ -265,45 +336,55 @@ Decoder::Result DecoderImpl::onData(Buffer::Instance& data, bool frontend) { // If message is found, the processing will be updated. std::reference_wrapper msg = msg_processor.unknown_; - if (startup_) { - msg = std::ref(first_); - startup_ = false; - } else { - auto it = msg_processor.messages_.find(command_); - if (it != msg_processor.messages_.end()) { - msg = std::ref((*it).second); - } + auto it = msg_processor.messages_.find(command_); + if (it != msg_processor.messages_.end()) { + msg = std::ref((*it).second); } - // message_len_ specifies total message length including 4 bytes long - // "length" field. The length of message body is total length minus size - // of "length" field (4 bytes). - uint32_t bytes_to_read = message_len_ - 4; - - std::vector& actions = std::get<2>(msg.get()); - if (!actions.empty()) { - // Linearize the message for processing. - message_.assign(std::string(static_cast(data.linearize(bytes_to_read)), bytes_to_read)); - - // Invoke actions associated with the type of received message. 
- for (const auto& action : actions) { - action(this); - } + // Validate the message before processing. + const MsgBodyReader& f = std::get<1>(msg.get()); + message_len_ = data.peekBEInt(1); + const auto msgParser = f(); + // Run the validation. + // Because the message validation may return NeedMoreData error, data must stay intact (no + // draining) until the remaining data arrives and validator will run again. Validator therefore + // starts at offset 5 (1 byte message type and 4 bytes of length). This is in contrast to + // processing of the message, which assumes that message has been validated and starts at the + // beginning of the message. + Message::ValidationResult validationResult = msgParser->validate(data, 5, message_len_ - 4); + + if (validationResult == Message::ValidationNeedMoreData) { + ENVOY_LOG(trace, "postgres_proxy: cannot parse message. Not enough bytes in the buffer."); + return Decoder::Result::NeedMoreData; + } - // Drop the linearized message. - message_.erase(); + if (validationResult == Message::ValidationFailed) { + // Message does not conform to the expected format. Move to out-of-sync state. + data.drain(data.length()); + state_ = State::OutOfSyncState; + return Decoder::Result::ReadyForNext; } - ENVOY_LOG(debug, "({}) command = {} ({})", msg_processor.direction_, command_, - std::get<0>(msg.get())); - ENVOY_LOG(debug, "({}) length = {}", msg_processor.direction_, message_len_); - ENVOY_LOG(debug, "({}) message = {}", msg_processor.direction_, - genDebugMessage(msg, data, bytes_to_read)); + // Drain message code and length fields. + // Processing the message assumes that message starts at the beginning of the buffer. 
+ data.drain(5); - data.drain(bytes_to_read); - ENVOY_LOG(trace, "postgres_proxy: {} bytes remaining in buffer", data.length()); + processMessageBody(data, msg_processor.direction_, message_len_ - 4, msg, msgParser); - return Decoder::ReadyForNext; + return Decoder::Result::ReadyForNext; +} +/* + onDataIgnore method is called when the decoder does not inspect passing + messages. This happens when the decoder detected encrypted packets or + when the decoder could not validate passing messages and lost track of + messages boundaries. In order not to interpret received values as message + lengths and not to start buffering large amount of data, the decoder + enters OutOfSync state and starts ignoring passing messages. Once the + decoder enters OutOfSyncState it cannot leave that state. +*/ +Decoder::Result DecoderImpl::onDataIgnore(Buffer::Instance& data, bool) { + data.drain(data.length()); + return Decoder::Result::ReadyForNext; } // Method is called when C (CommandComplete) message has been @@ -423,16 +504,10 @@ void DecoderImpl::onStartup() { } // Method generates displayable format of currently processed message. 
-const std::string DecoderImpl::genDebugMessage(const MessageProcessor& msg, Buffer::Instance& data, - uint32_t message_len) { - const MsgBodyReader& f = std::get<1>(msg); - std::string message = "Unrecognized"; - if (f != nullptr) { - const auto msgParser = f(); - msgParser->read(data, message_len); - message = msgParser->toString(); - } - return message; +const std::string DecoderImpl::genDebugMessage(const std::unique_ptr& parser, + Buffer::Instance& data, uint32_t message_len) { + parser->read(data, message_len); + return parser->toString(); } } // namespace PostgresProxy diff --git a/source/extensions/filters/network/postgres_proxy/postgres_decoder.h b/source/extensions/filters/network/postgres_proxy/postgres_decoder.h index bed146c097a90..f62de0108f574 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_decoder.h +++ b/source/extensions/filters/network/postgres_proxy/postgres_decoder.h @@ -53,7 +53,7 @@ class Decoder { // The following values are returned by the decoder, when filter // passes bytes of data via onData method: - enum Result { + enum class Result { ReadyForNext, // Decoder processed previous message and is ready for the next message. NeedMoreData, // Decoder needs more data to reconstruct the message. Stopped // Received and processed message disrupts the current flow. 
Decoder stopped accepting @@ -84,12 +84,21 @@ class DecoderImpl : public Decoder, Logger::Loggable { std::string getMessage() { return message_; } - void setStartup(bool startup) { startup_ = startup; } void initialize(); bool encrypted() const { return encrypted_; } + enum class State { InitState, InSyncState, OutOfSyncState, EncryptedState }; + State state() const { return state_; } + void state(State state) { state_ = state; } + protected: + State state_{State::InitState}; + + Result onDataInit(Buffer::Instance& data, bool frontend); + Result onDataInSync(Buffer::Instance& data, bool frontend); + Result onDataIgnore(Buffer::Instance& data, bool frontend); + // MsgAction defines the Decoder's method which will be invoked // when a specific message has been decoded. using MsgAction = std::function; @@ -110,7 +119,7 @@ class DecoderImpl : public Decoder, Logger::Loggable { // Frontend and Backend messages. using MsgGroup = struct { // String describing direction (Frontend or Backend). - std::string direction_; + absl::string_view direction_; // Hash map indexed by messages' 1st byte points to handlers used for processing messages. absl::flat_hash_map messages_; // Handler used for processing messages not found in hash map. @@ -131,7 +140,8 @@ class DecoderImpl : public Decoder, Logger::Loggable { MsgAction unknown_; }; - Result parseHeader(Buffer::Instance& data); + void processMessageBody(Buffer::Instance& data, absl::string_view direction, uint32_t length, + MessageProcessor& msg, const std::unique_ptr& parser); void decode(Buffer::Instance& data); void decodeAuthentication(); void decodeBackendStatements(); @@ -149,18 +159,17 @@ class DecoderImpl : public Decoder, Logger::Loggable { // Helper method generating currently processed message in // displayable format. 
- const std::string genDebugMessage(const MessageProcessor& msg, Buffer::Instance& data, + const std::string genDebugMessage(const std::unique_ptr& parser, Buffer::Instance& data, uint32_t message_len); DecoderCallbacks* callbacks_{}; PostgresSession session_{}; // The following fields store result of message parsing. - char command_{}; + char command_{'-'}; std::string message_; uint32_t message_len_{}; - bool startup_{true}; // startup stage does not have 1st byte command bool encrypted_{false}; // tells if exchange is encrypted // Dispatchers for Backend (BE) and Frontend (FE) messages. @@ -178,6 +187,11 @@ class DecoderImpl : public Decoder, Logger::Loggable { MsgParserDict BE_errors_; MsgParserDict BE_notices_; + + // MAX_STARTUP_PACKET_LENGTH is defined in Postgres source code + // as maximum size of initial packet. + // https://github.com/postgres/postgres/search?q=MAX_STARTUP_PACKET_LENGTH&type=code + static constexpr uint64_t MAX_STARTUP_PACKET_LENGTH = 10000; }; } // namespace PostgresProxy diff --git a/source/extensions/filters/network/postgres_proxy/postgres_filter.cc b/source/extensions/filters/network/postgres_proxy/postgres_filter.cc index 71e3388718b17..0e8daac36fed4 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_filter.cc +++ b/source/extensions/filters/network/postgres_proxy/postgres_filter.cc @@ -231,11 +231,11 @@ Network::FilterStatus PostgresFilter::doDecode(Buffer::Instance& data, bool fron // that it cannot process data in the buffer. 
while (0 < data.length()) { switch (decoder_->onData(data, frontend)) { - case Decoder::NeedMoreData: + case Decoder::Result::NeedMoreData: return Network::FilterStatus::Continue; - case Decoder::ReadyForNext: + case Decoder::Result::ReadyForNext: continue; - case Decoder::Stopped: + case Decoder::Result::Stopped: return Network::FilterStatus::StopIteration; } } diff --git a/source/extensions/filters/network/postgres_proxy/postgres_message.cc b/source/extensions/filters/network/postgres_proxy/postgres_message.cc index 340092d489105..b8e4a3d5febe5 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_message.cc +++ b/source/extensions/filters/network/postgres_proxy/postgres_message.cc @@ -7,18 +7,16 @@ namespace PostgresProxy { // String type methods. bool String::read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) { - // First find the terminating zero. - const char zero = 0; - const ssize_t index = data.search(&zero, 1, pos); - if (index == -1) { - return false; - } + // read method uses values set by validate method. + // This avoids unnecessary repetition of scanning data looking for terminating zero. + ASSERT(pos == start_); + ASSERT(end_ >= start_); // Reserve that many bytes in the string. - const uint64_t size = index - pos; + const uint64_t size = end_ - start_; value_.resize(size); // Now copy from buffer to string. - data.copyOut(pos, index - pos, value_.data()); + data.copyOut(pos, size, value_.data()); pos += (size + 1); left -= (size + 1); @@ -27,6 +25,35 @@ bool String::read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) { std::string String::toString() const { return absl::StrCat("[", value_, "]"); } +Message::ValidationResult String::validate(const Buffer::Instance& data, + const uint64_t start_offset, uint64_t& pos, + uint64_t& left) { + // Try to find the terminating zero. + // If found, all is good. If not found, we may need more data. 
+ const char zero = 0; + const ssize_t index = data.search(&zero, 1, pos); + if (index == -1) { + if (left <= (data.length() - pos)) { + // Message ended before finding terminating zero. + return Message::ValidationFailed; + } else { + return Message::ValidationNeedMoreData; + } + } + // Found, but after the message boundary. + const uint64_t size = index - pos; + if (size >= left) { + return Message::ValidationFailed; + } + + start_ = pos - start_offset; + end_ = start_ + size; + + pos += (size + 1); + left -= (size + 1); + return Message::ValidationOK; +} + // ByteN type methods. bool ByteN::read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) { if (left > (data.length() - pos)) { @@ -38,6 +65,19 @@ bool ByteN::read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) { left = 0; return true; } +// Since ByteN does not have a length field, it is not possible to verify +// its correctness. +Message::ValidationResult ByteN::validate(const Buffer::Instance& data, const uint64_t, + uint64_t& pos, uint64_t& left) { + if (left > (data.length() - pos)) { + return Message::ValidationNeedMoreData; + } + + pos += left; + left = 0; + + return Message::ValidationOK; +} std::string ByteN::toString() const { std::string out = "["; @@ -48,10 +88,7 @@ std::string ByteN::toString() const { // VarByteN type methods. bool VarByteN::read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) { - if ((left < sizeof(int32_t)) || ((data.length() - pos) < sizeof(int32_t))) { - return false; - } - len_ = data.peekBEInt(pos); + // len_ was set by validator, skip it. 
pos += sizeof(int32_t); left -= sizeof(int32_t); if (len_ < 1) { @@ -59,10 +96,7 @@ bool VarByteN::read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) value_.clear(); return true; } - if ((left < static_cast(len_)) || - ((data.length() - pos) < static_cast(len_))) { - return false; - } + value_.resize(len_); data.copyOut(pos, len_, value_.data()); pos += len_; @@ -78,6 +112,42 @@ std::string VarByteN::toString() const { return out; } +Message::ValidationResult VarByteN::validate(const Buffer::Instance& data, const uint64_t, + uint64_t& pos, uint64_t& left) { + if (left < sizeof(int32_t)) { + // Malformed message. + return Message::ValidationFailed; + } + + if ((data.length() - pos) < sizeof(int32_t)) { + return Message::ValidationNeedMoreData; + } + + // Read length of the VarByteN structure. + len_ = data.peekBEInt(pos); + if (static_cast(len_) > static_cast(left)) { + // VarByteN would extend past the current message boundaries. + // Lengths of message and individual fields do not match. + return Message::ValidationFailed; + } + + if (len_ < 1) { + // There is no payload if length is not positive. + pos += sizeof(int32_t); + left -= sizeof(int32_t); + return Message::ValidationOK; + } + + if ((data.length() - pos) < (len_ + sizeof(int32_t))) { + return Message::ValidationNeedMoreData; + } + + pos += (len_ + sizeof(int32_t)); + left -= (len_ + sizeof(int32_t)); + + return Message::ValidationOK; +} + } // namespace PostgresProxy } // namespace NetworkFilters } // namespace Extensions diff --git a/source/extensions/filters/network/postgres_proxy/postgres_message.h b/source/extensions/filters/network/postgres_proxy/postgres_message.h index 948167892b148..584662f0b7546 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_message.h +++ b/source/extensions/filters/network/postgres_proxy/postgres_message.h @@ -29,6 +29,30 @@ namespace PostgresProxy { * */ +// Interface to Postgres message class. 
+class Message { +public: + enum ValidationResult { ValidationFailed, ValidationOK, ValidationNeedMoreData }; + + virtual ~Message() = default; + + // read method should read only as many bytes from data + // buffer as it is indicated in message's length field. + // "length" parameter indicates how many bytes were indicated in Postgres message's + // length field. "data" buffer may contain more bytes than "length". + virtual bool read(const Buffer::Instance& data, const uint64_t length) PURE; + + virtual ValidationResult validate(const Buffer::Instance& data, const uint64_t, + const uint64_t) PURE; + + // toString method provides displayable representation of + // the Postgres message. + virtual std::string toString() const PURE; + +protected: + ValidationResult validation_result_{ValidationNeedMoreData}; +}; + // Template for integer types. // Size of integer types is fixed and depends on the type of integer. template class Int { @@ -46,15 +70,27 @@ template class Int { * for the current message. 
*/ bool read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) { - if ((data.length() - pos) < sizeof(T)) { - return false; - } value_ = data.peekBEInt(pos); pos += sizeof(T); left -= sizeof(T); return true; } + Message::ValidationResult validate(const Buffer::Instance& data, const uint64_t, uint64_t& pos, + uint64_t& left) { + if (left < sizeof(T)) { + return Message::ValidationFailed; + } + + if ((data.length() - pos) < sizeof(T)) { + return Message::ValidationNeedMoreData; + } + + pos += sizeof(T); + left -= sizeof(T); + return Message::ValidationOK; + } + std::string toString() const { return fmt::format("[{}]", value_); } T get() const { return value_; } @@ -78,8 +114,13 @@ class String { */ bool read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left); std::string toString() const; + Message::ValidationResult validate(const Buffer::Instance&, const uint64_t start_offset, + uint64_t&, uint64_t&); private: + // start_ and end_ are set by validate method. + uint64_t start_; + uint64_t end_; std::string value_; }; @@ -92,6 +133,7 @@ class ByteN { */ bool read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left); std::string toString() const; + Message::ValidationResult validate(const Buffer::Instance&, const uint64_t, uint64_t&, uint64_t&); private: std::vector value_; @@ -115,6 +157,7 @@ class VarByteN { */ bool read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left); std::string toString() const; + Message::ValidationResult validate(const Buffer::Instance&, const uint64_t, uint64_t&, uint64_t&); private: int32_t len_; @@ -128,22 +171,11 @@ template class Array { * See above for parameter and return value description. */ bool read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) { - // First read the 16 bits value which indicates how many - // elements there are in the array. 
- if (((data.length() - pos) < sizeof(uint16_t)) || (left < sizeof(uint16_t))) { - return false; - } - const uint16_t num = data.peekBEInt(pos); + // Skip reading the size of array. The validator did it. pos += sizeof(uint16_t); left -= sizeof(uint16_t); - if (num != 0) { - for (uint16_t i = 0; i < num; i++) { - auto item = std::make_unique(); - if (!item->read(data, pos, left)) { - return false; - } - value_.push_back(std::move(item)); - } + for (uint16_t i = 0; i < size_; i++) { + value_[i]->read(data, pos, left); } return true; } @@ -161,8 +193,41 @@ template class Array { return out; } + Message::ValidationResult validate(const Buffer::Instance& data, const uint64_t start_offset, + uint64_t& pos, uint64_t& left) { + // First read the 16 bits value which indicates how many + // elements there are in the array. + if (left < sizeof(uint16_t)) { + return Message::ValidationFailed; + } + + if ((data.length() - pos) < sizeof(uint16_t)) { + return Message::ValidationNeedMoreData; + } + + size_ = data.peekBEInt(pos); + uint64_t orig_pos = pos; + uint64_t orig_left = left; + pos += sizeof(uint16_t); + left -= sizeof(uint16_t); + if (size_ != 0) { + for (uint16_t i = 0; i < size_; i++) { + auto item = std::make_unique(); + Message::ValidationResult result = item->validate(data, start_offset, pos, left); + if (Message::ValidationOK != result) { + pos = orig_pos; + left = orig_left; + value_.clear(); + return result; + } + value_.push_back(std::move(item)); + } + } + return Message::ValidationOK; + } private: + uint16_t size_; std::vector> value_; }; @@ -175,16 +240,10 @@ template class Repeated { * See above for parameter and return value description. */ bool read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) { - if ((data.length() - pos) < left) { - return false; - } - // Read until nothing is left. 
- while (left != 0) { - auto item = std::make_unique(); - if (!item->read(data, pos, left)) { + for (size_t i = 0; i < value_.size(); i++) { + if (!value_[i]->read(data, pos, left)) { return false; } - value_.push_back(std::move(item)); } return true; } @@ -200,47 +259,45 @@ template class Repeated { } return out; } + Message::ValidationResult validate(const Buffer::Instance& data, const uint64_t start_offset, + uint64_t& pos, uint64_t& left) { + if ((data.length() - pos) < left) { + return Message::ValidationNeedMoreData; + } -private: - std::vector> value_; -}; - -// Interface to Postgres message class. -class Message { -public: - virtual ~Message() = default; + // Validate until the end of the message. + uint64_t orig_pos = pos; + uint64_t orig_left = left; + while (left != 0) { + auto item = std::make_unique(); + Message::ValidationResult result = item->validate(data, start_offset, pos, left); + if (Message::ValidationOK != result) { + pos = orig_pos; + left = orig_left; + value_.clear(); + return result; + } + value_.push_back(std::move(item)); + } - // read method should read only as many bytes from data - // buffer as it is indicated in message's length field. - // "length" parameter indicates how many bytes were indicated in Postgres message's - // length field. "data" buffer may contain more bytes than "length". - virtual bool read(const Buffer::Instance& data, const uint64_t length) PURE; + return Message::ValidationOK; + } - // toString method provides displayable representation of - // the Postgres message. - virtual std::string toString() const PURE; +private: + std::vector> value_; }; // Sequence is tuple like structure, which binds together // set of several fields of different types. 
template class Sequence; -template -class Sequence : public Message { +template class Sequence { FirstField first_; Sequence remaining_; public: Sequence() = default; - std::string toString() const override { - return absl::StrCat(first_.toString(), remaining_.toString()); - } - - bool read(const Buffer::Instance& data, const uint64_t length) override { - uint64_t pos = 0; - uint64_t left = length; - return read(data, pos, left); - } + std::string toString() const { return absl::StrCat(first_.toString(), remaining_.toString()); } /** * Implementation of "read" method for variadic template. @@ -255,21 +312,56 @@ class Sequence : public Message { } return remaining_.read(data, pos, left); } + + Message::ValidationResult validate(const Buffer::Instance& data, const uint64_t start_offset, + uint64_t& pos, uint64_t& left) { + Message::ValidationResult result = first_.validate(data, start_offset, pos, left); + if (result != Message::ValidationOK) { + return result; + } + return remaining_.validate(data, start_offset, pos, left); + } }; // Terminal template definition for variadic Sequence template. -template <> class Sequence<> : public Message { +template <> class Sequence<> { public: Sequence<>() = default; - std::string toString() const override { return ""; } + std::string toString() const { return ""; } bool read(const Buffer::Instance&, uint64_t&, uint64_t&) { return true; } - bool read(const Buffer::Instance&, const uint64_t) override { return true; } + Message::ValidationResult validate(const Buffer::Instance&, const uint64_t, uint64_t&, + uint64_t& left) { + return left == 0 ? Message::ValidationOK : Message::ValidationFailed; + } +}; + +template class MessageImpl : public Message, public Sequence { +public: + ~MessageImpl() override = default; + bool read(const Buffer::Instance& data, const uint64_t length) override { + // Do not call read unless validation was successful. 
+ ASSERT(validation_result_ == ValidationOK); + uint64_t pos = 0; + uint64_t left = length; + return Sequence::read(data, pos, left); + } + Message::ValidationResult validate(const Buffer::Instance& data, const uint64_t start_pos, + const uint64_t length) override { + uint64_t pos = start_pos; + uint64_t left = length; + validation_result_ = Sequence::validate(data, start_pos, pos, left); + return validation_result_; + } + std::string toString() const override { return Sequence::toString(); } + +private: + // Message::ValidationResult validation_result_; }; // Helper function to create pointer to a Sequence structure and is used by Postgres // decoder after learning the type of Postgres message. template std::unique_ptr createMsgBodyReader() { - return std::make_unique>(); + return std::make_unique>(); } } // namespace PostgresProxy diff --git a/source/extensions/filters/network/rocketmq_proxy/active_message.cc b/source/extensions/filters/network/rocketmq_proxy/active_message.cc index 772806a5829c1..d510ccadfebe7 100644 --- a/source/extensions/filters/network/rocketmq_proxy/active_message.cc +++ b/source/extensions/filters/network/rocketmq_proxy/active_message.cc @@ -175,7 +175,7 @@ void ActiveMessage::onQueryTopicRoute() { int32_t read_queue_num = 0; if (metadata_fields.contains(RocketmqConstants::get().ReadQueueNum)) { read_queue_num = static_cast( - metadata_fields.at(RocketmqConstants::get().WriteQueueNum).number_value()); + metadata_fields.at(RocketmqConstants::get().ReadQueueNum).number_value()); } int32_t write_queue_num = 0; if (metadata_fields.contains(RocketmqConstants::get().WriteQueueNum)) { diff --git a/source/extensions/filters/network/rocketmq_proxy/router/router_impl.cc b/source/extensions/filters/network/rocketmq_proxy/router/router_impl.cc index 5086f685d7f63..7c24db78482f7 100644 --- a/source/extensions/filters/network/rocketmq_proxy/router/router_impl.cc +++ b/source/extensions/filters/network/rocketmq_proxy/router/router_impl.cc @@ -166,6 
+166,7 @@ void RouterImpl::UpstreamRequest::onPoolReady(Tcp::ConnectionPool::ConnectionDat } void RouterImpl::UpstreamRequest::onPoolFailure(Tcp::ConnectionPool::PoolFailureReason reason, + absl::string_view, Upstream::HostDescriptionConstSharedPtr host) { if (router_.handle_) { ENVOY_LOG(trace, "#onPoolFailure, reset cancellable handle to nullptr"); diff --git a/source/extensions/filters/network/rocketmq_proxy/router/router_impl.h b/source/extensions/filters/network/rocketmq_proxy/router/router_impl.h index 30e0cdbe07d62..38a61dcdfef2e 100644 --- a/source/extensions/filters/network/rocketmq_proxy/router/router_impl.h +++ b/source/extensions/filters/network/rocketmq_proxy/router/router_impl.h @@ -41,6 +41,7 @@ class RouterImpl : public Router, public Logger::Loggable UpstreamRequest(RouterImpl& router); void onPoolFailure(Tcp::ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, Upstream::HostDescriptionConstSharedPtr host) override; void onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn, diff --git a/source/extensions/filters/network/thrift_proxy/router/router_impl.cc b/source/extensions/filters/network/thrift_proxy/router/router_impl.cc index 7d8580b714e69..e675668c764d7 100644 --- a/source/extensions/filters/network/thrift_proxy/router/router_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/router/router_impl.cc @@ -362,13 +362,19 @@ void Router::onUpstreamData(Buffer::Instance& data, bool end_stream) { case MessageType::Reply: incClusterScopeCounter({upstream_resp_reply_}); if (callbacks_->responseSuccess()) { + upstream_request_->upstream_host_->outlierDetector().putResult( + Upstream::Outlier::Result::ExtOriginRequestSuccess); incClusterScopeCounter({upstream_resp_reply_success_}); } else { + upstream_request_->upstream_host_->outlierDetector().putResult( + Upstream::Outlier::Result::ExtOriginRequestFailed); incClusterScopeCounter({upstream_resp_reply_error_}); } break; case MessageType::Exception: + 
upstream_request_->upstream_host_->outlierDetector().putResult( + Upstream::Outlier::Result::ExtOriginRequestFailed); incClusterScopeCounter({upstream_resp_exception_}); break; @@ -382,6 +388,8 @@ void Router::onUpstreamData(Buffer::Instance& data, bool end_stream) { } else if (status == ThriftFilters::ResponseStatus::Reset) { // Note: invalid responses are not accounted in the response size histogram. ENVOY_STREAM_LOG(debug, "upstream reset", *callbacks_); + upstream_request_->upstream_host_->outlierDetector().putResult( + Upstream::Outlier::Result::ExtOriginRequestFailed); upstream_request_->resetStream(); return; } @@ -483,6 +491,7 @@ void Router::UpstreamRequest::releaseConnection(const bool close) { void Router::UpstreamRequest::resetStream() { releaseConnection(true); } void Router::UpstreamRequest::onPoolFailure(ConnectionPool::PoolFailureReason reason, + absl::string_view, Upstream::HostDescriptionConstSharedPtr host) { conn_pool_handle_ = nullptr; @@ -497,6 +506,8 @@ void Router::UpstreamRequest::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr bool continue_decoding = conn_pool_handle_ != nullptr; onUpstreamHostSelected(host); + host->outlierDetector().putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess); + conn_data_ = std::move(conn_data); conn_data_->addUpstreamCallbacks(parent_); conn_pool_handle_ = nullptr; @@ -566,12 +577,21 @@ void Router::UpstreamRequest::onResetStream(ConnectionPool::PoolFailureReason re true); break; case ConnectionPool::PoolFailureReason::LocalConnectionFailure: + upstream_host_->outlierDetector().putResult( + Upstream::Outlier::Result::LocalOriginConnectFailed); // Should only happen if we closed the connection, due to an error condition, in which case // we've already handled any possible downstream response. 
parent_.callbacks_->resetDownstreamConnection(); break; case ConnectionPool::PoolFailureReason::RemoteConnectionFailure: case ConnectionPool::PoolFailureReason::Timeout: + if (reason == ConnectionPool::PoolFailureReason::Timeout) { + upstream_host_->outlierDetector().putResult(Upstream::Outlier::Result::LocalOriginTimeout); + } else if (reason == ConnectionPool::PoolFailureReason::RemoteConnectionFailure) { + upstream_host_->outlierDetector().putResult( + Upstream::Outlier::Result::LocalOriginConnectFailed); + } + // TODO(zuercher): distinguish between these cases where appropriate (particularly timeout) if (!response_started_) { parent_.callbacks_->sendLocalReply( diff --git a/source/extensions/filters/network/thrift_proxy/router/router_impl.h b/source/extensions/filters/network/thrift_proxy/router/router_impl.h index 69d2e3a03d475..6fd9039dde434 100644 --- a/source/extensions/filters/network/thrift_proxy/router/router_impl.h +++ b/source/extensions/filters/network/thrift_proxy/router/router_impl.h @@ -234,6 +234,7 @@ class Router : public Tcp::ConnectionPool::UpstreamCallbacks, // Tcp::ConnectionPool::Callbacks void onPoolFailure(ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, Upstream::HostDescriptionConstSharedPtr host) override; void onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn, Upstream::HostDescriptionConstSharedPtr host) override; diff --git a/source/extensions/filters/network/wasm/wasm_filter.cc b/source/extensions/filters/network/wasm/wasm_filter.cc index 862382d10ee13..3f7cdbc1cee05 100644 --- a/source/extensions/filters/network/wasm/wasm_filter.cc +++ b/source/extensions/filters/network/wasm/wasm_filter.cc @@ -7,8 +7,8 @@ namespace Wasm { FilterConfig::FilterConfig(const envoy::extensions::filters::network::wasm::v3::Wasm& config, Server::Configuration::FactoryContext& context) - : tls_slot_( - ThreadLocal::TypedSlot::makeUnique(context.threadLocal())) { + : 
tls_slot_(ThreadLocal::TypedSlot::makeUnique( + context.threadLocal())) { plugin_ = std::make_shared( config.config(), context.direction(), context.localInfo(), &context.listenerMetadata()); @@ -16,7 +16,8 @@ FilterConfig::FilterConfig(const envoy::extensions::filters::network::wasm::v3:: auto callback = [plugin, this](Common::Wasm::WasmHandleSharedPtr base_wasm) { // NB: the Slot set() call doesn't complete inline, so all arguments must outlive this call. tls_slot_->set([base_wasm, plugin](Event::Dispatcher& dispatcher) { - return Common::Wasm::getOrCreateThreadLocalPlugin(base_wasm, plugin, dispatcher); + return std::make_shared( + Common::Wasm::getOrCreateThreadLocalPlugin(base_wasm, plugin, dispatcher)); }); }; diff --git a/source/extensions/filters/network/wasm/wasm_filter.h b/source/extensions/filters/network/wasm/wasm_filter.h index ad0d99e5a4a67..27c2c52d47141 100644 --- a/source/extensions/filters/network/wasm/wasm_filter.h +++ b/source/extensions/filters/network/wasm/wasm_filter.h @@ -16,7 +16,8 @@ namespace NetworkFilters { namespace Wasm { using Envoy::Extensions::Common::Wasm::Context; -using Envoy::Extensions::Common::Wasm::PluginHandle; +using Envoy::Extensions::Common::Wasm::PluginHandleSharedPtr; +using Envoy::Extensions::Common::Wasm::PluginHandleSharedPtrThreadLocal; using Envoy::Extensions::Common::Wasm::PluginSharedPtr; using Envoy::Extensions::Common::Wasm::Wasm; @@ -27,9 +28,9 @@ class FilterConfig : Logger::Loggable { std::shared_ptr createFilter() { Wasm* wasm = nullptr; - auto handle = tls_slot_->get(); - if (handle.has_value()) { - wasm = handle->wasm().get(); + PluginHandleSharedPtr handle = tls_slot_->get()->handle(); + if (handle->wasmHandle()) { + wasm = handle->wasmHandle()->wasm().get(); } if (!wasm || wasm->isFailed()) { if (plugin_->fail_open_) { @@ -37,17 +38,17 @@ class FilterConfig : Logger::Loggable { return nullptr; } else { // Fail closed is handled by an empty Context. 
- return std::make_shared(nullptr, 0, plugin_); + return std::make_shared(nullptr, 0, handle); } } - return std::make_shared(wasm, handle->rootContextId(), plugin_); + return std::make_shared(wasm, handle->rootContextId(), handle); } - Wasm* wasmForTest() { return tls_slot_->get()->wasm().get(); } + Wasm* wasmForTest() { return tls_slot_->get()->handle()->wasmHandle()->wasm().get(); } private: PluginSharedPtr plugin_; - ThreadLocal::TypedSlotPtr tls_slot_; + ThreadLocal::TypedSlotPtr tls_slot_; Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider_; }; diff --git a/source/extensions/matching/input_matchers/ip/BUILD b/source/extensions/matching/input_matchers/ip/BUILD new file mode 100644 index 0000000000000..fe2104b899c43 --- /dev/null +++ b/source/extensions/matching/input_matchers/ip/BUILD @@ -0,0 +1,33 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "ip_lib", + srcs = ["matcher.cc"], + hdrs = ["matcher.h"], + deps = [ + "//envoy/matcher:matcher_interface", + "//source/common/network:lc_trie_lib", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + deps = [ + ":ip_lib", + "//envoy/matcher:matcher_interface", + "//envoy/registry", + "//envoy/server:factory_context_interface", + "@envoy_api//envoy/extensions/matching/input_matchers/ip/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/matching/input_matchers/ip/config.cc b/source/extensions/matching/input_matchers/ip/config.cc new file mode 100644 index 0000000000000..8798dd4a007fe --- /dev/null +++ b/source/extensions/matching/input_matchers/ip/config.cc @@ -0,0 +1,47 @@ +#include "source/extensions/matching/input_matchers/ip/config.h" + +namespace Envoy { +namespace Extensions { +namespace Matching { +namespace InputMatchers { +namespace IP { + 
+Envoy::Matcher::InputMatcherFactoryCb +Config::createInputMatcherFactoryCb(const Protobuf::Message& config, + Server::Configuration::FactoryContext& context) { + const auto& ip_config = MessageUtil::downcastAndValidate< + const envoy::extensions::matching::input_matchers::ip::v3::Ip&>( + config, context.messageValidationVisitor()); + + const auto& cidr_ranges = ip_config.cidr_ranges(); + std::vector ranges; + ranges.reserve(cidr_ranges.size()); + for (const auto& cidr_range : cidr_ranges) { + const std::string& address = cidr_range.address_prefix(); + const uint32_t prefix_len = cidr_range.prefix_len().value(); + const auto range = Network::Address::CidrRange::create(address, prefix_len); + // We only assert that the range is valid because: + // * if "address" can't be parsed, it will throw an EnvoyException + // * prefix_len can't be < 0 as per the protobuf definition as an uint32_t + // * if prefix_len is too big, CidrRange::create clamps it to a valid value + // => it is thus not possible to create an invalid range. + ASSERT(range.isValid(), "address range should be valid!"); + ranges.emplace_back(std::move(range)); + } + + const std::string& stat_prefix = ip_config.stat_prefix(); + Stats::Scope& scope = context.scope(); + return [ranges, stat_prefix, &scope]() { + return std::make_unique(ranges, stat_prefix, scope); + }; +} +/** + * Static registration for the consistent hashing matcher. @see RegisterFactory. 
+ */ +REGISTER_FACTORY(Config, Envoy::Matcher::InputMatcherFactory); + +} // namespace IP +} // namespace InputMatchers +} // namespace Matching +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/matching/input_matchers/ip/config.h b/source/extensions/matching/input_matchers/ip/config.h new file mode 100644 index 0000000000000..ade3ecbfae1f1 --- /dev/null +++ b/source/extensions/matching/input_matchers/ip/config.h @@ -0,0 +1,33 @@ +#pragma once + +#include "envoy/extensions/matching/input_matchers/ip/v3/ip.pb.h" +#include "envoy/extensions/matching/input_matchers/ip/v3/ip.pb.validate.h" +#include "envoy/matcher/matcher.h" +#include "envoy/server/factory_context.h" + +#include "source/common/protobuf/utility.h" +#include "source/extensions/matching/input_matchers/ip/matcher.h" + +namespace Envoy { +namespace Extensions { +namespace Matching { +namespace InputMatchers { +namespace IP { + +class Config : public Envoy::Matcher::InputMatcherFactory { +public: + Envoy::Matcher::InputMatcherFactoryCb + createInputMatcherFactoryCb(const Protobuf::Message& config, + Server::Configuration::FactoryContext& factory_context) override; + + std::string name() const override { return "envoy.matching.matchers.ip"; } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique(); + } +}; +} // namespace IP +} // namespace InputMatchers +} // namespace Matching +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/matching/input_matchers/ip/matcher.cc b/source/extensions/matching/input_matchers/ip/matcher.cc new file mode 100644 index 0000000000000..00db60a8dd628 --- /dev/null +++ b/source/extensions/matching/input_matchers/ip/matcher.cc @@ -0,0 +1,49 @@ +#include "source/extensions/matching/input_matchers/ip/matcher.h" + +#include "source/common/network/utility.h" + +namespace Envoy { +namespace Extensions { +namespace Matching { +namespace InputMatchers { +namespace IP { + +namespace { + 
+MatcherStats generateStats(absl::string_view prefix, Stats::Scope& scope) { + return MatcherStats{IP_MATCHER_STATS(POOL_COUNTER_PREFIX(scope, prefix))}; +} + +} // namespace + +Matcher::Matcher(std::vector const& ranges, + absl::string_view stat_prefix, + Stats::Scope& stat_scope) + : // We could put "false" instead of "true". What matters is that the IP + // belongs to the trie. We could further optimize the storage of LcTrie in + // this case by implementing an LcTrie specialization that doesn't + // store any associated data. + trie_({{true, ranges}}), stats_(generateStats(stat_prefix, stat_scope)) {} + +bool Matcher::match(absl::optional input) { + if (!input) { + return false; + } + const absl::string_view ip_str = *input; + if (ip_str.empty()) { + return false; + } + const auto ip = Network::Utility::parseInternetAddressNoThrow(std::string{ip_str}); + if (!ip) { + stats_.ip_parsing_failed_.inc(); + ENVOY_LOG(debug, "IP matcher: unable to parse address '{}'", ip_str); + return false; + } + return !trie_.getData(ip).empty(); +} + +} // namespace IP +} // namespace InputMatchers +} // namespace Matching +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/matching/input_matchers/ip/matcher.h b/source/extensions/matching/input_matchers/ip/matcher.h new file mode 100644 index 0000000000000..ba035ad579ffb --- /dev/null +++ b/source/extensions/matching/input_matchers/ip/matcher.h @@ -0,0 +1,39 @@ +#pragma once + +#include + +#include "envoy/matcher/matcher.h" +#include "envoy/network/address.h" +#include "envoy/stats/stats_macros.h" + +#include "source/common/network/lc_trie.h" + +namespace Envoy { +namespace Extensions { +namespace Matching { +namespace InputMatchers { +namespace IP { + +#define IP_MATCHER_STATS(COUNTER) COUNTER(ip_parsing_failed) + +struct MatcherStats { + IP_MATCHER_STATS(GENERATE_COUNTER_STRUCT); +}; + +class Matcher : public Envoy::Matcher::InputMatcher, Logger::Loggable { +public: + Matcher(std::vector const& 
ranges, absl::string_view stat_prefix, + Stats::Scope& stat_scope); + bool match(absl::optional input) override; + absl::optional stats() const { return stats_; } + +private: + const Network::LcTrie::LcTrie trie_; + MatcherStats stats_; +}; + +} // namespace IP +} // namespace InputMatchers +} // namespace Matching +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/quic/crypto_stream/envoy_quic_crypto_server_stream.cc b/source/extensions/quic/crypto_stream/envoy_quic_crypto_server_stream.cc index 282a5ad4d5bc0..ad17d8512605f 100644 --- a/source/extensions/quic/crypto_stream/envoy_quic_crypto_server_stream.cc +++ b/source/extensions/quic/crypto_stream/envoy_quic_crypto_server_stream.cc @@ -7,7 +7,11 @@ std::unique_ptr EnvoyQuicCryptoServerStreamFactoryImpl::createEnvoyQuicCryptoServerStream( const quic::QuicCryptoServerConfig* crypto_config, quic::QuicCompressedCertsCache* compressed_certs_cache, quic::QuicSession* session, - quic::QuicCryptoServerStreamBase::Helper* helper) { + quic::QuicCryptoServerStreamBase::Helper* helper, + // Though this extension doesn't use the two parameters below, they might be used by + // downstreams. Do not remove them. 
+ OptRef /*transport_socket_factory*/, + Envoy::Event::Dispatcher& /*dispatcher*/) { return quic::CreateCryptoServerStream(crypto_config, compressed_certs_cache, session, helper); } diff --git a/source/extensions/quic/crypto_stream/envoy_quic_crypto_server_stream.h b/source/extensions/quic/crypto_stream/envoy_quic_crypto_server_stream.h index 1cf35b5c01c86..10bf47cf5e8ba 100644 --- a/source/extensions/quic/crypto_stream/envoy_quic_crypto_server_stream.h +++ b/source/extensions/quic/crypto_stream/envoy_quic_crypto_server_stream.h @@ -14,11 +14,12 @@ class EnvoyQuicCryptoServerStreamFactoryImpl : public EnvoyQuicCryptoServerStrea return std::make_unique(); } std::string name() const override { return "envoy.quic.crypto_stream.server.quiche"; } - std::unique_ptr - createEnvoyQuicCryptoServerStream(const quic::QuicCryptoServerConfig* crypto_config, - quic::QuicCompressedCertsCache* compressed_certs_cache, - quic::QuicSession* session, - quic::QuicCryptoServerStreamBase::Helper* helper) override; + std::unique_ptr createEnvoyQuicCryptoServerStream( + const quic::QuicCryptoServerConfig* crypto_config, + quic::QuicCompressedCertsCache* compressed_certs_cache, quic::QuicSession* session, + quic::QuicCryptoServerStreamBase::Helper* helper, + OptRef transport_socket_factory, + Envoy::Event::Dispatcher& dispatcher) override; }; DECLARE_FACTORY(EnvoyQuicCryptoServerStreamFactoryImpl); diff --git a/source/extensions/stat_sinks/metrics_service/config.cc b/source/extensions/stat_sinks/metrics_service/config.cc index a711335b01f75..ad34c75e63056 100644 --- a/source/extensions/stat_sinks/metrics_service/config.cc +++ b/source/extensions/stat_sinks/metrics_service/config.cc @@ -38,8 +38,9 @@ MetricsServiceSinkFactory::createStatsSink(const Protobuf::Message& config, return std::make_unique>( - grpc_metrics_streamer, sink_config.emit_tags_as_labels(), - PROTOBUF_GET_WRAPPED_OR_DEFAULT(sink_config, report_counters_as_deltas, false)); + grpc_metrics_streamer, + 
PROTOBUF_GET_WRAPPED_OR_DEFAULT(sink_config, report_counters_as_deltas, false), + sink_config.emit_tags_as_labels()); } ProtobufTypes::MessagePtr MetricsServiceSinkFactory::createEmptyConfigProto() { diff --git a/source/extensions/stat_sinks/wasm/wasm_stat_sink_impl.h b/source/extensions/stat_sinks/wasm/wasm_stat_sink_impl.h index e3099b85874e2..d9459bb45fc83 100644 --- a/source/extensions/stat_sinks/wasm/wasm_stat_sink_impl.h +++ b/source/extensions/stat_sinks/wasm/wasm_stat_sink_impl.h @@ -21,7 +21,7 @@ class WasmStatSink : public Stats::Sink { : plugin_(plugin), singleton_(singleton) {} void flush(Stats::MetricSnapshot& snapshot) override { - singleton_->wasm()->onStatsUpdate(plugin_, snapshot); + singleton_->wasmHandle()->wasm()->onStatsUpdate(plugin_, snapshot); } void setSingleton(PluginHandleSharedPtr singleton) { diff --git a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc index aeeb94a489af2..e8064a6bd2a92 100644 --- a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc +++ b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc @@ -29,7 +29,7 @@ class OpenTracingHTTPHeadersWriter : public opentracing::HTTPHeadersWriter { // opentracing::HTTPHeadersWriter opentracing::expected Set(opentracing::string_view key, opentracing::string_view value) const override { - Http::LowerCaseString lowercase_key{key}; + Http::LowerCaseString lowercase_key{{key.data(), key.size()}}; request_headers_.remove(lowercase_key); request_headers_.addCopy(std::move(lowercase_key), {value.data(), value.size()}); return {}; @@ -50,7 +50,7 @@ class OpenTracingHTTPHeadersReader : public opentracing::HTTPHeadersReader { // opentracing::HTTPHeadersReader opentracing::expected LookupKey(opentracing::string_view key) const override { - const auto entry = request_headers_.get(Http::LowerCaseString{key}); + const auto entry = request_headers_.get(Http::LowerCaseString{{key.data(), 
key.size()}}); if (!entry.empty()) { // This is an implicitly untrusted header, so only the first value is used. return opentracing::string_view{entry[0]->value().getStringView().data(), diff --git a/source/extensions/transport_sockets/alts/tsi_socket.cc b/source/extensions/transport_sockets/alts/tsi_socket.cc index 6b06552e4a20b..2ba20311b1332 100644 --- a/source/extensions/transport_sockets/alts/tsi_socket.cc +++ b/source/extensions/transport_sockets/alts/tsi_socket.cc @@ -372,7 +372,7 @@ TsiSocketFactory::TsiSocketFactory(HandshakerFactory handshaker_factory, bool TsiSocketFactory::implementsSecureTransport() const { return true; } Network::TransportSocketPtr -TsiSocketFactory::createTransportSocket(Network::TransportSocketOptionsSharedPtr) const { +TsiSocketFactory::createTransportSocket(Network::TransportSocketOptionsConstSharedPtr) const { return std::make_unique(handshaker_factory_, handshake_validator_); } diff --git a/source/extensions/transport_sockets/alts/tsi_socket.h b/source/extensions/transport_sockets/alts/tsi_socket.h index 8784ddcc10f17..24ce022a181df 100644 --- a/source/extensions/transport_sockets/alts/tsi_socket.h +++ b/source/extensions/transport_sockets/alts/tsi_socket.h @@ -130,7 +130,7 @@ class TsiSocketFactory : public Network::TransportSocketFactory { bool implementsSecureTransport() const override; bool usesProxyProtocolOptions() const override { return false; } Network::TransportSocketPtr - createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override; + createTransportSocket(Network::TransportSocketOptionsConstSharedPtr options) const override; private: HandshakerFactory handshaker_factory_; diff --git a/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc index 503c7898981db..66a93f45e9021 100644 --- a/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc +++ 
b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc @@ -19,7 +19,7 @@ namespace ProxyProtocol { UpstreamProxyProtocolSocket::UpstreamProxyProtocolSocket( Network::TransportSocketPtr&& transport_socket, - Network::TransportSocketOptionsSharedPtr options, ProxyProtocolConfig_Version version) + Network::TransportSocketOptionsConstSharedPtr options, ProxyProtocolConfig_Version version) : PassthroughSocket(std::move(transport_socket)), options_(options), version_(version) {} void UpstreamProxyProtocolSocket::setTransportSocketCallbacks( @@ -110,7 +110,7 @@ UpstreamProxyProtocolSocketFactory::UpstreamProxyProtocolSocketFactory( : transport_socket_factory_(std::move(transport_socket_factory)), config_(config) {} Network::TransportSocketPtr UpstreamProxyProtocolSocketFactory::createTransportSocket( - Network::TransportSocketOptionsSharedPtr options) const { + Network::TransportSocketOptionsConstSharedPtr options) const { auto inner_socket = transport_socket_factory_->createTransportSocket(options); if (inner_socket == nullptr) { return nullptr; diff --git a/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h index 05d787fcea8b6..521804758417c 100644 --- a/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h +++ b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h @@ -20,7 +20,7 @@ class UpstreamProxyProtocolSocket : public TransportSockets::PassthroughSocket, public Logger::Loggable { public: UpstreamProxyProtocolSocket(Network::TransportSocketPtr&& transport_socket, - Network::TransportSocketOptionsSharedPtr options, + Network::TransportSocketOptionsConstSharedPtr options, ProxyProtocolConfig_Version version); void setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) override; @@ -33,7 +33,7 @@ class UpstreamProxyProtocolSocket : public TransportSockets::PassthroughSocket, void generateHeaderV2(); Network::IoResult 
writeHeader(); - Network::TransportSocketOptionsSharedPtr options_; + Network::TransportSocketOptionsConstSharedPtr options_; Network::TransportSocketCallbacks* callbacks_{}; Buffer::OwnedImpl header_buffer_{}; ProxyProtocolConfig_Version version_{ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1}; @@ -46,7 +46,7 @@ class UpstreamProxyProtocolSocketFactory : public Network::TransportSocketFactor // Network::TransportSocketFactory Network::TransportSocketPtr - createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override; + createTransportSocket(Network::TransportSocketOptionsConstSharedPtr options) const override; bool implementsSecureTransport() const override; bool usesProxyProtocolOptions() const override { return true; } diff --git a/source/extensions/transport_sockets/starttls/starttls_socket.cc b/source/extensions/transport_sockets/starttls/starttls_socket.cc index 17c73aec17fd0..56e62d0133910 100644 --- a/source/extensions/transport_sockets/starttls/starttls_socket.cc +++ b/source/extensions/transport_sockets/starttls/starttls_socket.cc @@ -23,7 +23,7 @@ bool StartTlsSocket::startSecureTransport() { } Network::TransportSocketPtr StartTlsSocketFactory::createTransportSocket( - Network::TransportSocketOptionsSharedPtr transport_socket_options) const { + Network::TransportSocketOptionsConstSharedPtr transport_socket_options) const { return std::make_unique( raw_socket_factory_->createTransportSocket(transport_socket_options), tls_socket_factory_->createTransportSocket(transport_socket_options), diff --git a/source/extensions/transport_sockets/starttls/starttls_socket.h b/source/extensions/transport_sockets/starttls/starttls_socket.h index 6583cc6a4e85f..20f24da3a5b69 100644 --- a/source/extensions/transport_sockets/starttls/starttls_socket.h +++ b/source/extensions/transport_sockets/starttls/starttls_socket.h @@ -18,7 +18,7 @@ class StartTlsSocket : public Network::TransportSocket, Logger::Loggable(currentConfigHelper(), 
transport_socket_factory_->createTransportSocket(options)); } diff --git a/source/extensions/transport_sockets/tap/tap.h b/source/extensions/transport_sockets/tap/tap.h index 745b8f0a8c4e1..6e15f159e33c5 100644 --- a/source/extensions/transport_sockets/tap/tap.h +++ b/source/extensions/transport_sockets/tap/tap.h @@ -39,7 +39,7 @@ class TapSocketFactory : public Network::TransportSocketFactory, // Network::TransportSocketFactory Network::TransportSocketPtr - createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override; + createTransportSocket(Network::TransportSocketOptionsConstSharedPtr options) const override; bool implementsSecureTransport() const override; bool usesProxyProtocolOptions() const override; diff --git a/source/extensions/transport_sockets/tls/cert_validator/default_validator.cc b/source/extensions/transport_sockets/tls/cert_validator/default_validator.cc index cf4463f6e5a5e..06fa51331293d 100644 --- a/source/extensions/transport_sockets/tls/cert_validator/default_validator.cc +++ b/source/extensions/transport_sockets/tls/cert_validator/default_validator.cc @@ -143,11 +143,6 @@ int DefaultCertValidator::initializeSslContexts(std::vector contexts, const Envoy::Ssl::CertificateValidationContextConfig* cert_validation_config = config_; if (cert_validation_config != nullptr) { - if (!cert_validation_config->verifySubjectAltNameList().empty()) { - verify_subject_alt_name_list_ = cert_validation_config->verifySubjectAltNameList(); - verify_mode = verify_mode_validation_context; - } - if (!cert_validation_config->subjectAltNameMatchers().empty()) { for (const envoy::type::matcher::v3::StringMatcher& matcher : cert_validation_config->subjectAltNameMatchers()) { @@ -204,13 +199,12 @@ int DefaultCertValidator::doVerifyCertChain( } } - Envoy::Ssl::ClientValidationStatus validated = verifyCertificate( - &leaf_cert, - transport_socket_options && - !transport_socket_options->verifySubjectAltNameListOverride().empty() - ? 
transport_socket_options->verifySubjectAltNameListOverride() - : verify_subject_alt_name_list_, - subject_alt_name_matchers_); + Envoy::Ssl::ClientValidationStatus validated = + verifyCertificate(&leaf_cert, + transport_socket_options != nullptr + ? transport_socket_options->verifySubjectAltNameListOverride() + : std::vector{}, + subject_alt_name_matchers_); if (ssl_extended_info) { if (ssl_extended_info->certificateValidationStatus() == @@ -238,9 +232,12 @@ Envoy::Ssl::ClientValidationStatus DefaultCertValidator::verifyCertificate( validated = Envoy::Ssl::ClientValidationStatus::Validated; } - if (!subject_alt_name_matchers.empty() && !matchSubjectAltName(cert, subject_alt_name_matchers)) { - stats_.fail_verify_san_.inc(); - return Envoy::Ssl::ClientValidationStatus::Failed; + if (!subject_alt_name_matchers.empty()) { + if (!matchSubjectAltName(cert, subject_alt_name_matchers)) { + stats_.fail_verify_san_.inc(); + return Envoy::Ssl::ClientValidationStatus::Failed; + } + validated = Envoy::Ssl::ClientValidationStatus::Validated; } if (!verify_certificate_hash_list_.empty() || !verify_certificate_spki_list_.empty()) { @@ -381,12 +378,6 @@ void DefaultCertValidator::updateDigestForSessionId(bssl::ScopedEVP_MD_CTX& md, rc = EVP_DigestUpdate(md.get(), hash_buffer, hash_length); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); - - // verify_subject_alt_name_list_ can only be set with a ca_cert - for (const std::string& name : verify_subject_alt_name_list_) { - rc = EVP_DigestUpdate(md.get(), name.data(), name.size()); - RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); - } } for (const auto& hash : verify_certificate_hash_list_) { diff --git a/source/extensions/transport_sockets/tls/cert_validator/default_validator.h b/source/extensions/transport_sockets/tls/cert_validator/default_validator.h index 03a037ef33df7..fb7e7acab1f4e 100644 --- a/source/extensions/transport_sockets/tls/cert_validator/default_validator.h +++ 
b/source/extensions/transport_sockets/tls/cert_validator/default_validator.h @@ -116,7 +116,6 @@ class DefaultCertValidator : public CertValidator { std::vector subject_alt_name_matchers_; std::vector> verify_certificate_hash_list_; std::vector> verify_certificate_spki_list_; - std::vector verify_subject_alt_name_list_; bool verify_trusted_ca_{false}; }; diff --git a/source/extensions/transport_sockets/tls/ssl_socket.cc b/source/extensions/transport_sockets/tls/ssl_socket.cc index 98c2989b57bf7..c2eef31132e3d 100644 --- a/source/extensions/transport_sockets/tls/ssl_socket.cc +++ b/source/extensions/transport_sockets/tls/ssl_socket.cc @@ -46,7 +46,7 @@ class NotReadySslSocket : public Network::TransportSocket { } // namespace SslSocket::SslSocket(Envoy::Ssl::ContextSharedPtr ctx, InitialState state, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, Ssl::HandshakerFactoryCb handshaker_factory_cb) : transport_socket_options_(transport_socket_options), ctx_(std::dynamic_pointer_cast(ctx)), @@ -356,7 +356,7 @@ ClientSslSocketFactory::ClientSslSocketFactory(Envoy::Ssl::ClientContextConfigPt } Network::TransportSocketPtr ClientSslSocketFactory::createTransportSocket( - Network::TransportSocketOptionsSharedPtr transport_socket_options) const { + Network::TransportSocketOptionsConstSharedPtr transport_socket_options) const { // onAddOrUpdateSecret() could be invoked in the middle of checking the existence of ssl_ctx and // creating SslSocket using ssl_ctx. Capture ssl_ctx_ into a local variable so that we check and // use the same ssl_ctx to create SslSocket. 
@@ -402,7 +402,7 @@ Envoy::Ssl::ClientContextSharedPtr ClientSslSocketFactory::sslCtx() { } Network::TransportSocketPtr -ServerSslSocketFactory::createTransportSocket(Network::TransportSocketOptionsSharedPtr) const { +ServerSslSocketFactory::createTransportSocket(Network::TransportSocketOptionsConstSharedPtr) const { // onAddOrUpdateSecret() could be invoked in the middle of checking the existence of ssl_ctx and // creating SslSocket using ssl_ctx. Capture ssl_ctx_ into a local variable so that we check and // use the same ssl_ctx to create SslSocket. diff --git a/source/extensions/transport_sockets/tls/ssl_socket.h b/source/extensions/transport_sockets/tls/ssl_socket.h index 217c7abcff06e..186bebbabc067 100644 --- a/source/extensions/transport_sockets/tls/ssl_socket.h +++ b/source/extensions/transport_sockets/tls/ssl_socket.h @@ -48,7 +48,7 @@ class SslSocket : public Network::TransportSocket, protected Logger::Loggable { public: SslSocket(Envoy::Ssl::ContextSharedPtr ctx, InitialState state, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, Ssl::HandshakerFactoryCb handshaker_factory_cb); // Network::TransportSocket @@ -90,7 +90,7 @@ class SslSocket : public Network::TransportSocket, return callbacks_ != nullptr && callbacks_->connection().dispatcher().isThreadSafe(); } - const Network::TransportSocketOptionsSharedPtr transport_socket_options_; + const Network::TransportSocketOptionsConstSharedPtr transport_socket_options_; Network::TransportSocketCallbacks* callbacks_{}; ContextImplSharedPtr ctx_; uint64_t bytes_to_retry_{}; @@ -107,7 +107,7 @@ class ClientSslSocketFactory : public Network::TransportSocketFactory, Envoy::Ssl::ContextManager& manager, Stats::Scope& stats_scope); Network::TransportSocketPtr - createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override; + 
createTransportSocket(Network::TransportSocketOptionsConstSharedPtr options) const override; bool implementsSecureTransport() const override; bool usesProxyProtocolOptions() const override { return false; } bool supportsAlpn() const override { return true; } @@ -137,7 +137,7 @@ class ServerSslSocketFactory : public Network::TransportSocketFactory, const std::vector& server_names); Network::TransportSocketPtr - createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override; + createTransportSocket(Network::TransportSocketOptionsConstSharedPtr options) const override; bool implementsSecureTransport() const override; bool usesProxyProtocolOptions() const override { return false; } diff --git a/source/extensions/upstreams/http/tcp/upstream_request.h b/source/extensions/upstreams/http/tcp/upstream_request.h index 7cdd070f041a7..ce947b05f943e 100644 --- a/source/extensions/upstreams/http/tcp/upstream_request.h +++ b/source/extensions/upstreams/http/tcp/upstream_request.h @@ -49,9 +49,10 @@ class TcpConnPool : public Router::GenericConnPool, public Envoy::Tcp::Connectio // Tcp::ConnectionPool::Callbacks void onPoolFailure(ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, Upstream::HostDescriptionConstSharedPtr host) override { upstream_handle_ = nullptr; - callbacks_->onPoolFailure(reason, "", host); + callbacks_->onPoolFailure(reason, transport_failure_reason, host); } void onPoolReady(Envoy::Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, diff --git a/source/server/BUILD b/source/server/BUILD index 07b31d256467a..78b978d16a4f0 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -170,6 +170,7 @@ envoy_cc_library( "//envoy/server:drain_manager_interface", "//envoy/server:instance_interface", "//source/common/common:assert_lib", + "//source/common/common:callback_impl_lib", "//source/common/common:minimal_logger_lib", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", ], @@ -410,6 +411,7 @@ 
envoy_cc_library( "//source/common/protobuf:utility_lib", "//source/common/stream_info:stream_info_lib", "//source/extensions/filters/network/http_connection_manager:config", + "//source/common/quic:quic_stat_names_lib", "//source/extensions/upstreams/http/generic:config", "@envoy_api//envoy/admin/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", @@ -421,7 +423,6 @@ envoy_cc_library( "//source/common/quic:quic_factory_lib", "//source/common/quic:quic_transport_socket_factory_lib", "//source/common/quic:udp_gso_batch_writer_lib", - "//source/common/quic:quic_stat_names_lib", ]), ) diff --git a/source/server/active_tcp_listener.cc b/source/server/active_tcp_listener.cc index 880f234c8a7ba..a8a2120ba2a08 100644 --- a/source/server/active_tcp_listener.cc +++ b/source/server/active_tcp_listener.cc @@ -199,10 +199,8 @@ void ActiveTcpSocket::newConnection() { if (socket_->detectedTransportProtocol().empty()) { socket_->setDetectedTransportProtocol("raw_buffer"); } - // TODO(lambdai): add integration test - // TODO: Address issues in wider scope. See https://github.com/envoyproxy/envoy/issues/8925 - // Erase accept filter states because accept filters may not get the opportunity to clean up. - // Particularly the assigned events need to reset before assigning new events in the follow up. + // Clear the listener filter to ensure the file event registered by + // listener filter to be removed. reference https://github.com/envoyproxy/envoy/issues/8925. accept_filters_.clear(); // Create a new connection on this listener. 
listener_.newConnection(std::move(socket_), std::move(stream_info_)); diff --git a/source/server/admin/BUILD b/source/server/admin/BUILD index 533e22849b45f..19463acd7a35b 100644 --- a/source/server/admin/BUILD +++ b/source/server/admin/BUILD @@ -252,6 +252,8 @@ envoy_cc_library( "//envoy/server:admin_interface", "//envoy/server:instance_interface", "//source/common/buffer:buffer_lib", + "//source/common/common:matchers_lib", + "//source/common/common:statusor_lib", "//source/common/http:codes_lib", "//source/common/http:header_map_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", diff --git a/source/server/admin/config_dump_handler.cc b/source/server/admin/config_dump_handler.cc index 69c685d8a6939..ec48f7c97090f 100644 --- a/source/server/admin/config_dump_handler.cc +++ b/source/server/admin/config_dump_handler.cc @@ -3,6 +3,9 @@ #include "envoy/config/core/v3/health_check.pb.h" #include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "source/common/common/matchers.h" +#include "source/common/common/regex.h" +#include "source/common/common/statusor.h" #include "source/common/http/headers.h" #include "source/common/http/utility.h" #include "source/common/network/utility.h" @@ -12,6 +15,21 @@ namespace Envoy { namespace Server { namespace { + +// Validates that `field_mask` is valid for `message` and applies `TrimMessage`. +// Necessary because TrimMessage crashes if `field_mask` is invalid. +// Returns `true` on success. +bool checkFieldMaskAndTrimMessage(const Protobuf::FieldMask& field_mask, + Protobuf::Message& message) { + for (const auto& path : field_mask.paths()) { + if (!ProtobufUtil::FieldMaskUtil::GetFieldDescriptors(message.GetDescriptor(), path, nullptr)) { + return false; + } + } + ProtobufUtil::FieldMaskUtil::TrimMessage(field_mask, &message); + return true; +} + // Apply a field mask to a resource message. A simple field mask might look // like "cluster.name,cluster.alt_stat_name,last_updated" for a StaticCluster // resource. 
Unfortunately, since the "cluster" field is Any and the in-built @@ -31,7 +49,10 @@ namespace { // this to allow arbitrary indexing through Any fields. This is pretty // complicated, we would need to build a FieldMask tree similar to how the C++ // Protobuf library does this internally. -void trimResourceMessage(const Protobuf::FieldMask& field_mask, Protobuf::Message& message) { +/** + * @return true on success, false if `field_mask` is invalid. + */ +bool trimResourceMessage(const Protobuf::FieldMask& field_mask, Protobuf::Message& message) { const Protobuf::Descriptor* descriptor = message.GetDescriptor(); const Protobuf::Reflection* reflection = message.GetReflection(); // Figure out which paths cover Any fields. For each field, gather the paths to @@ -87,13 +108,15 @@ void trimResourceMessage(const Protobuf::FieldMask& field_mask, Protobuf::Messag inner_message.reset(dmf.GetPrototype(inner_descriptor)->New()); MessageUtil::unpackTo(any_message, *inner_message); // Trim message. - ProtobufUtil::FieldMaskUtil::TrimMessage(inner_field_mask, inner_message.get()); + if (!checkFieldMaskAndTrimMessage(inner_field_mask, *inner_message)) { + return false; + } // Pack it back into the Any resource. any_message.PackFrom(*inner_message); reflection->MutableMessage(&message, any_field)->CopyFrom(any_message); } } - ProtobufUtil::FieldMaskUtil::TrimMessage(outer_field_mask, &message); + return checkFieldMaskAndTrimMessage(outer_field_mask, message); } // Helper method to get the resource parameter. 
@@ -111,6 +134,24 @@ bool shouldIncludeEdsInDump(const Http::Utility::QueryParams& params) { return Utility::queryParam(params, "include_eds") != absl::nullopt; } +absl::StatusOr +buildNameMatcher(const Http::Utility::QueryParams& params) { + const auto name_regex = Utility::queryParam(params, "name_regex"); + if (!name_regex.has_value()) { + return std::make_unique(); + } + envoy::type::matcher::v3::RegexMatcher matcher; + *matcher.mutable_google_re2() = envoy::type::matcher::v3::RegexMatcher::GoogleRE2(); + matcher.set_regex(*name_regex); + TRY_ASSERT_MAIN_THREAD + return Regex::Utility::parseRegex(matcher); + END_TRY + catch (EnvoyException& e) { + return absl::InvalidArgumentError( + absl::StrCat("Error while parsing name_regex from ", *name_regex, ": ", e.what())); + } +} + } // namespace ConfigDumpHandler::ConfigDumpHandler(ConfigTracker& config_tracker, Server::Instance& server) @@ -123,17 +164,27 @@ Http::Code ConfigDumpHandler::handlerConfigDump(absl::string_view url, const auto resource = resourceParam(query_params); const auto mask = maskParam(query_params); const bool include_eds = shouldIncludeEdsInDump(query_params); + const absl::StatusOr name_matcher = buildNameMatcher(query_params); + if (!name_matcher.ok()) { + response.add(name_matcher.status().ToString()); + response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Text); + return Http::Code::BadRequest; + } envoy::admin::v3::ConfigDump dump; + absl::optional> err; if (resource.has_value()) { - auto err = addResourceToDump(dump, mask, resource.value(), include_eds); - if (err.has_value()) { - response.add(err.value().second); - return err.value().first; - } + err = addResourceToDump(dump, mask, resource.value(), **name_matcher, include_eds); } else { - addAllConfigToDump(dump, mask, include_eds); + err = addAllConfigToDump(dump, mask, **name_matcher, include_eds); + } + if (err.has_value()) { + response_headers.addReference(Http::Headers::get().XContentTypeOptions, + 
Http::Headers::get().XContentTypeOptionValues.Nosniff); + response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Text); + response.add(err.value().second); + return err.value().first; } MessageUtil::redact(dump); @@ -142,22 +193,24 @@ Http::Code ConfigDumpHandler::handlerConfigDump(absl::string_view url, return Http::Code::OK; } -absl::optional> -ConfigDumpHandler::addResourceToDump(envoy::admin::v3::ConfigDump& dump, - const absl::optional& mask, - const std::string& resource, bool include_eds) const { +absl::optional> ConfigDumpHandler::addResourceToDump( + envoy::admin::v3::ConfigDump& dump, const absl::optional& mask, + const std::string& resource, const Matchers::StringMatcher& name_matcher, + bool include_eds) const { Envoy::Server::ConfigTracker::CbsMap callbacks_map = config_tracker_.getCallbacksMap(); if (include_eds) { // TODO(mattklein123): Add ability to see warming clusters in admin output. auto all_clusters = server_.clusterManager().clusters(); if (!all_clusters.active_clusters_.empty()) { - callbacks_map.emplace("endpoint", [this] { return dumpEndpointConfigs(); }); + callbacks_map.emplace("endpoint", [this](const Matchers::StringMatcher& name_matcher) { + return dumpEndpointConfigs(name_matcher); + }); } } for (const auto& [name, callback] : callbacks_map) { UNREFERENCED_PARAMETER(name); - ProtobufTypes::MessagePtr message = callback(); + ProtobufTypes::MessagePtr message = callback(name_matcher); ASSERT(message); auto field_descriptor = message->GetDescriptor()->FindFieldByName(resource); @@ -176,7 +229,11 @@ ConfigDumpHandler::addResourceToDump(envoy::admin::v3::ConfigDump& dump, if (mask.has_value()) { Protobuf::FieldMask field_mask; ProtobufUtil::FieldMaskUtil::FromString(mask.value(), &field_mask); - trimResourceMessage(field_mask, msg); + if (!trimResourceMessage(field_mask, msg)) { + return absl::optional>{std::make_pair( + Http::Code::BadRequest, absl::StrCat("FieldMask ", field_mask.DebugString(), + " could not be 
successfully used."))}; + } } auto* config = dump.add_configs(); config->PackFrom(msg); @@ -191,21 +248,23 @@ ConfigDumpHandler::addResourceToDump(envoy::admin::v3::ConfigDump& dump, std::make_pair(Http::Code::NotFound, fmt::format("{} not found in config dump", resource))}; } -void ConfigDumpHandler::addAllConfigToDump(envoy::admin::v3::ConfigDump& dump, - const absl::optional& mask, - bool include_eds) const { +absl::optional> ConfigDumpHandler::addAllConfigToDump( + envoy::admin::v3::ConfigDump& dump, const absl::optional& mask, + const Matchers::StringMatcher& name_matcher, bool include_eds) const { Envoy::Server::ConfigTracker::CbsMap callbacks_map = config_tracker_.getCallbacksMap(); if (include_eds) { // TODO(mattklein123): Add ability to see warming clusters in admin output. auto all_clusters = server_.clusterManager().clusters(); if (!all_clusters.active_clusters_.empty()) { - callbacks_map.emplace("endpoint", [this] { return dumpEndpointConfigs(); }); + callbacks_map.emplace("endpoint", [this](const Matchers::StringMatcher& name_matcher) { + return dumpEndpointConfigs(name_matcher); + }); } } for (const auto& [name, callback] : callbacks_map) { UNREFERENCED_PARAMETER(name); - ProtobufTypes::MessagePtr message = callback(); + ProtobufTypes::MessagePtr message = callback(name_matcher); ASSERT(message); if (mask.has_value()) { @@ -213,15 +272,21 @@ void ConfigDumpHandler::addAllConfigToDump(envoy::admin::v3::ConfigDump& dump, ProtobufUtil::FieldMaskUtil::FromString(mask.value(), &field_mask); // We don't use trimMessage() above here since masks don't support // indexing through repeated fields. 
- ProtobufUtil::FieldMaskUtil::TrimMessage(field_mask, message.get()); + if (!checkFieldMaskAndTrimMessage(field_mask, *message)) { + return absl::optional>{std::make_pair( + Http::Code::BadRequest, absl::StrCat("FieldMask ", field_mask.DebugString(), + " could not be successfully used."))}; + } } auto* config = dump.add_configs(); config->PackFrom(*message); } + return absl::nullopt; } -ProtobufTypes::MessagePtr ConfigDumpHandler::dumpEndpointConfigs() const { +ProtobufTypes::MessagePtr +ConfigDumpHandler::dumpEndpointConfigs(const Matchers::StringMatcher& name_matcher) const { auto endpoint_config_dump = std::make_unique(); // TODO(mattklein123): Add ability to see warming clusters in admin output. auto all_clusters = server_.clusterManager().clusters(); @@ -236,6 +301,9 @@ ProtobufTypes::MessagePtr ConfigDumpHandler::dumpEndpointConfigs() const { } else { cluster_load_assignment.set_cluster_name(cluster_info->name()); } + if (!name_matcher.match(cluster_load_assignment.cluster_name())) { + continue; + } auto& policy = *cluster_load_assignment.mutable_policy(); for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) { @@ -269,7 +337,6 @@ ProtobufTypes::MessagePtr ConfigDumpHandler::dumpEndpointConfigs() const { } } } - if (cluster_info->addedViaApi()) { auto& dynamic_endpoint = *endpoint_config_dump->mutable_dynamic_endpoint_configs()->Add(); dynamic_endpoint.mutable_endpoint_config()->PackFrom(cluster_load_assignment); diff --git a/source/server/admin/config_dump_handler.h b/source/server/admin/config_dump_handler.h index f1531ddee04a1..d64801eaa8211 100644 --- a/source/server/admin/config_dump_handler.h +++ b/source/server/admin/config_dump_handler.h @@ -27,8 +27,9 @@ class ConfigDumpHandler : public HandlerContextBase { Buffer::Instance& response, AdminStream&) const; private: - void addAllConfigToDump(envoy::admin::v3::ConfigDump& dump, - const absl::optional& mask, bool include_eds) const; + absl::optional> + 
addAllConfigToDump(envoy::admin::v3::ConfigDump& dump, const absl::optional& mask, + const Matchers::StringMatcher& name_matcher, bool include_eds) const; /** * Add the config matching the passed resource to the passed config dump. * @return absl::nullopt on success, else the Http::Code and an error message that should be added @@ -36,7 +37,8 @@ class ConfigDumpHandler : public HandlerContextBase { */ absl::optional> addResourceToDump(envoy::admin::v3::ConfigDump& dump, const absl::optional& mask, - const std::string& resource, bool include_eds) const; + const std::string& resource, const Matchers::StringMatcher& name_matcher, + bool include_eds) const; /** * Helper methods to add endpoints config @@ -44,7 +46,7 @@ class ConfigDumpHandler : public HandlerContextBase { void addLbEndpoint(const Upstream::HostSharedPtr& host, envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint) const; - ProtobufTypes::MessagePtr dumpEndpointConfigs() const; + ProtobufTypes::MessagePtr dumpEndpointConfigs(const Matchers::StringMatcher& name_matcher) const; ConfigTracker& config_tracker_; }; diff --git a/source/server/admin/stats_handler.cc b/source/server/admin/stats_handler.cc index 0a9bd56257a43..d8426f10abc57 100644 --- a/source/server/admin/stats_handler.cc +++ b/source/server/admin/stats_handler.cc @@ -74,7 +74,6 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, server_.flushStats(); } - Http::Code rc = Http::Code::OK; const Http::Utility::QueryParams params = Http::Utility::parseAndDecodeQueryString(url); const bool used_only = params.find("usedonly") != params.end(); @@ -104,38 +103,27 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, } } - if (const auto format_value = Utility::formatParam(params)) { - if (format_value.value() == "json") { - response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); - response.add( - statsAsJson(all_stats, text_readouts, server_.stats().histograms(), used_only, regex)); 
- } else if (format_value.value() == "prometheus") { - return handlerPrometheusStats(url, response_headers, response, admin_stream); - } else { - response.add("usage: /stats?format=json or /stats?format=prometheus \n"); - response.add("\n"); - rc = Http::Code::NotFound; - } - } else { // Display plain stats if format query param is not there. - for (const auto& text_readout : text_readouts) { - response.add(fmt::format("{}: \"{}\"\n", text_readout.first, - Html::Utility::sanitize(text_readout.second))); - } - for (const auto& stat : all_stats) { - response.add(fmt::format("{}: {}\n", stat.first, stat.second)); - } - std::map all_histograms; - for (const Stats::ParentHistogramSharedPtr& histogram : server_.stats().histograms()) { - if (shouldShowMetric(*histogram, used_only, regex)) { - auto insert = all_histograms.emplace(histogram->name(), histogram->quantileSummary()); - ASSERT(insert.second); // No duplicates expected. - } - } - for (const auto& histogram : all_histograms) { - response.add(fmt::format("{}: {}\n", histogram.first, histogram.second)); - } + absl::optional format_value = Utility::formatParam(params); + if (!format_value.has_value()) { + // Display plain stats if format query param is not there. 
+ statsAsText(all_stats, text_readouts, server_.stats().histograms(), used_only, regex, response); + return Http::Code::OK; + } + + if (format_value.value() == "json") { + response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); + response.add( + statsAsJson(all_stats, text_readouts, server_.stats().histograms(), used_only, regex)); + return Http::Code::OK; + } + + if (format_value.value() == "prometheus") { + return handlerPrometheusStats(url, response_headers, response, admin_stream); } - return rc; + + response.add("usage: /stats?format=json or /stats?format=prometheus \n"); + response.add("\n"); + return Http::Code::NotFound; } Http::Code StatsHandler::handlerPrometheusStats(absl::string_view path_and_query, @@ -174,11 +162,36 @@ Http::Code StatsHandler::handlerContention(absl::string_view, return Http::Code::OK; } +void StatsHandler::statsAsText(const std::map& all_stats, + const std::map& text_readouts, + const std::vector& histograms, + bool used_only, const absl::optional& regex, + Buffer::Instance& response) { + // Display plain stats if format query param is not there. + for (const auto& text_readout : text_readouts) { + response.add(fmt::format("{}: \"{}\"\n", text_readout.first, + Html::Utility::sanitize(text_readout.second))); + } + for (const auto& stat : all_stats) { + response.add(fmt::format("{}: {}\n", stat.first, stat.second)); + } + std::map all_histograms; + for (const Stats::ParentHistogramSharedPtr& histogram : histograms) { + if (shouldShowMetric(*histogram, used_only, regex)) { + auto insert = all_histograms.emplace(histogram->name(), histogram->quantileSummary()); + ASSERT(insert.second); // No duplicates expected. 
+ } + } + for (const auto& histogram : all_histograms) { + response.add(fmt::format("{}: {}\n", histogram.first, histogram.second)); + } +} + std::string StatsHandler::statsAsJson(const std::map& all_stats, const std::map& text_readouts, const std::vector& all_histograms, - const bool used_only, const absl::optional regex, + const bool used_only, const absl::optional& regex, const bool pretty_print) { ProtobufWkt::Struct document; diff --git a/source/server/admin/stats_handler.h b/source/server/admin/stats_handler.h index 7934c19c11d1c..84ba69fbde949 100644 --- a/source/server/admin/stats_handler.h +++ b/source/server/admin/stats_handler.h @@ -60,9 +60,14 @@ class StatsHandler : public HandlerContextBase { static std::string statsAsJson(const std::map& all_stats, const std::map& text_readouts, const std::vector& all_histograms, - bool used_only, - const absl::optional regex = absl::nullopt, + bool used_only, const absl::optional& regex, bool pretty_print = false); + + void statsAsText(const std::map& all_stats, + const std::map& text_readouts, + const std::vector& all_histograms, + bool used_only, const absl::optional& regex, + Buffer::Instance& response); }; } // namespace Server diff --git a/source/server/api_listener_impl.h b/source/server/api_listener_impl.h index c0e2d0bc7c6c0..fdb8498ad1f0f 100644 --- a/source/server/api_listener_impl.h +++ b/source/server/api_listener_impl.h @@ -44,6 +44,10 @@ class ApiListenerImplBase : public ApiListener, // Network::DrainDecision // TODO(junr03): hook up draining to listener state management. 
bool drainClose() const override { return false; } + Common::CallbackHandlePtr addOnDrainCloseCb(DrainCloseCb) const override { + NOT_REACHED_GCOVR_EXCL_LINE; + return nullptr; + } protected: ApiListenerImplBase(const envoy::config::listener::v3::Listener& config, diff --git a/source/server/drain_manager_impl.cc b/source/server/drain_manager_impl.cc index 41e8b8db6decb..a1c3c2d696d39 100644 --- a/source/server/drain_manager_impl.cc +++ b/source/server/drain_manager_impl.cc @@ -3,8 +3,10 @@ #include #include #include +#include #include "envoy/config/listener/v3/listener.pb.h" +#include "envoy/event/dispatcher.h" #include "envoy/event/timer.h" #include "source/common/common/assert.h" @@ -13,8 +15,30 @@ namespace Envoy { namespace Server { DrainManagerImpl::DrainManagerImpl(Instance& server, - envoy::config::listener::v3::Listener::DrainType drain_type) - : server_(server), drain_type_(drain_type) {} + envoy::config::listener::v3::Listener::DrainType drain_type, + Event::Dispatcher& dispatcher) + : server_(server), dispatcher_(dispatcher), drain_type_(drain_type), + children_(Common::ThreadSafeCallbackManager::create()) {} + +DrainManagerPtr +DrainManagerImpl::createChildManager(Event::Dispatcher& dispatcher, + envoy::config::listener::v3::Listener::DrainType drain_type) { + auto child = std::make_unique(server_, drain_type, dispatcher); + + // Wire up the child so that when the parent starts draining, the child also sees the + // state-change + auto child_cb = children_->add(dispatcher, [child = child.get()] { + if (!child->draining_) { + child->startDrainSequence([] {}); + } + }); + child->parent_callback_handle_ = std::move(child_cb); + return child; +} + +DrainManagerPtr DrainManagerImpl::createChildManager(Event::Dispatcher& dispatcher) { + return createChildManager(dispatcher, drain_type_); +} bool DrainManagerImpl::drainClose() const { // If we are actively health check failed and the drain type is default, always drain close. 
@@ -39,7 +63,7 @@ bool DrainManagerImpl::drainClose() const { // P(return true) = elapsed time / drain timeout // If the drain deadline is exceeded, skip the probability calculation. - const MonotonicTime current_time = server_.dispatcher().timeSource().monotonicTime(); + const MonotonicTime current_time = dispatcher_.timeSource().monotonicTime(); if (current_time >= drain_deadline_) { return true; } @@ -52,15 +76,92 @@ bool DrainManagerImpl::drainClose() const { (server_.api().randomGenerator().random() % server_.options().drainTime().count()); } +Common::CallbackHandlePtr DrainManagerImpl::addOnDrainCloseCb(DrainCloseCb cb) const { + ASSERT(dispatcher_.isThreadSafe()); + + if (draining_) { + const MonotonicTime current_time = dispatcher_.timeSource().monotonicTime(); + + // Calculate the delay. If using an immediate drain-strategy or past our deadline, use + // a zero millisecond delay. Otherwise, pick a random value within the remaining time-span. + std::chrono::milliseconds drain_delay = + (server_.options().drainStrategy() != Server::DrainStrategy::Immediate && + current_time < drain_deadline_) + ? std::chrono::milliseconds(server_.api().randomGenerator().random() % + std::chrono::duration_cast( + drain_deadline_ - current_time) + .count()) + : std::chrono::milliseconds{0}; + cb(drain_delay); + return nullptr; + } + + return cbs_.add(cb); +} + +void DrainManagerImpl::addDrainCompleteCallback(std::function cb) { + ASSERT(draining_); + + // If the drain-tick-timer is active, add the callback to the queue. If not defined + // then it must have already expired, invoke the callback immediately. 
+ if (drain_tick_timer_) { + drain_complete_cbs_.push_back(cb); + } else { + cb(); + } +} + void DrainManagerImpl::startDrainSequence(std::function drain_complete_cb) { ASSERT(drain_complete_cb); - ASSERT(!draining_); + + // If we've already started draining (either through direct invocation or through + // parent-initiated draining), enqueue the drain_complete_cb and return + if (draining_) { + addDrainCompleteCallback(drain_complete_cb); + return; + } + ASSERT(!drain_tick_timer_); draining_ = true; - drain_tick_timer_ = server_.dispatcher().createTimer(drain_complete_cb); + + // Signal to child drain-managers to start their drain sequence + children_->runCallbacks(); + + // Schedule callback to run at end of drain time + drain_tick_timer_ = dispatcher_.createTimer([this]() { + for (auto& cb : drain_complete_cbs_) { + cb(); + } + drain_complete_cbs_.clear(); + drain_tick_timer_.reset(); + }); + addDrainCompleteCallback(drain_complete_cb); const std::chrono::seconds drain_delay(server_.options().drainTime()); drain_tick_timer_->enableTimer(drain_delay); - drain_deadline_ = server_.dispatcher().timeSource().monotonicTime() + drain_delay; + drain_deadline_ = dispatcher_.timeSource().monotonicTime() + drain_delay; + + // Call registered on-drain callbacks - with gradual delays + // Note: This will distribute drain events in the first 1/4th of the drain window + // to ensure that we initiate draining with enough time for graceful shutdowns. 
+ const MonotonicTime current_time = dispatcher_.timeSource().monotonicTime(); + std::chrono::seconds remaining_time{0}; + if (server_.options().drainStrategy() != Server::DrainStrategy::Immediate && + current_time < drain_deadline_) { + remaining_time = + std::chrono::duration_cast(drain_deadline_ - current_time); + ASSERT(server_.options().drainTime() >= remaining_time); + } + + uint32_t step_count = 0; + size_t num_cbs = cbs_.size(); + cbs_.runCallbacksWith([&]() { + // switch to floating-point math to avoid issues with integer division + std::chrono::milliseconds delay{static_cast( + static_cast(step_count) / 4 / num_cbs * + std::chrono::duration_cast(remaining_time).count())}; + step_count++; + return delay; + }); } void DrainManagerImpl::startParentShutdownSequence() { diff --git a/source/server/drain_manager_impl.h b/source/server/drain_manager_impl.h index 352d0669b38ae..6c20ec8b04c7e 100644 --- a/source/server/drain_manager_impl.h +++ b/source/server/drain_manager_impl.h @@ -1,13 +1,17 @@ #pragma once +#include #include +#include #include "envoy/common/time.h" #include "envoy/config/listener/v3/listener.pb.h" +#include "envoy/event/dispatcher.h" #include "envoy/event/timer.h" #include "envoy/server/drain_manager.h" #include "envoy/server/instance.h" +#include "source/common/common/callback_impl.h" #include "source/common/common/logger.h" namespace Envoy { @@ -21,23 +25,41 @@ namespace Server { */ class DrainManagerImpl : Logger::Loggable, public DrainManager { public: - DrainManagerImpl(Instance& server, envoy::config::listener::v3::Listener::DrainType drain_type); + DrainManagerImpl(Instance& server, envoy::config::listener::v3::Listener::DrainType drain_type, + Event::Dispatcher& dispatcher); // Network::DrainDecision bool drainClose() const override; + Common::CallbackHandlePtr addOnDrainCloseCb(DrainCloseCb cb) const override; // Server::DrainManager void startDrainSequence(std::function drain_complete_cb) override; bool draining() const override { 
return draining_; } void startParentShutdownSequence() override; + DrainManagerPtr + createChildManager(Event::Dispatcher& dispatcher, + envoy::config::listener::v3::Listener::DrainType drain_type) override; + DrainManagerPtr createChildManager(Event::Dispatcher& dispatcher) override; private: + void addDrainCompleteCallback(std::function cb); + Instance& server_; + Event::Dispatcher& dispatcher_; const envoy::config::listener::v3::Listener::DrainType drain_type_; std::atomic draining_{false}; Event::TimerPtr drain_tick_timer_; MonotonicTime drain_deadline_; + mutable Common::CallbackManager cbs_{}; + std::vector> drain_complete_cbs_{}; + + // Callbacks called by startDrainSequence to cascade/proxy to children + std::shared_ptr children_; + + // Callback handle parent will invoke to initiate drain-sequence. Created and set + // by the parent drain-manager. + Common::CallbackHandlePtr parent_callback_handle_; Event::TimerPtr parent_shutdown_timer_; }; diff --git a/source/server/filter_chain_manager_impl.cc b/source/server/filter_chain_manager_impl.cc index 427b3cb0eeef8..095d16eb6b505 100644 --- a/source/server/filter_chain_manager_impl.cc +++ b/source/server/filter_chain_manager_impl.cc @@ -19,7 +19,9 @@ namespace Server { namespace { -// Return a fake address for use when either the source or destination is UDS. +// Return a fake address for use when either the source or destination is unix domain socket. +// This address will only match the fallback matcher of 0.0.0.0/0, which is the default +// when no IP matcher is configured. Network::Address::InstanceConstSharedPtr fakeAddress() { CONSTRUCT_ON_FIRST_USE(Network::Address::InstanceConstSharedPtr, Network::Utility::parseInternetAddress("255.255.255.255")); @@ -174,20 +176,23 @@ void FilterChainManagerImpl::addFilterChains( } filter_chains.insert({filter_chain_match, filter_chain->name()}); - // Validate IP addresses. 
- std::vector destination_ips; - destination_ips.reserve(filter_chain_match.prefix_ranges().size()); - for (const auto& destination_ip : filter_chain_match.prefix_ranges()) { - const auto& cidr_range = Network::Address::CidrRange::create(destination_ip); - destination_ips.push_back(cidr_range.asString()); - } + auto createAddressVector = [](const auto& prefix_ranges) -> std::vector { + std::vector ips; + ips.reserve(prefix_ranges.size()); + for (const auto& ip : prefix_ranges) { + const auto& cidr_range = Network::Address::CidrRange::create(ip); + ips.push_back(cidr_range.asString()); + } + return ips; + }; - std::vector source_ips; - source_ips.reserve(filter_chain_match.source_prefix_ranges().size()); - for (const auto& source_ip : filter_chain_match.source_prefix_ranges()) { - const auto& cidr_range = Network::Address::CidrRange::create(source_ip); - source_ips.push_back(cidr_range.asString()); - } + // Validate IP addresses. + std::vector destination_ips = + createAddressVector(filter_chain_match.prefix_ranges()); + std::vector source_ips = + createAddressVector(filter_chain_match.source_prefix_ranges()); + std::vector direct_source_ips = + createAddressVector(filter_chain_match.direct_source_prefix_ranges()); std::vector server_names; // Reject partial wildcards, we don't match on them. 
@@ -215,8 +220,9 @@ void FilterChainManagerImpl::addFilterChains( destination_ports_map_, PROTOBUF_GET_WRAPPED_OR_DEFAULT(filter_chain_match, destination_port, 0), destination_ips, server_names, filter_chain_match.transport_protocol(), - filter_chain_match.application_protocols(), filter_chain_match.source_type(), source_ips, - filter_chain_match.source_ports(), filter_chain_impl); + filter_chain_match.application_protocols(), direct_source_ips, + filter_chain_match.source_type(), source_ips, filter_chain_match.source_ports(), + filter_chain_impl); fc_contexts_[*filter_chain] = filter_chain_impl; } @@ -265,6 +271,7 @@ void FilterChainManagerImpl::addFilterChainForDestinationPorts( const std::vector& destination_ips, const absl::Span server_names, const std::string& transport_protocol, const absl::Span application_protocols, + const std::vector& direct_source_ips, const envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type, const std::vector& source_ips, const absl::Span source_ports, @@ -275,26 +282,28 @@ void FilterChainManagerImpl::addFilterChainForDestinationPorts( } addFilterChainForDestinationIPs(destination_ports_map[destination_port].first, destination_ips, server_names, transport_protocol, application_protocols, - source_type, source_ips, source_ports, filter_chain); + direct_source_ips, source_type, source_ips, source_ports, + filter_chain); } void FilterChainManagerImpl::addFilterChainForDestinationIPs( DestinationIPsMap& destination_ips_map, const std::vector& destination_ips, const absl::Span server_names, const std::string& transport_protocol, const absl::Span application_protocols, + const std::vector& direct_source_ips, const envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type, const std::vector& source_ips, const absl::Span source_ports, const Network::FilterChainSharedPtr& filter_chain) { if (destination_ips.empty()) { addFilterChainForServerNames(destination_ips_map[EMPTY_STRING], server_names, 
- transport_protocol, application_protocols, source_type, source_ips, - source_ports, filter_chain); + transport_protocol, application_protocols, direct_source_ips, + source_type, source_ips, source_ports, filter_chain); } else { for (const auto& destination_ip : destination_ips) { addFilterChainForServerNames(destination_ips_map[destination_ip], server_names, - transport_protocol, application_protocols, source_type, - source_ips, source_ports, filter_chain); + transport_protocol, application_protocols, direct_source_ips, + source_type, source_ips, source_ports, filter_chain); } } } @@ -303,6 +312,7 @@ void FilterChainManagerImpl::addFilterChainForServerNames( ServerNamesMapSharedPtr& server_names_map_ptr, const absl::Span server_names, const std::string& transport_protocol, const absl::Span application_protocols, + const std::vector& direct_source_ips, const envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type, const std::vector& source_ips, const absl::Span source_ports, @@ -314,19 +324,19 @@ void FilterChainManagerImpl::addFilterChainForServerNames( if (server_names.empty()) { addFilterChainForApplicationProtocols(server_names_map[EMPTY_STRING][transport_protocol], - application_protocols, source_type, source_ips, - source_ports, filter_chain); + application_protocols, direct_source_ips, source_type, + source_ips, source_ports, filter_chain); } else { for (const auto& server_name : server_names) { if (isWildcardServerName(server_name)) { // Add mapping for the wildcard domain, i.e. ".example.com" for "*.example.com". 
addFilterChainForApplicationProtocols( server_names_map[server_name.substr(1)][transport_protocol], application_protocols, - source_type, source_ips, source_ports, filter_chain); + direct_source_ips, source_type, source_ips, source_ports, filter_chain); } else { addFilterChainForApplicationProtocols(server_names_map[server_name][transport_protocol], - application_protocols, source_type, source_ips, - source_ports, filter_chain); + application_protocols, direct_source_ips, source_type, + source_ips, source_ports, filter_chain); } } } @@ -335,27 +345,52 @@ void FilterChainManagerImpl::addFilterChainForServerNames( void FilterChainManagerImpl::addFilterChainForApplicationProtocols( ApplicationProtocolsMap& application_protocols_map, const absl::Span application_protocols, + const std::vector& direct_source_ips, const envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type, const std::vector& source_ips, const absl::Span source_ports, const Network::FilterChainSharedPtr& filter_chain) { if (application_protocols.empty()) { - addFilterChainForSourceTypes(application_protocols_map[EMPTY_STRING], source_type, source_ips, - source_ports, filter_chain); + addFilterChainForDirectSourceIPs(application_protocols_map[EMPTY_STRING].first, + direct_source_ips, source_type, source_ips, source_ports, + filter_chain); } else { for (const auto& application_protocol_ptr : application_protocols) { - addFilterChainForSourceTypes(application_protocols_map[*application_protocol_ptr], - source_type, source_ips, source_ports, filter_chain); + addFilterChainForDirectSourceIPs(application_protocols_map[*application_protocol_ptr].first, + direct_source_ips, source_type, source_ips, source_ports, + filter_chain); + } + } +} + +void FilterChainManagerImpl::addFilterChainForDirectSourceIPs( + DirectSourceIPsMap& direct_source_ips_map, const std::vector& direct_source_ips, + const envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type, + const 
std::vector& source_ips, + const absl::Span source_ports, + const Network::FilterChainSharedPtr& filter_chain) { + if (direct_source_ips.empty()) { + addFilterChainForSourceTypes(direct_source_ips_map[EMPTY_STRING], source_type, source_ips, + source_ports, filter_chain); + } else { + for (const auto& direct_source_ip : direct_source_ips) { + addFilterChainForSourceTypes(direct_source_ips_map[direct_source_ip], source_type, source_ips, + source_ports, filter_chain); } } } void FilterChainManagerImpl::addFilterChainForSourceTypes( - SourceTypesArray& source_types_array, + SourceTypesArraySharedPtr& source_types_array_ptr, const envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type, const std::vector& source_ips, const absl::Span source_ports, const Network::FilterChainSharedPtr& filter_chain) { + if (source_types_array_ptr == nullptr) { + source_types_array_ptr = std::make_shared(); + } + + SourceTypesArray& source_types_array = *source_types_array_ptr; if (source_ips.empty()) { addFilterChainForSourceIPs(source_types_array[source_type].first, EMPTY_STRING, source_ports, filter_chain); @@ -527,14 +562,31 @@ const Network::FilterChain* FilterChainManagerImpl::findFilterChainForApplicatio for (const auto& application_protocol : socket.requestedApplicationProtocols()) { const auto application_protocol_match = application_protocols_map.find(application_protocol); if (application_protocol_match != application_protocols_map.end()) { - return findFilterChainForSourceTypes(application_protocol_match->second, socket); + return findFilterChainForDirectSourceIP(*application_protocol_match->second.second, socket); } } // Match on a filter chain without application protocol requirements. 
const auto any_protocol_match = application_protocols_map.find(EMPTY_STRING); if (any_protocol_match != application_protocols_map.end()) { - return findFilterChainForSourceTypes(any_protocol_match->second, socket); + return findFilterChainForDirectSourceIP(*any_protocol_match->second.second, socket); + } + + return nullptr; +} + +const Network::FilterChain* FilterChainManagerImpl::findFilterChainForDirectSourceIP( + const DirectSourceIPsTrie& direct_source_ips_trie, + const Network::ConnectionSocket& socket) const { + auto address = socket.addressProvider().directRemoteAddress(); + if (address->type() != Network::Address::Type::Ip) { + address = fakeAddress(); + } + + const auto& data = direct_source_ips_trie.getData(address); + if (!data.empty()) { + ASSERT(data.size() == 1); + return findFilterChainForSourceTypes(*data.back(), socket); } return nullptr; @@ -627,20 +679,34 @@ void FilterChainManagerImpl::convertIPsToTries() { UNREFERENCED_PARAMETER(server_name); for (auto& [transport_protocol, application_protocols_map] : transport_protocols_map) { UNREFERENCED_PARAMETER(transport_protocol); - for (auto& [application_protocol, source_arrays] : application_protocols_map) { + for (auto& [application_protocol, direct_source_ips_pair] : application_protocols_map) { UNREFERENCED_PARAMETER(application_protocol); - for (auto& [source_ips_map, source_ips_trie] : source_arrays) { - std::vector< - std::pair>> - source_ips_list; - source_ips_list.reserve(source_ips_map.size()); - - for (auto& [source_ip, source_port_map_ptr] : source_ips_map) { - source_ips_list.push_back(makeCidrListEntry(source_ip, source_port_map_ptr)); - } + auto& [direct_source_ips_map, direct_source_ips_trie] = direct_source_ips_pair; + + std::vector< + std::pair>> + direct_source_ips_list; + direct_source_ips_list.reserve(direct_source_ips_map.size()); - source_ips_trie = std::make_unique(source_ips_list, true); + for (auto& [direct_source_ip, source_arrays_ptr] : direct_source_ips_map) { + 
direct_source_ips_list.push_back( + makeCidrListEntry(direct_source_ip, source_arrays_ptr)); + + for (auto& [source_ips_map, source_ips_trie] : *source_arrays_ptr) { + std::vector< + std::pair>> + source_ips_list; + source_ips_list.reserve(source_ips_map.size()); + + for (auto& [source_ip, source_port_map_ptr] : source_ips_map) { + source_ips_list.push_back(makeCidrListEntry(source_ip, source_port_map_ptr)); + } + + source_ips_trie = std::make_unique(source_ips_list, true); + } } + direct_source_ips_trie = + std::make_unique(direct_source_ips_list, true); } } } diff --git a/source/server/filter_chain_manager_impl.h b/source/server/filter_chain_manager_impl.h index bb7b9cd32af0b..4e6caf625dbf9 100644 --- a/source/server/filter_chain_manager_impl.h +++ b/source/server/filter_chain_manager_impl.h @@ -47,6 +47,10 @@ class PerFilterChainFactoryContextImpl : public Configuration::FilterChainFactor // DrainDecision bool drainClose() const override; + Common::CallbackHandlePtr addOnDrainCloseCb(DrainCloseCb) const override { + NOT_REACHED_GCOVR_EXCL_LINE; + return nullptr; + } // Configuration::FactoryContext AccessLog::AccessLogManager& accessLogManager() override; @@ -236,7 +240,20 @@ class FilterChainManagerImpl : public Network::FilterChainManager, using SourceIPsTrie = Network::LcTrie::LcTrie; using SourceIPsTriePtr = std::unique_ptr; using SourceTypesArray = std::array, 3>; - using ApplicationProtocolsMap = absl::flat_hash_map; + using SourceTypesArraySharedPtr = std::shared_ptr; + using DirectSourceIPsMap = absl::flat_hash_map; + using DirectSourceIPsTrie = Network::LcTrie::LcTrie; + using DirectSourceIPsTriePtr = std::unique_ptr; + + // This would nominally be a `std::pair`, but that version crashes the Windows clang_cl compiler + // for unknown reasons. This variation, which is equivalent, does not crash the compiler. + // The `std::pair` version was confirmed to crash both clang 11 and clang 12. 
+ struct DirectSourceIPsPair { + DirectSourceIPsMap first; + DirectSourceIPsTriePtr second; + }; + + using ApplicationProtocolsMap = absl::flat_hash_map; using TransportProtocolsMap = absl::flat_hash_map; // Both exact server names and wildcard domains are part of the same map, in which wildcard // domains are prefixed with "." (i.e. ".example.com" for "*.example.com") to differentiate @@ -254,6 +271,7 @@ class FilterChainManagerImpl : public Network::FilterChainManager, const std::vector& destination_ips, const absl::Span server_names, const std::string& transport_protocol, const absl::Span application_protocols, + const std::vector& direct_source_ips, const envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type, const std::vector& source_ips, const absl::Span source_ports, @@ -262,6 +280,7 @@ class FilterChainManagerImpl : public Network::FilterChainManager, DestinationIPsMap& destination_ips_map, const std::vector& destination_ips, const absl::Span server_names, const std::string& transport_protocol, const absl::Span application_protocols, + const std::vector& direct_source_ips, const envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type, const std::vector& source_ips, const absl::Span source_ports, @@ -270,6 +289,7 @@ class FilterChainManagerImpl : public Network::FilterChainManager, ServerNamesMapSharedPtr& server_names_map_ptr, const absl::Span server_names, const std::string& transport_protocol, const absl::Span application_protocols, + const std::vector& direct_source_ips, const envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type, const std::vector& source_ips, const absl::Span source_ports, @@ -277,12 +297,19 @@ class FilterChainManagerImpl : public Network::FilterChainManager, void addFilterChainForApplicationProtocols( ApplicationProtocolsMap& application_protocol_map, const absl::Span application_protocols, + const std::vector& direct_source_ips, + const 
envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type, + const std::vector& source_ips, + const absl::Span source_ports, + const Network::FilterChainSharedPtr& filter_chain); + void addFilterChainForDirectSourceIPs( + DirectSourceIPsMap& direct_source_ips_map, const std::vector& direct_source_ips, const envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type, const std::vector& source_ips, const absl::Span source_ports, const Network::FilterChainSharedPtr& filter_chain); void addFilterChainForSourceTypes( - SourceTypesArray& source_types_array, + SourceTypesArraySharedPtr& source_types_array_ptr, const envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type, const std::vector& source_ips, const absl::Span source_ports, @@ -307,6 +334,9 @@ class FilterChainManagerImpl : public Network::FilterChainManager, findFilterChainForApplicationProtocols(const ApplicationProtocolsMap& application_protocols_map, const Network::ConnectionSocket& socket) const; const Network::FilterChain* + findFilterChainForDirectSourceIP(const DirectSourceIPsTrie& direct_source_ips_trie, + const Network::ConnectionSocket& socket) const; + const Network::FilterChain* findFilterChainForSourceTypes(const SourceTypesArray& source_types, const Network::ConnectionSocket& socket) const; diff --git a/source/server/listener_impl.cc b/source/server/listener_impl.cc index 0606862c8238d..3d6f483620980 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -296,12 +296,8 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, // ready. 
listener_init_target_.ready(); } - }) -#ifdef ENVOY_ENABLE_QUIC - , - quic_stat_names_(parent_.quicStatNames()) -#endif -{ + }), + quic_stat_names_(parent_.quicStatNames()) { const absl::optional runtime_val = listener_factory_context_->runtime().snapshot().get(cx_limit_runtime_key_); @@ -368,12 +364,8 @@ ListenerImpl::ListenerImpl(ListenerImpl& origin, [this] { ASSERT(workers_started_); parent_.inPlaceFilterChainUpdate(*this); - }) -#ifdef ENVOY_ENABLE_QUIC - , - quic_stat_names_(parent_.quicStatNames()) -#endif -{ + }), + quic_stat_names_(parent_.quicStatNames()) { buildAccessLog(); auto socket_type = Network::Utility::protobufAddressSocketType(config.address()); buildListenSocketOptions(socket_type); diff --git a/source/server/listener_impl.h b/source/server/listener_impl.h index d04613f030692..1fa60494715f3 100644 --- a/source/server/listener_impl.h +++ b/source/server/listener_impl.h @@ -17,14 +17,11 @@ #include "source/common/common/logger.h" #include "source/common/init/manager_impl.h" #include "source/common/init/target_impl.h" +#include "source/common/quic/quic_stat_names.h" #include "source/server/filter_chain_manager_impl.h" #include "absl/base/call_once.h" -#ifdef ENVOY_ENABLE_QUIC -#include "source/common/quic/quic_stat_names.h" -#endif - namespace Envoy { namespace Server { @@ -133,6 +130,10 @@ class ListenerFactoryContextBaseImpl final : public Configuration::FactoryContex bool drainClose() const override { return drain_manager_->drainClose() || server_.drainManager().drainClose(); } + Common::CallbackHandlePtr addOnDrainCloseCb(DrainCloseCb) const override { + NOT_REACHED_GCOVR_EXCL_LINE; + return nullptr; + } Server::DrainManager& drainManager(); private: @@ -427,9 +428,7 @@ class ListenerImpl final : public Network::ListenerConfig, // callback during the destroy of ListenerImpl. Init::WatcherImpl local_init_watcher_; -#ifdef ENVOY_ENABLE_QUIC Quic::QuicStatNames& quic_stat_names_; -#endif // to access ListenerManagerImpl::factory_. 
friend class ListenerFilterChainFactoryBuilder; diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index ca25b2ad23b18..b5de079b49fcc 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -240,7 +240,7 @@ Network::SocketSharedPtr ProdListenerComponentFactory::createListenSocket( DrainManagerPtr ProdListenerComponentFactory::createDrainManager( envoy::config::listener::v3::Listener::DrainType drain_type) { - return DrainManagerPtr{new DrainManagerImpl(server_, drain_type)}; + return DrainManagerPtr{new DrainManagerImpl(server_, drain_type, server_.dispatcher())}; } DrainingFilterChainsManager::DrainingFilterChainsManager(ListenerImplPtr&& draining_listener, @@ -255,15 +255,20 @@ ListenerManagerImpl::ListenerManagerImpl(Instance& server, : server_(server), factory_(listener_factory), scope_(server.stats().createScope("listener_manager.")), stats_(generateStats(*scope_)), config_tracker_entry_(server.admin().getConfigTracker().add( - "listeners", [this] { return dumpListenerConfigs(); })), - enable_dispatcher_stats_(enable_dispatcher_stats) { + "listeners", + [this](const Matchers::StringMatcher& name_matcher) { + return dumpListenerConfigs(name_matcher); + })), + enable_dispatcher_stats_(enable_dispatcher_stats), + quic_stat_names_(server_.stats().symbolTable()) { for (uint32_t i = 0; i < server.options().concurrency(); i++) { workers_.emplace_back( worker_factory.createWorker(i, server.overloadManager(), absl::StrCat("worker_", i))); } } -ProtobufTypes::MessagePtr ListenerManagerImpl::dumpListenerConfigs() { +ProtobufTypes::MessagePtr +ListenerManagerImpl::dumpListenerConfigs(const Matchers::StringMatcher& name_matcher) { auto config_dump = std::make_unique(); config_dump->set_version_info(lds_api_ != nullptr ? 
lds_api_->versionInfo() : ""); @@ -272,6 +277,9 @@ ProtobufTypes::MessagePtr ListenerManagerImpl::dumpListenerConfigs() { absl::flat_hash_map listener_map; for (const auto& listener : active_listeners_) { + if (!name_matcher.match(listener->config().name())) { + continue; + } if (listener->blockRemove()) { auto& static_listener = *config_dump->mutable_static_listeners()->Add(); static_listener.mutable_listener()->PackFrom(API_RECOVER_ORIGINAL(listener->config())); @@ -296,6 +304,9 @@ ProtobufTypes::MessagePtr ListenerManagerImpl::dumpListenerConfigs() { } for (const auto& listener : warming_listeners_) { + if (!name_matcher.match(listener->config().name())) { + continue; + } DynamicListener* dynamic_listener = getOrCreateDynamicListener(listener->name(), *config_dump, listener_map); DynamicListenerState* dump_listener = dynamic_listener->mutable_warming_state(); @@ -303,6 +314,9 @@ ProtobufTypes::MessagePtr ListenerManagerImpl::dumpListenerConfigs() { } for (const auto& draining_listener : draining_listeners_) { + if (!name_matcher.match(draining_listener.listener_->config().name())) { + continue; + } const auto& listener = draining_listener.listener_; DynamicListener* dynamic_listener = getOrCreateDynamicListener(listener->name(), *config_dump, listener_map); diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h index 8b1ee3f1d8b65..eefde4c3af893 100644 --- a/source/server/listener_manager_impl.h +++ b/source/server/listener_manager_impl.h @@ -18,15 +18,12 @@ #include "envoy/server/worker.h" #include "envoy/stats/scope.h" +#include "source/common/quic/quic_stat_names.h" #include "source/server/filter_chain_factory_context_callback.h" #include "source/server/filter_chain_manager_impl.h" #include "source/server/lds_api.h" #include "source/server/listener_impl.h" -#ifdef ENVOY_ENABLE_QUIC -#include "source/common/quic/quic_stat_names.h" -#endif - namespace Envoy { namespace Server { @@ -206,9 +203,7 @@ class ListenerManagerImpl : 
public ListenerManager, Logger::Loggable overridden_listener, ListenerImpl& listener, ListenerCompletionCallback completion_callback); - ProtobufTypes::MessagePtr dumpListenerConfigs(); + ProtobufTypes::MessagePtr dumpListenerConfigs(const Matchers::StringMatcher& name_matcher); static ListenerManagerStats generateStats(Stats::Scope& scope); static bool hasListenerWithAddress(const ListenerList& list, const Network::Address::Instance& address); @@ -324,9 +319,7 @@ class ListenerManagerImpl : public ListenerManager, Logger::Loggable> error_state_tracker_; FailureStates overall_error_state_; -#ifdef ENVOY_ENABLE_QUIC - Quic::QuicStatNames quic_stat_names_ = Quic::QuicStatNames(server_.stats().symbolTable()); -#endif + Quic::QuicStatNames quic_stat_names_; }; class ListenerFilterChainFactoryBuilder : public FilterChainFactoryBuilder { diff --git a/source/server/server.cc b/source/server/server.cc index 5b8520e2fffc4..9dc4000dac77b 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -530,8 +530,8 @@ void InstanceImpl::initialize(const Options& options, } else { ENVOY_LOG(warn, "No admin address given, so no admin HTTP server started."); } - config_tracker_entry_ = - admin_->getConfigTracker().add("bootstrap", [this] { return dumpBootstrapConfig(); }); + config_tracker_entry_ = admin_->getConfigTracker().add( + "bootstrap", [this](const Matchers::StringMatcher&) { return dumpBootstrapConfig(); }); if (initial_config.admin().address()) { admin_->addListenerToHandler(handler_.get()); } diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index a6461165fde22..427392a2df98b 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -989,6 +989,7 @@ name: accesslog - DT - UPE - NC + - OM typed_config: "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: /dev/null diff --git a/test/common/common/BUILD 
b/test/common/common/BUILD index 0e573f2be0ec8..a4705d1bc45f3 100644 --- a/test/common/common/BUILD +++ b/test/common/common/BUILD @@ -318,7 +318,10 @@ envoy_cc_test( envoy_cc_test( name = "callback_impl_test", srcs = ["callback_impl_test.cc"], - deps = ["//source/common/common:callback_impl_lib"], + deps = [ + "//source/common/common:callback_impl_lib", + "//test/mocks/event:event_mocks", + ], ) envoy_cc_benchmark_binary( diff --git a/test/common/common/base64_test.cc b/test/common/common/base64_test.cc index 3c6bf92a05740..e00ae7f998271 100644 --- a/test/common/common/base64_test.cc +++ b/test/common/common/base64_test.cc @@ -132,6 +132,47 @@ TEST(Base64Test, BinaryBufferEncode) { EXPECT_EQ("AAECAwgKCQCqvN4=", Base64::encode(buffer, 30)); } +TEST(Base64Test, CompletePadding) { + struct CompletePaddingBase64UrlTestCases { + std::string base64, base64_with_padding; + }; + + // For base64 encoding, there are only three length needed to test + // - 3n bytes => 4n bytes, no padding needed + // - 3n + 1 bytes => 4n + 2 bytes, 2 padding needed + // - 3n + 2 bytes => 4n + 3 bytes, 1 padding needed + CompletePaddingBase64UrlTestCases testCases[3] = { + // Payload text(3n bytes): + {"eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG8iLCJpYXQiOjE1MTYyMzkwMjJ" + "9", + // No padding added. + "eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG8iLCJpYXQiOjE1MTYyMzkwMjJ" + "9"}, + // Payload text(3n + 1 bytes): + {"eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2" + "MjM5MDIyfQ", + // 2 padding added. + "eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2" + "MjM5MDIyfQ=="}, + // Payload text(3n + 2 bytes): + {"eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lZSIsImlhdCI6MTUx" + "NjIzOTAyMn0", + // 1 padding added. + "eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lZSIsImlhdCI6MTUx" + "NjIzOTAyMn0="}}; + for (auto& tc : testCases) { + // Ensure these two base64 binaries are equivalent after decoding. 
+ EXPECT_EQ(Base64::decodeWithoutPadding(tc.base64), + Base64::decodeWithoutPadding(tc.base64_with_padding)); + // Ensure the `base64_with_padding` is correctly padded. + EXPECT_NE(Base64::decode(tc.base64_with_padding), ""); + + std::string base64_padded = tc.base64; + Base64::completePadding(base64_padded); + EXPECT_EQ(base64_padded, tc.base64_with_padding); + } +} + TEST(Base64UrlTest, EncodeString) { EXPECT_EQ("", Base64Url::encode("", 0)); EXPECT_EQ("AAA", Base64Url::encode("\0\0", 2)); diff --git a/test/common/common/callback_impl_test.cc b/test/common/common/callback_impl_test.cc index 6261766e2a2e5..10b1fe7e9cf88 100644 --- a/test/common/common/callback_impl_test.cc +++ b/test/common/common/callback_impl_test.cc @@ -1,5 +1,10 @@ +#include +#include + #include "source/common/common/callback_impl.h" +#include "test/mocks/event/mocks.h" + #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -53,5 +58,86 @@ TEST_F(CallbackManagerTest, DestroyManagerBeforeHandle) { handle.reset(); } +class ThreadSafeCallbackManagerTest : public testing::Test { +public: + MOCK_METHOD(void, called, (int arg)); +}; + +// Test basic behaviors of the thread-safe callback-manager with respect to callback registration, +// de-registration, and execution. 
+TEST_F(ThreadSafeCallbackManagerTest, All) { + InSequence s; + + testing::NiceMock cb_dispatcher; + ON_CALL(cb_dispatcher, post(_)).WillByDefault(Invoke([](std::function cb) { cb(); })); + + auto manager = ThreadSafeCallbackManager::create(); + + auto handle1 = manager->add(cb_dispatcher, [this]() -> void { called(5); }); + auto handle2 = manager->add(cb_dispatcher, [this]() -> void { called(10); }); + + EXPECT_CALL(*this, called(5)); + EXPECT_CALL(*this, called(10)); + manager->runCallbacks(); + + handle1.reset(); + EXPECT_CALL(*this, called(10)); + manager->runCallbacks(); + + EXPECT_CALL(*this, called(10)); + EXPECT_CALL(*this, called(20)); + auto handle3 = manager->add(cb_dispatcher, [this]() -> void { called(20); }); + manager->runCallbacks(); + handle3.reset(); + + EXPECT_CALL(*this, called(10)); + manager->runCallbacks(); +} + +// Validate that the handles returned from callback-registration can outlive the manager +// and can be destructed without error. +TEST_F(ThreadSafeCallbackManagerTest, DestroyManagerBeforeHandle) { + testing::NiceMock cb_dispatcher; + ON_CALL(cb_dispatcher, post(_)).WillByDefault(Invoke([](std::function cb) { cb(); })); + + CallbackHandlePtr handle; + { + auto manager = ThreadSafeCallbackManager::create(); + handle = manager->add(cb_dispatcher, [this]() -> void { called(5); }); + EXPECT_CALL(*this, called(5)); + manager->runCallbacks(); + } + EXPECT_NE(nullptr, handle); + // It should be safe to destruct the handle after the manager. + handle.reset(); +} + +// Validate that a callback added and removed from a thread (and thus dispatcher) that +// no longer exist is a safe operation. 
+TEST_F(ThreadSafeCallbackManagerTest, RegisterAndRemoveOnExpiredThread) { + auto manager = ThreadSafeCallbackManager::create(); + + testing::NiceMock cb_dispatcher; + ON_CALL(cb_dispatcher, post(_)).WillByDefault(Invoke([](std::function cb) { cb(); })); + + // Register a callback in a new thread and then remove it + auto t = std::thread([this, manager = manager.get()] { + testing::NiceMock cb_dispatcher; + ON_CALL(cb_dispatcher, post(_)).WillByDefault(Invoke([](std::function cb) { cb(); })); + + auto handle = manager->add(cb_dispatcher, [this]() { called(20); }); + handle.reset(); + }); + + // Add another callback on the main thread + auto handle = manager->add(cb_dispatcher, [this]() { called(10); }); + + // Validate that we can wait for the above thread to terminate (and de-register the + // callback), then run the remaining callbacks. + t.join(); + EXPECT_CALL(*this, called(10)); + manager->runCallbacks(); +} + } // namespace Common } // namespace Envoy diff --git a/test/common/common/regex_test.cc b/test/common/common/regex_test.cc index 5d33d23bafbdf..24cd3efc555d5 100644 --- a/test/common/common/regex_test.cc +++ b/test/common/common/regex_test.cc @@ -17,9 +17,6 @@ TEST(Utility, ParseStdRegex) { EXPECT_THROW_WITH_REGEX(Utility::parseStdRegex("(+invalid)"), EnvoyException, "Invalid regex '\\(\\+invalid\\)': .+"); - EXPECT_THROW_WITH_REGEX(Utility::parseStdRegexAsCompiledMatcher("(+invalid)"), EnvoyException, - "Invalid regex '\\(\\+invalid\\)': .+"); - { std::regex regex = Utility::parseStdRegex("x*"); EXPECT_NE(0, regex.flags() & std::regex::optimize); @@ -30,15 +27,6 @@ TEST(Utility, ParseStdRegex) { EXPECT_NE(0, regex.flags() & std::regex::icase); EXPECT_EQ(0, regex.flags() & std::regex::optimize); } - - { - // Regression test to cover high-complexity regular expressions that throw on std::regex_match. 
- // Note that not all std::regex_match implementations will throw when matching against the - // expression below, but at least clang 9.0.0 under linux does. - auto matcher = Utility::parseStdRegexAsCompiledMatcher( - "|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||"); - EXPECT_FALSE(matcher->match("0")); - } } TEST(Utility, ParseRegex) { diff --git a/test/common/common/stl_helpers_test.cc b/test/common/common/stl_helpers_test.cc index 8f4b297c74055..4123ec9a0548a 100644 --- a/test/common/common/stl_helpers_test.cc +++ b/test/common/common/stl_helpers_test.cc @@ -22,4 +22,12 @@ TEST(StlHelpersTest, AccumulateToString) { [](const int& i) { return std::to_string(i); })); } +TEST(StlHelpersTest, ContainsReferenceTest) { + std::string str1{"1"}; + std::vector> numbers{str1}; + EXPECT_TRUE(containsReference(numbers, str1)); + std::string str2{"2"}; + EXPECT_FALSE(containsReference(numbers, str2)); +} + } // namespace Envoy diff --git a/test/common/config/config_provider_impl_test.cc b/test/common/config/config_provider_impl_test.cc index 6b3d6d36cc0f7..f269aaa77939d 100644 --- a/test/common/config/config_provider_impl_test.cc +++ b/test/common/config/config_provider_impl_test.cc @@ -140,8 +140,12 @@ class DummyConfigProviderManager : public ConfigProviderManagerImplBase { ~DummyConfigProviderManager() override = default; + ProtobufTypes::MessagePtr dumpConfigs() const { + return dumpConfigs(Matchers::UniversalStringMatcher()); + } + // Envoy::Config::ConfigProviderManagerImplBase - ProtobufTypes::MessagePtr dumpConfigs() const override { + ProtobufTypes::MessagePtr dumpConfigs(const Matchers::StringMatcher&) const override { auto config_dump = std::make_unique(); for (const auto& element : configSubscriptions()) { auto subscription = element.second.lock(); @@ -411,7 +415,8 @@ TEST_F(ConfigProviderImplTest, ConfigDump) { initialize(); // Empty dump first. 
auto message_ptr = - server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["dummy"](); + server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["dummy"]( + Matchers::UniversalStringMatcher()); const auto& dummy_config_dump = static_cast(*message_ptr); @@ -430,7 +435,8 @@ TEST_F(ConfigProviderImplTest, ConfigDump) { ConfigProviderPtr static_config = provider_manager_->createStaticConfigProvider( parseDummyConfigFromYaml(config_yaml), server_factory_context_, ConfigProviderManager::NullOptionalArg()); - message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["dummy"](); + message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["dummy"]( + Matchers::UniversalStringMatcher()); const auto& dummy_config_dump2 = static_cast(*message_ptr); TestUtility::loadFromYaml(R"EOF( @@ -457,7 +463,8 @@ TEST_F(ConfigProviderImplTest, ConfigDump) { const auto decoded_resources = TestUtility::decodeResources({dummy_config}, "a"); subscription.onConfigUpdate(decoded_resources.refvec_, "v1"); - message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["dummy"](); + message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["dummy"]( + Matchers::UniversalStringMatcher()); const auto& dummy_config_dump3 = static_cast(*message_ptr); TestUtility::loadFromYaml(R"EOF( @@ -475,7 +482,8 @@ TEST_F(ConfigProviderImplTest, ConfigDump) { ConfigProviderPtr static_config2 = provider_manager_->createStaticConfigProvider( parseDummyConfigFromYaml("a: another static dummy config"), server_factory_context_, ConfigProviderManager::NullOptionalArg()); - message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["dummy"](); + message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["dummy"]( + Matchers::UniversalStringMatcher()); const auto& dummy_config_dump4 = static_cast(*message_ptr); 
TestUtility::loadFromYaml(R"EOF( @@ -602,8 +610,12 @@ class DeltaDummyConfigProviderManager : public ConfigProviderManagerImplBase { DeltaDummyConfigProviderManager(Server::Admin& admin) : ConfigProviderManagerImplBase(admin, "dummy") {} + ProtobufTypes::MessagePtr dumpConfigs() const { + return dumpConfigs(Matchers::UniversalStringMatcher()); + } + // Envoy::Config::ConfigProviderManagerImplBase - ProtobufTypes::MessagePtr dumpConfigs() const override { + ProtobufTypes::MessagePtr dumpConfigs(const Matchers::StringMatcher&) const override { auto config_dump = std::make_unique(); for (const auto& element : configSubscriptions()) { auto subscription = element.second.lock(); diff --git a/test/common/config/subscription_factory_impl_test.cc b/test/common/config/subscription_factory_impl_test.cc index f41376504c025..c99d7558e9ee5 100644 --- a/test/common/config/subscription_factory_impl_test.cc +++ b/test/common/config/subscription_factory_impl_test.cc @@ -227,20 +227,6 @@ TEST_F(SubscriptionFactoryTest, FilesystemCollectionSubscriptionNonExistentFile) "'/blahblah' does not exist"); } -TEST_F(SubscriptionFactoryTest, LegacySubscription) { - envoy::config::core::v3::ConfigSource config; - auto* api_config_source = config.mutable_api_config_source(); - api_config_source->set_api_type( - envoy::config::core::v3::ApiConfigSource::hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY); - api_config_source->set_transport_api_version(envoy::config::core::v3::V3); - api_config_source->add_cluster_names("static_cluster"); - Upstream::ClusterManager::ClusterSet primary_clusters; - primary_clusters.insert("static_cluster"); - EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); - EXPECT_THROW_WITH_REGEX(subscriptionFromConfigSource(config)->start({"static_cluster"}), - EnvoyException, "REST_LEGACY no longer a supported ApiConfigSource.*"); -} - TEST_F(SubscriptionFactoryTest, HttpSubscriptionCustomRequestTimeout) { envoy::config::core::v3::ConfigSource config; 
auto* api_config_source = config.mutable_api_config_source(); diff --git a/test/common/config/utility_test.cc b/test/common/config/utility_test.cc index cd2ff60b96f36..adc3c22f24e0a 100644 --- a/test/common/config/utility_test.cc +++ b/test/common/config/utility_test.cc @@ -71,12 +71,10 @@ TEST(UtilityTest, ConfigSourceInitFetchTimeout) { TEST(UtilityTest, TranslateApiConfigSource) { envoy::config::core::v3::ApiConfigSource api_config_source_rest_legacy; - Utility::translateApiConfigSource("test_rest_legacy_cluster", 10000, - ApiType::get().UnsupportedRestLegacy, + Utility::translateApiConfigSource("test_rest_legacy_cluster", 10000, ApiType::get().Rest, api_config_source_rest_legacy); - EXPECT_EQ( - envoy::config::core::v3::ApiConfigSource::hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY, - api_config_source_rest_legacy.api_type()); + EXPECT_EQ(envoy::config::core::v3::ApiConfigSource::REST, + api_config_source_rest_legacy.api_type()); EXPECT_EQ(10000, DurationUtil::durationToMilliseconds(api_config_source_rest_legacy.refresh_delay())); EXPECT_EQ("test_rest_legacy_cluster", api_config_source_rest_legacy.cluster_names(0)); diff --git a/test/common/formatter/substitution_formatter_test.cc b/test/common/formatter/substitution_formatter_test.cc index ed1d38fbbea91..72fdcafdfef38 100644 --- a/test/common/formatter/substitution_formatter_test.cc +++ b/test/common/formatter/substitution_formatter_test.cc @@ -674,8 +674,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { { StreamInfoFormatter upstream_format("REQUESTED_SERVER_NAME"); std::string requested_server_name = "stub_server"; - EXPECT_CALL(stream_info, requestedServerName()) - .WillRepeatedly(ReturnRef(requested_server_name)); + stream_info.downstream_address_provider_->setRequestedServerName(requested_server_name); EXPECT_EQ("stub_server", upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, 
response_headers, response_trailers, @@ -686,8 +685,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { { StreamInfoFormatter upstream_format("REQUESTED_SERVER_NAME"); std::string requested_server_name; - EXPECT_CALL(stream_info, requestedServerName()) - .WillRepeatedly(ReturnRef(requested_server_name)); + stream_info.downstream_address_provider_->setRequestedServerName(requested_server_name); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 08f56b0a338fb..164f7d46f947d 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -293,11 +293,7 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponseWithDecoderPause) { // When create new stream, the stream info will be populated from the connection. TEST_F(HttpConnectionManagerImplTest, PopulateStreamInfo) { setup(true, "", false); - - absl::string_view server_name = "fake-server"; EXPECT_CALL(filter_callbacks_.connection_, id()).WillRepeatedly(Return(1234)); - EXPECT_CALL(filter_callbacks_.connection_, requestedServerName()) - .WillRepeatedly(Return(server_name)); // Set up the codec. Buffer::OwnedImpl fake_input("input"); @@ -308,7 +304,7 @@ TEST_F(HttpConnectionManagerImplTest, PopulateStreamInfo) { EXPECT_EQ(requestIDExtension().get(), decoder_->streamInfo().getRequestIDProvider()); EXPECT_EQ(ssl_connection_, decoder_->streamInfo().downstreamSslConnection()); EXPECT_EQ(1234U, decoder_->streamInfo().connectionID()); - EXPECT_EQ(server_name, decoder_->streamInfo().requestedServerName()); + EXPECT_EQ(server_name_, decoder_->streamInfo().downstreamAddressProvider().requestedServerName()); // Clean up. 
filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); diff --git a/test/common/http/conn_manager_impl_test_base.cc b/test/common/http/conn_manager_impl_test_base.cc index 5cce6546afe8a..9f7b81bb97ae1 100644 --- a/test/common/http/conn_manager_impl_test_base.cc +++ b/test/common/http/conn_manager_impl_test_base.cc @@ -70,6 +70,8 @@ void HttpConnectionManagerImplTest::setup(bool ssl, const std::string& server_na std::make_shared("0.0.0.0")); filter_callbacks_.connection_.stream_info_.downstream_address_provider_ ->setDirectRemoteAddressForTest(std::make_shared("0.0.0.0")); + filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setRequestedServerName( + server_name_); conn_manager_ = std::make_unique( *this, drain_close_, random_, http_context_, runtime_, local_info_, cluster_manager_, overload_manager_, test_time_.timeSystem()); diff --git a/test/common/http/conn_pool_grid_test.cc b/test/common/http/conn_pool_grid_test.cc index 87bd042763801..945af22b86ab7 100644 --- a/test/common/http/conn_pool_grid_test.cc +++ b/test/common/http/conn_pool_grid_test.cc @@ -127,7 +127,7 @@ class ConnectivityGridTestBase : public Event::TestUsingSimulatedTime, public te } const Network::ConnectionSocket::OptionsSharedPtr socket_options_; - const Network::TransportSocketOptionsSharedPtr transport_socket_options_; + const Network::TransportSocketOptionsConstSharedPtr transport_socket_options_; ConnectivityGrid::ConnectivityOptions options_; Upstream::ClusterConnectivityState state_; NiceMock dispatcher_; diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index 36ca86676e454..ab770d1b48b1a 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -1370,5 +1370,29 @@ TEST_P(HeaderMapImplTest, ValidHeaderString) { EXPECT_FALSE(validHeaderString("abc\n")); } +TEST_P(HeaderMapImplTest, HttpTraceContextTest) { + { + TestRequestHeaderMapImpl 
request_headers{{"host", "foo"}}; + EXPECT_EQ(request_headers.getTraceContext("host").value(), "foo"); + + request_headers.setTraceContext("trace_key", "trace_value"); + EXPECT_EQ(request_headers.getTraceContext("trace_key").value(), "trace_value"); + + std::string trace_ref_key = "trace_ref_key"; + request_headers.setTraceContextReferenceKey(trace_ref_key, "trace_value"); + auto* header_entry = request_headers.get(Http::LowerCaseString(trace_ref_key))[0]; + EXPECT_EQ(reinterpret_cast(trace_ref_key.data()), + reinterpret_cast(header_entry->key().getStringView().data())); + + std::string trace_ref_value = "trace_ref_key"; + request_headers.setTraceContextReference(trace_ref_key, trace_ref_value); + header_entry = request_headers.get(Http::LowerCaseString(trace_ref_key))[0]; + EXPECT_EQ(reinterpret_cast(trace_ref_key.data()), + reinterpret_cast(header_entry->key().getStringView().data())); + EXPECT_EQ(reinterpret_cast(trace_ref_value.data()), + reinterpret_cast(header_entry->value().getStringView().data())); + } +} + } // namespace Http } // namespace Envoy diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index 809ceb21e5145..c0493c7307b47 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -300,7 +300,7 @@ TEST_F(Http1ConnPoolImplTest, VerifyAlpnFallback) { auto factory = std::make_unique(); EXPECT_CALL(*factory, createTransportSocket(_)) .WillOnce(Invoke( - [](Network::TransportSocketOptionsSharedPtr options) -> Network::TransportSocketPtr { + [](Network::TransportSocketOptionsConstSharedPtr options) -> Network::TransportSocketPtr { EXPECT_TRUE(options != nullptr); EXPECT_EQ(options->applicationProtocolFallback()[0], Http::Utility::AlpnNames::get().Http11); diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index 6db02497e4289..6441c7df73300 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ 
b/test/common/http/http2/codec_impl_test.cc @@ -1076,14 +1076,14 @@ TEST_P(Http2CodecImplTest, ShouldDumpActiveStreamsWithoutAllocatingMemory) { Stats::TestUtil::MemoryTest memory_test; server_->dumpState(ostream, 1); EXPECT_EQ(memory_test.consumedBytes(), 0); - - // Check contents for active stream, trailers to encode and header map. + // Check contents for active stream, local_end_stream_, trailers to encode and header map. EXPECT_THAT( ostream.contents(), HasSubstr( "Number of active streams: 1, current_stream_id_: null Dumping 1 Active Streams:\n" " stream: \n" " ConnectionImpl::StreamImpl")); + EXPECT_THAT(ostream.contents(), HasSubstr("local_end_stream_: 1")); EXPECT_THAT(ostream.contents(), HasSubstr("pending_trailers_to_encode_: null\n" " absl::get(headers_or_trailers_): \n" @@ -1103,13 +1103,14 @@ TEST_P(Http2CodecImplTest, ShouldDumpActiveStreamsWithoutAllocatingMemory) { client_->dumpState(ostream, 1); EXPECT_EQ(memory_test.consumedBytes(), 0); - // Check contents for active stream, trailers to encode and header map. + // Check contents for active stream, local_end_stream_, trailers to encode and header map. 
EXPECT_THAT( ostream.contents(), HasSubstr( "Number of active streams: 1, current_stream_id_: null Dumping 1 Active Streams:\n" " stream: \n" " ConnectionImpl::StreamImpl")); + EXPECT_THAT(ostream.contents(), HasSubstr("local_end_stream_: 0")); EXPECT_THAT(ostream.contents(), HasSubstr("pending_trailers_to_encode_: null\n" " absl::get(headers_or_trailers_): \n" diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index e83eaabf94861..e3784cbaaf718 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -41,7 +41,7 @@ class TestConnPoolImpl : public FixedHttpConnPoolImpl { TestConnPoolImpl(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, Envoy::Upstream::ClusterConnectivityState& state) : FixedHttpConnPoolImpl( std::move(host), std::move(priority), dispatcher, options, transport_socket_options, @@ -347,7 +347,7 @@ TEST_F(Http2ConnPoolImplTest, VerifyAlpnFallback) { createTestClients(1); EXPECT_CALL(*factory_ptr, createTransportSocket(_)) .WillOnce(Invoke( - [](Network::TransportSocketOptionsSharedPtr options) -> Network::TransportSocketPtr { + [](Network::TransportSocketOptionsConstSharedPtr options) -> Network::TransportSocketPtr { EXPECT_TRUE(options != nullptr); EXPECT_EQ(options->applicationProtocolFallback()[0], Http::Utility::AlpnNames::get().Http2); diff --git a/test/common/http/http3/conn_pool_test.cc b/test/common/http/http3/conn_pool_test.cc index 547aa28be636f..fdb472e8c7665 100644 --- a/test/common/http/http3/conn_pool_test.cc +++ b/test/common/http/http3/conn_pool_test.cc @@ -44,7 +44,7 @@ class Http3ConnPoolImplTest : public 
Event::TestUsingSimulatedTime, public testi EXPECT_CALL(mockHost(), transportSocketFactory()).WillRepeatedly(testing::ReturnRef(factory_)); new Event::MockSchedulableCallback(&dispatcher_); Network::ConnectionSocket::OptionsSharedPtr options; - Network::TransportSocketOptionsSharedPtr transport_options; + Network::TransportSocketOptionsConstSharedPtr transport_options; pool_ = allocateConnPool(dispatcher_, random_, host_, Upstream::ResourcePriority::Default, options, transport_options, state_, simTime()); } diff --git a/test/common/http/match_wrapper/config_test.cc b/test/common/http/match_wrapper/config_test.cc index db2681a143995..f89f1a1f458a6 100644 --- a/test/common/http/match_wrapper/config_test.cc +++ b/test/common/http/match_wrapper/config_test.cc @@ -164,7 +164,7 @@ TEST(MatchWrapper, WithMatcherInvalidDataInput) { )EOF"); MatchWrapperConfig match_wrapper_config; - EXPECT_THROW_WITH_MESSAGE( + EXPECT_THROW_WITH_REGEX( match_wrapper_config.createFilterFactoryFromProto(config, "", factory_context), EnvoyException, "requirement violation while creating match tree: INVALID_ARGUMENT: data input typeUrl " diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc index 32149b467c395..18fe7de3c6ca5 100644 --- a/test/common/http/utility_test.cc +++ b/test/common/http/utility_test.cc @@ -870,6 +870,8 @@ TEST(HttpUtility, ResetReasonToString) { Utility::resetReasonToString(Http::StreamResetReason::RemoteRefusedStreamReset)); EXPECT_EQ("remote error with CONNECT request", Utility::resetReasonToString(Http::StreamResetReason::ConnectError)); + EXPECT_EQ("overload manager reset", + Utility::resetReasonToString(Http::StreamResetReason::OverloadManager)); } class TestConfig : public Router::RouteSpecificFilterConfig { diff --git a/test/common/network/address_impl_test.cc b/test/common/network/address_impl_test.cc index 86af3022cc583..3eb42caf7fd27 100644 --- a/test/common/network/address_impl_test.cc +++ b/test/common/network/address_impl_test.cc @@ 
-466,15 +466,15 @@ TEST(AddressFromSockAddrDeathTest, IPv4) { EXPECT_EQ(1, inet_pton(AF_INET, "1.2.3.4", &sin.sin_addr)); sin.sin_port = htons(6502); - EXPECT_DEATH(addressFromSockAddr(ss, 1), "ss_len"); - EXPECT_DEATH(addressFromSockAddr(ss, sizeof(sockaddr_in) - 1), "ss_len"); - EXPECT_DEATH(addressFromSockAddr(ss, sizeof(sockaddr_in) + 1), "ss_len"); + EXPECT_DEATH(*addressFromSockAddr(ss, 1), "ss_len"); + EXPECT_DEATH(*addressFromSockAddr(ss, sizeof(sockaddr_in) - 1), "ss_len"); + EXPECT_DEATH(*addressFromSockAddr(ss, sizeof(sockaddr_in) + 1), "ss_len"); - EXPECT_EQ("1.2.3.4:6502", addressFromSockAddr(ss, sizeof(sockaddr_in))->asString()); + EXPECT_EQ("1.2.3.4:6502", (*addressFromSockAddr(ss, sizeof(sockaddr_in)))->asString()); // Invalid family. sin.sin_family = AF_UNSPEC; - EXPECT_THROW(addressFromSockAddr(ss, sizeof(sockaddr_in)), EnvoyException); + EXPECT_FALSE(addressFromSockAddr(ss, sizeof(sockaddr_in)).ok()); } TEST(AddressFromSockAddrDeathTest, IPv6) { @@ -485,20 +485,22 @@ TEST(AddressFromSockAddrDeathTest, IPv6) { EXPECT_EQ(1, inet_pton(AF_INET6, "01:023::00Ef", &sin6.sin6_addr)); sin6.sin6_port = htons(32000); - EXPECT_DEATH(addressFromSockAddr(ss, 1), "ss_len"); - EXPECT_DEATH(addressFromSockAddr(ss, sizeof(sockaddr_in6) - 1), "ss_len"); - EXPECT_DEATH(addressFromSockAddr(ss, sizeof(sockaddr_in6) + 1), "ss_len"); + EXPECT_DEATH(*addressFromSockAddr(ss, 1), "ss_len"); + EXPECT_DEATH(*addressFromSockAddr(ss, sizeof(sockaddr_in6) - 1), "ss_len"); + EXPECT_DEATH(*addressFromSockAddr(ss, sizeof(sockaddr_in6) + 1), "ss_len"); - EXPECT_EQ("[1:23::ef]:32000", addressFromSockAddr(ss, sizeof(sockaddr_in6))->asString()); + EXPECT_EQ("[1:23::ef]:32000", (*addressFromSockAddr(ss, sizeof(sockaddr_in6)))->asString()); // Test that IPv4-mapped IPv6 address is returned as an Ipv4Instance when 'v6only' parameter is // 'false', but not otherwise. 
EXPECT_EQ(1, inet_pton(AF_INET6, "::ffff:192.0.2.128", &sin6.sin6_addr)); - EXPECT_EQ(IpVersion::v4, addressFromSockAddr(ss, sizeof(sockaddr_in6), false)->ip()->version()); - EXPECT_EQ("192.0.2.128:32000", addressFromSockAddr(ss, sizeof(sockaddr_in6), false)->asString()); - EXPECT_EQ(IpVersion::v6, addressFromSockAddr(ss, sizeof(sockaddr_in6), true)->ip()->version()); + EXPECT_EQ(IpVersion::v4, + (*addressFromSockAddr(ss, sizeof(sockaddr_in6), false))->ip()->version()); + EXPECT_EQ("192.0.2.128:32000", + (*addressFromSockAddr(ss, sizeof(sockaddr_in6), false))->asString()); + EXPECT_EQ(IpVersion::v6, (*addressFromSockAddr(ss, sizeof(sockaddr_in6), true))->ip()->version()); EXPECT_EQ("[::ffff:192.0.2.128]:32000", - addressFromSockAddr(ss, sizeof(sockaddr_in6), true)->asString()); + (*addressFromSockAddr(ss, sizeof(sockaddr_in6), true))->asString()); } TEST(AddressFromSockAddrDeathTest, Pipe) { @@ -508,20 +510,20 @@ TEST(AddressFromSockAddrDeathTest, Pipe) { StringUtil::strlcpy(sun.sun_path, "/some/path", sizeof sun.sun_path); - EXPECT_DEATH(addressFromSockAddr(ss, 1), "ss_len"); - EXPECT_DEATH(addressFromSockAddr(ss, offsetof(struct sockaddr_un, sun_path)), "ss_len"); + EXPECT_DEATH(*addressFromSockAddr(ss, 1), "ss_len"); + EXPECT_DEATH(*addressFromSockAddr(ss, offsetof(struct sockaddr_un, sun_path)), "ss_len"); socklen_t ss_len = offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sun.sun_path); - EXPECT_EQ("/some/path", addressFromSockAddr(ss, ss_len)->asString()); + EXPECT_EQ("/some/path", (*addressFromSockAddr(ss, ss_len))->asString()); // Abstract socket namespace. 
StringUtil::strlcpy(&sun.sun_path[1], "/some/abstract/path", sizeof sun.sun_path); sun.sun_path[0] = '\0'; ss_len = offsetof(struct sockaddr_un, sun_path) + 1 + strlen("/some/abstract/path"); #if defined(__linux__) - EXPECT_EQ("@/some/abstract/path", addressFromSockAddr(ss, ss_len)->asString()); + EXPECT_EQ("@/some/abstract/path", (*addressFromSockAddr(ss, ss_len))->asString()); #else - EXPECT_THROW(addressFromSockAddr(ss, ss_len), EnvoyException); + EXPECT_FALSE(addressFromSockAddr(ss, ss_len).ok()); #endif } diff --git a/test/common/network/connection_impl_test.cc b/test/common/network/connection_impl_test.cc index 63f5a10898aef..25bf57e1f8921 100644 --- a/test/common/network/connection_impl_test.cc +++ b/test/common/network/connection_impl_test.cc @@ -200,7 +200,7 @@ class ConnectionImplTest : public testing::TestWithParam { dispatcher_ = api_->allocateDispatcher("test_thread", Buffer::WatermarkFactoryPtr{factory}); // The first call to create a client session will get a MockBuffer. // Other calls for server sessions will by default get a normal OwnedImpl. - EXPECT_CALL(*factory, create_(_, _, _)) + EXPECT_CALL(*factory, createBuffer_(_, _, _)) .Times(AnyNumber()) .WillOnce(Invoke([&](std::function below_low, std::function above_high, std::function above_overflow) -> Buffer::Instance* { @@ -224,11 +224,11 @@ class ConnectionImplTest : public testing::TestWithParam { ConnectionMocks createConnectionMocks(bool create_timer = true) { auto dispatcher = std::make_unique>(); - EXPECT_CALL(dispatcher->buffer_factory_, create_(_, _, _)) + EXPECT_CALL(dispatcher->buffer_factory_, createBuffer_(_, _, _)) .WillRepeatedly(Invoke([](std::function below_low, std::function above_high, std::function above_overflow) -> Buffer::Instance* { - // ConnectionImpl calls Envoy::MockBufferFactory::create(), which calls create_() and - // wraps the returned raw pointer below with a unique_ptr. 
+ // ConnectionImpl calls Envoy::MockBufferFactory::create(), which calls createBuffer_() + // and wraps the returned raw pointer below with a unique_ptr. return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); })); @@ -1621,7 +1621,7 @@ TEST_P(ConnectionImplTest, FlushWriteAndDelayConfigDisabledTest) { NiceMock callbacks; NiceMock dispatcher; - EXPECT_CALL(dispatcher.buffer_factory_, create_(_, _, _)) + EXPECT_CALL(dispatcher.buffer_factory_, createBuffer_(_, _, _)) .WillRepeatedly(Invoke([](std::function below_low, std::function above_high, std::function above_overflow) -> Buffer::Instance* { return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); @@ -1913,19 +1913,19 @@ TEST_P(ConnectionImplTest, NetworkSocketDumpsWithoutAllocatingMemory) { // Check socket dump const auto contents = ostream.contents(); EXPECT_THAT(contents, HasSubstr("ListenSocketImpl")); - EXPECT_THAT(contents, HasSubstr("transport_protocol_: , server_name_: envoyproxy.io")); + EXPECT_THAT(contents, HasSubstr("transport_protocol_: ")); EXPECT_THAT(contents, HasSubstr("SocketAddressSetterImpl")); if (GetParam() == Network::Address::IpVersion::v4) { EXPECT_THAT( contents, HasSubstr( "remote_address_: 1.1.1.1:80, direct_remote_address_: 1.1.1.1:80, local_address_: " - "1.2.3.4:56789")); + "1.2.3.4:56789, server_name_: envoyproxy.io")); } else { EXPECT_THAT( contents, HasSubstr("remote_address_: [::1]:80, direct_remote_address_: [::1]:80, local_address_: " - "[::1:2:3:4]:56789")); + "[::1:2:3:4]:56789, server_name_: envoyproxy.io")); } } @@ -1984,7 +1984,7 @@ class MockTransportConnectionImplTest : public testing::Test { public: MockTransportConnectionImplTest() : stream_info_(dispatcher_.timeSource(), nullptr) { EXPECT_CALL(dispatcher_, isThreadSafe()).WillRepeatedly(Return(true)); - EXPECT_CALL(dispatcher_.buffer_factory_, create_(_, _, _)) + EXPECT_CALL(dispatcher_.buffer_factory_, createBuffer_(_, _, _)) .WillRepeatedly(Invoke([](std::function below_low, 
std::function above_high, std::function above_overflow) -> Buffer::Instance* { return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); diff --git a/test/common/quic/active_quic_listener_test.cc b/test/common/quic/active_quic_listener_test.cc index e495f07b78b63..1dbd90bb6009b 100644 --- a/test/common/quic/active_quic_listener_test.cc +++ b/test/common/quic/active_quic_listener_test.cc @@ -198,6 +198,9 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { read_filters_.push_back(std::move(read_filter)); // A Sequence must be used to allow multiple EXPECT_CALL().WillOnce() // calls for the same object. + EXPECT_CALL(*filter_chain_, transportSocketFactory()) + .InSequence(seq) + .WillOnce(ReturnRef(transport_socket_factory_)); EXPECT_CALL(*filter_chain_, networkFilterFactories()) .InSequence(seq) .WillOnce(ReturnRef(filter_factories_.back())); @@ -321,6 +324,7 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { // of elements are saved in expectations before new elements are added. 
std::list> filter_factories_; const Network::MockFilterChain* filter_chain_; + Network::MockTransportSocketFactory transport_socket_factory_; quic::ParsedQuicVersion quic_version_; uint32_t connection_window_size_{1024u}; uint32_t stream_window_size_{1024u}; diff --git a/test/common/quic/envoy_quic_client_session_test.cc b/test/common/quic/envoy_quic_client_session_test.cc index 3497518f88d8f..d377abd663a6c 100644 --- a/test/common/quic/envoy_quic_client_session_test.cc +++ b/test/common/quic/envoy_quic_client_session_test.cc @@ -298,5 +298,128 @@ TEST_P(EnvoyQuicClientSessionTest, ConnectionCloseWithActiveStream) { EXPECT_TRUE(stream.write_side_closed() && stream.reading_stopped()); } +class EnvoyQuicClientSessionAllQuicVersionTest + : public testing::TestWithParam { +public: + EnvoyQuicClientSessionAllQuicVersionTest() + : api_(Api::createApiForTest(time_system_)), + dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), + alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), + peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), + 12345)), + self_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), + 54321)), + quic_connection_(new TestEnvoyQuicClientConnection( + quic::test::TestConnectionId(), connection_helper_, alarm_factory_, writer_, + quic::test::SupportedVersions(GetParam()), *dispatcher_, + createConnectionSocket(peer_addr_, self_addr_, nullptr))), + crypto_config_(std::make_shared( + quic::test::crypto_test_utils::ProofVerifierForTesting())), + envoy_quic_session_(quic_config_, quic::test::SupportedVersions(GetParam()), + std::unique_ptr(quic_connection_), + quic::QuicServerId("example.com", 443, false), crypto_config_, nullptr, + *dispatcher_, + /*send_buffer_limit*/ 1024 * 1024, crypto_stream_factory_), + stats_({ALL_HTTP3_CODEC_STATS(POOL_COUNTER_PREFIX(scope_, "http3."), + POOL_GAUGE_PREFIX(scope_, "http3."))}), + 
http_connection_(envoy_quic_session_, http_connection_callbacks_, stats_, http3_options_, + 64 * 1024, 100) { + EXPECT_EQ(time_system_.systemTime(), envoy_quic_session_.streamInfo().startTime()); + EXPECT_EQ(EMPTY_STRING, envoy_quic_session_.nextProtocol()); + EXPECT_EQ(Http::Protocol::Http3, http_connection_.protocol()); + + time_system_.advanceTimeWait(std::chrono::milliseconds(1)); + ON_CALL(writer_, WritePacket(_, _, _, _, _)) + .WillByDefault(testing::Return(quic::WriteResult(quic::WRITE_STATUS_OK, 1))); + } + + void SetUp() override { + envoy_quic_session_.Initialize(); + setQuicConfigWithDefaultValues(envoy_quic_session_.config()); + envoy_quic_session_.OnConfigNegotiated(); + envoy_quic_session_.addConnectionCallbacks(network_connection_callbacks_); + envoy_quic_session_.setConnectionStats( + {read_total_, read_current_, write_total_, write_current_, nullptr, nullptr}); + EXPECT_EQ(&read_total_, &quic_connection_->connectionStats().read_total_); + } + + void TearDown() override { + if (quic_connection_->connected()) { + EXPECT_CALL(*quic_connection_, + SendConnectionClosePacket(quic::QUIC_NO_ERROR, _, "Closed by application")); + EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); + envoy_quic_session_.close(Network::ConnectionCloseType::NoFlush); + } + } + +protected: + Event::SimulatedTimeSystemHelper time_system_; + Api::ApiPtr api_; + Event::DispatcherPtr dispatcher_; + EnvoyQuicConnectionHelper connection_helper_; + EnvoyQuicAlarmFactory alarm_factory_; + testing::NiceMock writer_; + Network::Address::InstanceConstSharedPtr peer_addr_; + Network::Address::InstanceConstSharedPtr self_addr_; + TestEnvoyQuicClientConnection* quic_connection_; + quic::QuicConfig quic_config_; + std::shared_ptr crypto_config_; + TestQuicCryptoClientStreamFactory crypto_stream_factory_; + EnvoyQuicClientSession envoy_quic_session_; + Network::MockConnectionCallbacks network_connection_callbacks_; + Http::MockServerConnectionCallbacks 
http_connection_callbacks_; + testing::StrictMock read_total_; + testing::StrictMock read_current_; + testing::StrictMock write_total_; + testing::StrictMock write_current_; + Stats::IsolatedStoreImpl scope_; + Http::Http3::CodecStats stats_; + envoy::config::core::v3::Http3ProtocolOptions http3_options_; + QuicHttpClientConnectionImpl http_connection_; +}; + +INSTANTIATE_TEST_SUITE_P(EnvoyQuicClientSessionAllQuicVersionTests, + EnvoyQuicClientSessionAllQuicVersionTest, + testing::ValuesIn(quic::AllSupportedVersions())); + +TEST_P(EnvoyQuicClientSessionAllQuicVersionTest, ConnectionClosePopulatesQuicVersionStats) { + std::string error_details("dummy details"); + quic::QuicErrorCode error(quic::QUIC_INVALID_FRAME_DATA); + quic::QuicConnectionCloseFrame frame(GetParam().transport_version, error, + quic::NO_IETF_QUIC_ERROR, error_details, + /* transport_close_frame_type = */ 0); + EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::RemoteClose)); + quic_connection_->OnConnectionCloseFrame(frame); + EXPECT_EQ(absl::StrCat(quic::QuicErrorCodeToString(error), " with details: ", error_details), + envoy_quic_session_.transportFailureReason()); + EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state()); + std::string quic_version_stat_name; + switch (GetParam().transport_version) { + case quic::QUIC_VERSION_43: + quic_version_stat_name = "43"; + break; + case quic::QUIC_VERSION_46: + quic_version_stat_name = "46"; + break; + case quic::QUIC_VERSION_50: + quic_version_stat_name = "50"; + break; + case quic::QUIC_VERSION_51: + quic_version_stat_name = "51"; + break; + case quic::QUIC_VERSION_IETF_DRAFT_29: + quic_version_stat_name = "h3_29"; + break; + case quic::QUIC_VERSION_IETF_RFC_V1: + quic_version_stat_name = "rfc_v1"; + break; + default: + break; + } + EXPECT_EQ(1U, TestUtility::findCounter( + scope_, absl::StrCat("http3.quic_version_", quic_version_stat_name)) + ->value()); +} + } // namespace Quic } // namespace Envoy diff 
--git a/test/common/quic/envoy_quic_dispatcher_test.cc b/test/common/quic/envoy_quic_dispatcher_test.cc index 92ef520ba44ae..f15072bd140f9 100644 --- a/test/common/quic/envoy_quic_dispatcher_test.cc +++ b/test/common/quic/envoy_quic_dispatcher_test.cc @@ -212,6 +212,9 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, EXPECT_EQ("test.example.org", socket.requestedServerName()); return &proof_source_->filterChain(); })); + Network::MockTransportSocketFactory transport_socket_factory; + EXPECT_CALL(proof_source_->filterChain(), transportSocketFactory()) + .WillOnce(ReturnRef(transport_socket_factory)); EXPECT_CALL(proof_source_->filterChain(), networkFilterFactories()) .WillOnce(ReturnRef(filter_factory)); EXPECT_CALL(listener_config_, filterChainFactory()); @@ -290,6 +293,9 @@ TEST_P(EnvoyQuicDispatcherTest, CloseConnectionDuringFilterInstallation) { EXPECT_CALL(listener_config_, filterChainManager()).WillOnce(ReturnRef(filter_chain_manager)); EXPECT_CALL(filter_chain_manager, findFilterChain(_)) .WillOnce(Return(&proof_source_->filterChain())); + Network::MockTransportSocketFactory transport_socket_factory; + EXPECT_CALL(proof_source_->filterChain(), transportSocketFactory()) + .WillOnce(ReturnRef(transport_socket_factory)); EXPECT_CALL(proof_source_->filterChain(), networkFilterFactories()) .WillOnce(ReturnRef(filter_factory)); EXPECT_CALL(listener_config_, filterChainFactory()); diff --git a/test/common/quic/envoy_quic_proof_source_test.cc b/test/common/quic/envoy_quic_proof_source_test.cc index 30837b4fddc05..efcd16337872f 100644 --- a/test/common/quic/envoy_quic_proof_source_test.cc +++ b/test/common/quic/envoy_quic_proof_source_test.cc @@ -61,8 +61,6 @@ class TestGetProofCallback : public quic::ProofSource::Callback { ON_CALL(cert_validation_ctx_config_, certificateRevocationListPath()) .WillByDefault(ReturnRef(path_string)); const std::vector empty_string_list; - ON_CALL(cert_validation_ctx_config_, verifySubjectAltNameList()) - 
.WillByDefault(ReturnRef(empty_string_list)); const std::vector san_matchers; ON_CALL(cert_validation_ctx_config_, subjectAltNameMatchers()) .WillByDefault(ReturnRef(san_matchers)); diff --git a/test/common/quic/envoy_quic_proof_verifier_test.cc b/test/common/quic/envoy_quic_proof_verifier_test.cc index 49dd9696e2fb8..442e9b867f961 100644 --- a/test/common/quic/envoy_quic_proof_verifier_test.cc +++ b/test/common/quic/envoy_quic_proof_verifier_test.cc @@ -57,8 +57,6 @@ class EnvoyQuicProofVerifierTest : public testing::Test { .WillRepeatedly(ReturnRef(empty_string_)); EXPECT_CALL(cert_validation_ctx_config_, certificateRevocationListPath()) .WillRepeatedly(ReturnRef(path_string_)); - EXPECT_CALL(cert_validation_ctx_config_, verifySubjectAltNameList()) - .WillRepeatedly(ReturnRef(empty_string_list_)); EXPECT_CALL(cert_validation_ctx_config_, subjectAltNameMatchers()) .WillRepeatedly(ReturnRef(san_matchers_)); EXPECT_CALL(cert_validation_ctx_config_, verifyCertificateHashList()) diff --git a/test/common/quic/envoy_quic_server_session_test.cc b/test/common/quic/envoy_quic_server_session_test.cc index dd73908e3eefa..df7bf0c8c7015 100644 --- a/test/common/quic/envoy_quic_server_session_test.cc +++ b/test/common/quic/envoy_quic_server_session_test.cc @@ -127,11 +127,12 @@ class EnvoyQuicTestCryptoServerStreamFactory : public EnvoyQuicCryptoServerStrea ProtobufTypes::MessagePtr createEmptyConfigProto() override { return nullptr; } std::string name() const override { return "quic.test_crypto_server_stream"; } - std::unique_ptr - createEnvoyQuicCryptoServerStream(const quic::QuicCryptoServerConfig* crypto_config, - quic::QuicCompressedCertsCache* compressed_certs_cache, - quic::QuicSession* session, - quic::QuicCryptoServerStreamBase::Helper* helper) override { + std::unique_ptr createEnvoyQuicCryptoServerStream( + const quic::QuicCryptoServerConfig* crypto_config, + quic::QuicCompressedCertsCache* compressed_certs_cache, quic::QuicSession* session, + 
quic::QuicCryptoServerStreamBase::Helper* helper, + OptRef /*transport_socket_factory*/, + Event::Dispatcher& /*dispatcher*/) override { switch (session->connection()->version().handshake_protocol) { case quic::PROTOCOL_QUIC_CRYPTO: return std::make_unique(crypto_config, compressed_certs_cache, @@ -166,7 +167,8 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam { &compressed_certs_cache_, *dispatcher_, /*send_buffer_limit*/ quic::kDefaultFlowControlSendWindow * 1.5, quic_stat_names_, listener_config_.listenerScope(), - crypto_stream_factory_), + crypto_stream_factory_, + makeOptRefFromPtr(nullptr)), stats_({ALL_HTTP3_CODEC_STATS( POOL_COUNTER_PREFIX(listener_config_.listenerScope(), "http3."), POOL_GAUGE_PREFIX(listener_config_.listenerScope(), "http3."))}) { diff --git a/test/common/quic/envoy_quic_writer_test.cc b/test/common/quic/envoy_quic_writer_test.cc index 4e8d47cf6a829..a0ee8b3ed5edd 100644 --- a/test/common/quic/envoy_quic_writer_test.cc +++ b/test/common/quic/envoy_quic_writer_test.cc @@ -33,10 +33,11 @@ class EnvoyQuicWriterTest : public ::testing::Test { } void verifySendData(const std::string& content, const msghdr* message) { - EXPECT_EQ(peer_address_.ToString(), Network::Address::addressFromSockAddr( - *reinterpret_cast(message->msg_name), - message->msg_namelen, /*v6only=*/false) - ->asString()); + EXPECT_EQ(peer_address_.ToString(), + (*Network::Address::addressFromSockAddr( + *reinterpret_cast(message->msg_name), message->msg_namelen, + /*v6only=*/false)) + ->asString()); cmsghdr* const cmsg = CMSG_FIRSTHDR(message); auto pktinfo = reinterpret_cast(CMSG_DATA(cmsg)); EXPECT_EQ(0, memcmp(self_address_.GetIPv6().s6_addr, pktinfo->ipi6_addr.s6_addr, diff --git a/test/common/router/BUILD b/test/common/router/BUILD index d3be91e2bf11d..ed6f767661b50 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -101,6 +101,7 @@ envoy_cc_test( "//source/common/router:rds_lib", "//source/server/admin:admin_lib", 
"//test/mocks/local_info:local_info_mocks", + "//test/mocks/matcher:matcher_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/server:instance_mocks", "//test/mocks/thread_local:thread_local_mocks", @@ -145,6 +146,7 @@ envoy_cc_test( "//source/server/admin:admin_lib", "//test/mocks/config:config_mocks", "//test/mocks/init:init_mocks", + "//test/mocks/matcher:matcher_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/router:router_mocks", "//test/mocks/server:instance_mocks", diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index 38f3f0d65387d..a403b857474a6 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -5202,6 +5202,7 @@ TEST_F(RouteMatcherTest, TestWeightedClusterHeaderManipulation) { key: x-resp-cluster value: cluster1 response_headers_to_remove: [ "x-remove-cluster1" ] + host_rewrite_literal: "new_host1" - name: cluster2 weight: 50 request_headers_to_add: @@ -5227,6 +5228,7 @@ TEST_F(RouteMatcherTest, TestWeightedClusterHeaderManipulation) { route->finalizeRequestHeaders(headers, stream_info, true); EXPECT_EQ("cluster1", headers.get_("x-req-cluster")); + EXPECT_EQ("new_host1", headers.getHostValue()); route->finalizeResponseHeaders(resp_headers, stream_info); EXPECT_EQ("cluster1", resp_headers.get_("x-resp-cluster")); diff --git a/test/common/router/rds_impl_test.cc b/test/common/router/rds_impl_test.cc index 551593ca21e52..6ed9e8f1985f9 100644 --- a/test/common/router/rds_impl_test.cc +++ b/test/common/router/rds_impl_test.cc @@ -16,6 +16,7 @@ #include "test/mocks/init/mocks.h" #include "test/mocks/local_info/mocks.h" +#include "test/mocks/matcher/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/server/instance.h" #include "test/mocks/thread_local/mocks.h" @@ -37,6 +38,9 @@ namespace Envoy { namespace Router { namespace { +using ::Envoy::Matchers::MockStringMatcher; +using ::Envoy::Matchers::UniversalStringMatcher; + 
envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager parseHttpConnectionManagerFromYaml(const std::string& yaml_string) { envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager @@ -290,6 +294,7 @@ TEST_F(RdsImplTest, Basic) { // Old config use count should be 1 now. EXPECT_EQ(1, config.use_count()); EXPECT_EQ(2UL, scope_.counter("foo.rds.foo_route_config.config_reload").value()); + EXPECT_TRUE(scope_.findGaugeByString("foo.rds.foo_route_config.config_reload_time_ms")); } // validate there will be exception throw when unknown factory found for per virtualhost typed @@ -752,8 +757,10 @@ parseRouteConfigurationFromV3Yaml(const std::string& yaml, bool avoid_boosting = } TEST_F(RouteConfigProviderManagerImplTest, ConfigDump) { + UniversalStringMatcher universal_name_matcher; auto message_ptr = - server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["routes"](); + server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["routes"]( + universal_name_matcher); const auto& route_config_dump = TestUtility::downcastAndValidate(*message_ptr); @@ -784,8 +791,8 @@ name: foo route_config_provider_manager_->createStaticRouteConfigProvider( parseRouteConfigurationFromV3Yaml(config_yaml), OptionalHttpFilters(), server_factory_context_, validation_visitor_); - message_ptr = - server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["routes"](); + message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["routes"]( + universal_name_matcher); const auto& route_config_dump2 = TestUtility::downcastAndValidate(*message_ptr); TestUtility::loadFromYaml(R"EOF( @@ -832,8 +839,8 @@ name: foo EXPECT_CALL(init_watcher_, ready()); rds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()); - message_ptr = - server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["routes"](); + message_ptr = 
server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["routes"]( + universal_name_matcher); const auto& route_config_dump3 = TestUtility::downcastAndValidate(*message_ptr); TestUtility::loadFromYaml(R"EOF( @@ -862,6 +869,51 @@ name: foo )EOF", expected_route_config_dump); EXPECT_EQ(expected_route_config_dump.DebugString(), route_config_dump3.DebugString()); + + MockStringMatcher mock_name_matcher; + EXPECT_CALL(mock_name_matcher, match("foo")).WillOnce(Return(true)); + EXPECT_CALL(mock_name_matcher, match("foo_route_config")).WillOnce(Return(false)); + message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["routes"]( + mock_name_matcher); + const auto& route_config_dump4 = + TestUtility::downcastAndValidate(*message_ptr); + TestUtility::loadFromYaml(R"EOF( +static_route_configs: + - route_config: + "@type": type.googleapis.com/envoy.config.route.v3.RouteConfiguration + name: foo + virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: baz } + last_updated: + seconds: 1234567891 + nanos: 234000000 +)EOF", + expected_route_config_dump); + EXPECT_EQ(expected_route_config_dump.DebugString(), route_config_dump4.DebugString()); + + EXPECT_CALL(mock_name_matcher, match("foo")).WillOnce(Return(false)); + EXPECT_CALL(mock_name_matcher, match("foo_route_config")).WillOnce(Return(true)); + message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["routes"]( + mock_name_matcher); + const auto& route_config_dump5 = + TestUtility::downcastAndValidate(*message_ptr); + TestUtility::loadFromYaml(R"EOF( +dynamic_route_configs: + - version_info: "1" + route_config: + "@type": type.googleapis.com/envoy.config.route.v3.RouteConfiguration + name: foo_route_config + virtual_hosts: + last_updated: + seconds: 1234567891 + nanos: 234000000 +)EOF", + expected_route_config_dump); + EXPECT_EQ(expected_route_config_dump.DebugString(), route_config_dump5.DebugString()); 
} TEST_F(RouteConfigProviderManagerImplTest, Basic) { @@ -909,8 +961,10 @@ name: foo_route_config EXPECT_NE(provider3, provider_); server_factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( decoded_resources.refvec_, "provider3"); - EXPECT_EQ(2UL, - route_config_provider_manager_->dumpRouteConfigs()->dynamic_route_configs().size()); + UniversalStringMatcher universal_name_matcher; + EXPECT_EQ(2UL, route_config_provider_manager_->dumpRouteConfigs(universal_name_matcher) + ->dynamic_route_configs() + .size()); provider_.reset(); provider2.reset(); @@ -918,7 +972,8 @@ name: foo_route_config // All shared_ptrs to the provider pointed at by provider1, and provider2 have been deleted, so // now we should only have the provider pointed at by provider3. auto dynamic_route_configs = - route_config_provider_manager_->dumpRouteConfigs()->dynamic_route_configs(); + route_config_provider_manager_->dumpRouteConfigs(universal_name_matcher) + ->dynamic_route_configs(); EXPECT_EQ(1UL, dynamic_route_configs.size()); // Make sure the left one is provider3 @@ -926,8 +981,9 @@ name: foo_route_config provider3.reset(); - EXPECT_EQ(0UL, - route_config_provider_manager_->dumpRouteConfigs()->dynamic_route_configs().size()); + EXPECT_EQ(0UL, route_config_provider_manager_->dumpRouteConfigs(universal_name_matcher) + ->dynamic_route_configs() + .size()); } TEST_F(RouteConfigProviderManagerImplTest, SameProviderOnTwoInitManager) { @@ -998,8 +1054,10 @@ TEST_F(RouteConfigProviderManagerImplTest, OnConfigUpdateWrongSize) { // Regression test for https://github.com/envoyproxy/envoy/issues/7939 TEST_F(RouteConfigProviderManagerImplTest, ConfigDumpAfterConfigRejected) { + UniversalStringMatcher universal_name_matcher; auto message_ptr = - server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["routes"](); + server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["routes"]( + universal_name_matcher); const auto& route_config_dump = 
TestUtility::downcastAndValidate(*message_ptr); @@ -1054,8 +1112,8 @@ version_info: '1' rds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()), EnvoyException, "Only a single wildcard domain is permitted in route foo_route_config"); - message_ptr = - server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["routes"](); + message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["routes"]( + universal_name_matcher); const auto& route_config_dump3 = TestUtility::downcastAndValidate(*message_ptr); TestUtility::loadFromYaml(R"EOF( diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index 2ed7e399842fa..1f4bc9d6b64b8 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -5863,7 +5863,7 @@ TEST_F(RouterTest, ApplicationProtocols) { EXPECT_CALL(cm_.thread_local_cluster_, httpConnPool(_, _, _)) .WillOnce(Invoke([&](Upstream::ResourcePriority, absl::optional, Upstream::LoadBalancerContext* context) { - Network::TransportSocketOptionsSharedPtr transport_socket_options = + Network::TransportSocketOptionsConstSharedPtr transport_socket_options = context->upstreamTransportSocketOptions(); EXPECT_NE(transport_socket_options, nullptr); EXPECT_FALSE(transport_socket_options->applicationProtocolListOverride().empty()); diff --git a/test/common/router/scoped_rds_test.cc b/test/common/router/scoped_rds_test.cc index 60808295f024e..8be75180cec13 100644 --- a/test/common/router/scoped_rds_test.cc +++ b/test/common/router/scoped_rds_test.cc @@ -19,6 +19,7 @@ #include "source/common/router/scoped_rds.h" #include "test/mocks/config/mocks.h" +#include "test/mocks/matcher/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/router/mocks.h" #include "test/mocks/server/instance.h" @@ -30,6 +31,7 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" +using testing::_; using testing::AnyNumber; using testing::Eq; using testing::InSequence; 
@@ -43,6 +45,9 @@ namespace Envoy { namespace Router { namespace { +using ::Envoy::Matchers::MockStringMatcher; +using ::Envoy::Matchers::UniversalStringMatcher; + using ::Envoy::Http::TestRequestHeaderMapImpl; envoy::config::route::v3::ScopedRouteConfiguration @@ -384,6 +389,9 @@ route_configuration_name: foo_routes EXPECT_EQ(2UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") .value()); + EXPECT_TRUE(server_factory_context_.scope_.findGaugeByString( + "foo.scoped_rds.foo_scoped_routes.config_reload_time_ms")); + // now scope key "x-bar-key" points to nowhere. EXPECT_THAT(getScopedRdsProvider()->config()->getRouteConfig( TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}), @@ -750,8 +758,10 @@ TEST_F(ScopedRdsTest, ConfigDump) { setup(); init_watcher_.expectReady(); context_init_manager_.initialize(init_watcher_); + UniversalStringMatcher universal_matcher; auto message_ptr = - server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["route_scopes"](); + server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["route_scopes"]( + universal_matcher); const auto& scoped_routes_config_dump = TestUtility::downcastAndValidate( *message_ptr); @@ -800,7 +810,8 @@ stat_prefix: foo inline_scoped_route_configs_yaml)), server_factory_context_, context_init_manager_, "foo.", *config_provider_manager_); message_ptr = - server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["route_scopes"](); + server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["route_scopes"]( + universal_matcher); const auto& scoped_routes_config_dump2 = TestUtility::downcastAndValidate( *message_ptr); @@ -871,12 +882,76 @@ route_configuration_name: dynamic-foo-route-config )EOF", expected_config_dump); message_ptr = - server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["route_scopes"](); + 
server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["route_scopes"]( + universal_matcher); const auto& scoped_routes_config_dump3 = TestUtility::downcastAndValidate( *message_ptr); EXPECT_THAT(expected_config_dump, ProtoEq(scoped_routes_config_dump3)); + NiceMock mock_matcher; + EXPECT_CALL(mock_matcher, match("foo")).WillOnce(Return(true)); + EXPECT_CALL(mock_matcher, match("foo2")).WillOnce(Return(false)); + EXPECT_CALL(mock_matcher, match("dynamic-foo")).WillOnce(Return(false)); + TestUtility::loadFromYaml(R"EOF( +inline_scoped_route_configs: + - name: foo-scoped-routes + scoped_route_configs: + - name: foo + "@type": type.googleapis.com/envoy.api.v2.ScopedRouteConfiguration + route_configuration_name: foo-route-config + key: + fragments: { string_key: "172.10.10.10" } + last_updated: + seconds: 1234567891 + nanos: 234000000 +dynamic_scoped_route_configs: + - name: foo_scoped_routes + last_updated: + seconds: 1234567891 + nanos: 567000000 + version_info: "1" +)EOF", + expected_config_dump); + message_ptr = + server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["route_scopes"]( + mock_matcher); + const auto& scoped_routes_config_dump4 = + TestUtility::downcastAndValidate( + *message_ptr); + EXPECT_THAT(expected_config_dump, ProtoEq(scoped_routes_config_dump4)); + + EXPECT_CALL(mock_matcher, match("foo")).WillOnce(Return(false)); + EXPECT_CALL(mock_matcher, match("foo2")).WillOnce(Return(false)); + EXPECT_CALL(mock_matcher, match("dynamic-foo")).WillOnce(Return(true)); + TestUtility::loadFromYaml(R"EOF( +inline_scoped_route_configs: + - name: foo-scoped-routes + last_updated: + seconds: 1234567891 + nanos: 234000000 +dynamic_scoped_route_configs: + - name: foo_scoped_routes + scoped_route_configs: + - name: dynamic-foo + "@type": type.googleapis.com/envoy.api.v2.ScopedRouteConfiguration + route_configuration_name: dynamic-foo-route-config + key: + fragments: { string_key: "172.30.30.10" } + last_updated: + seconds: 
1234567891 + nanos: 567000000 + version_info: "1" +)EOF", + expected_config_dump); + message_ptr = + server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["route_scopes"]( + mock_matcher); + const auto& scoped_routes_config_dump5 = + TestUtility::downcastAndValidate( + *message_ptr); + EXPECT_THAT(expected_config_dump, ProtoEq(scoped_routes_config_dump5)); + srds_subscription_->onConfigUpdate({}, "2"); TestUtility::loadFromYaml(R"EOF( inline_scoped_route_configs: @@ -904,11 +979,12 @@ route_configuration_name: dynamic-foo-route-config )EOF", expected_config_dump); message_ptr = - server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["route_scopes"](); - const auto& scoped_routes_config_dump4 = + server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["route_scopes"]( + universal_matcher); + const auto& scoped_routes_config_dump6 = TestUtility::downcastAndValidate( *message_ptr); - EXPECT_THAT(expected_config_dump, ProtoEq(scoped_routes_config_dump4)); + EXPECT_THAT(expected_config_dump, ProtoEq(scoped_routes_config_dump6)); } // Tests that SRDS only allows creation of delta static config providers. 
diff --git a/test/common/secret/BUILD b/test/common/secret/BUILD index 6161773b7b8c2..3f1488d0118fa 100644 --- a/test/common/secret/BUILD +++ b/test/common/secret/BUILD @@ -19,6 +19,7 @@ envoy_cc_test( "//source/common/secret:secret_manager_impl_lib", "//source/common/ssl:certificate_validation_context_config_impl_lib", "//source/common/ssl:tls_certificate_config_impl_lib", + "//test/mocks/matcher:matcher_mocks", "//test/mocks/server:config_tracker_mocks", "//test/mocks/server:instance_mocks", "//test/mocks/server:transport_socket_factory_context_mocks", diff --git a/test/common/secret/secret_manager_impl_test.cc b/test/common/secret/secret_manager_impl_test.cc index cf9d9fb2d9604..0c36a3c4ba404 100644 --- a/test/common/secret/secret_manager_impl_test.cc +++ b/test/common/secret/secret_manager_impl_test.cc @@ -15,6 +15,7 @@ #include "source/common/ssl/tls_certificate_config_impl.h" #include "test/mocks/event/mocks.h" +#include "test/mocks/matcher/mocks.h" #include "test/mocks/server/config_tracker.h" #include "test/mocks/server/instance.h" #include "test/mocks/server/transport_socket_factory_context.h" @@ -26,18 +27,23 @@ #include "gtest/gtest.h" using testing::ReturnRef; +using testing::StrictMock; namespace Envoy { namespace Secret { namespace { +using ::Envoy::Matchers::MockStringMatcher; + class SecretManagerImplTest : public testing::Test, public Logger::Loggable { protected: SecretManagerImplTest() : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")) {} - void checkConfigDump(const std::string& expected_dump_yaml) { - auto message_ptr = config_tracker_.config_tracker_callbacks_["secrets"](); + void checkConfigDump( + const std::string& expected_dump_yaml, + const Matchers::StringMatcher& name_matcher = Matchers::UniversalStringMatcher()) { + auto message_ptr = config_tracker_.config_tracker_callbacks_["secrets"](name_matcher); const auto& secrets_config_dump = dynamic_cast(*message_ptr); envoy::admin::v3::SecretsConfigDump 
expected_secrets_config_dump; @@ -498,6 +504,9 @@ name: "abc.com" inline_string: "[redacted]" )EOF"; checkConfigDump(expected_secrets_config_dump); + StrictMock mock_matcher; + EXPECT_CALL(mock_matcher, match("abc.com")).WillOnce(Return(false)); + checkConfigDump("{}", mock_matcher); // Add a dynamic tls validation context provider. time_system_.setSystemTime(std::chrono::milliseconds(1234567899000)); @@ -547,6 +556,9 @@ name: "abc.com.validation" inline_string: "DUMMY_INLINE_STRING_TRUSTED_CA" )EOF"; checkConfigDump(updated_config_dump); + EXPECT_CALL(mock_matcher, match("abc.com")).WillOnce(Return(false)); + EXPECT_CALL(mock_matcher, match("abc.com.validation")).WillOnce(Return(false)); + checkConfigDump("{}", mock_matcher); // Add a dynamic tls session ticket encryption keys context provider. time_system_.setSystemTime(std::chrono::milliseconds(1234567899000)); @@ -609,6 +621,10 @@ name: "abc.com.stek" - inline_bytes: "W3JlZGFjdGVkXQ==" )EOF"; checkConfigDump(TestEnvironment::substitute(updated_once_more_config_dump)); + EXPECT_CALL(mock_matcher, match("abc.com")).WillOnce(Return(false)); + EXPECT_CALL(mock_matcher, match("abc.com.validation")).WillOnce(Return(false)); + EXPECT_CALL(mock_matcher, match("abc.com.stek")).WillOnce(Return(false)); + checkConfigDump("{}", mock_matcher); // Add a dynamic generic secret provider. 
time_system_.setSystemTime(std::chrono::milliseconds(1234567900000)); @@ -682,6 +698,11 @@ name: "signing_key" inline_string: "[redacted]" )EOF"; checkConfigDump(TestEnvironment::substitute(config_dump_with_generic_secret)); + EXPECT_CALL(mock_matcher, match("abc.com")).WillOnce(Return(false)); + EXPECT_CALL(mock_matcher, match("abc.com.validation")).WillOnce(Return(false)); + EXPECT_CALL(mock_matcher, match("abc.com.stek")).WillOnce(Return(false)); + EXPECT_CALL(mock_matcher, match("signing_key")).WillOnce(Return(false)); + checkConfigDump("{}", mock_matcher); } TEST_F(SecretManagerImplTest, ConfigDumpHandlerWarmingSecrets) { @@ -722,6 +743,9 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerWarmingSecrets) { name: "abc.com" )EOF"; checkConfigDump(expected_secrets_config_dump); + StrictMock mock_matcher; + EXPECT_CALL(mock_matcher, match("abc.com")).WillOnce(Return(false)); + checkConfigDump("{}", mock_matcher); time_system_.setSystemTime(std::chrono::milliseconds(1234567899000)); auto context_secret_provider = secret_manager->findOrCreateCertificateValidationContextProvider( @@ -746,6 +770,9 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerWarmingSecrets) { name: "abc.com.validation" )EOF"; checkConfigDump(updated_config_dump); + EXPECT_CALL(mock_matcher, match("abc.com")).WillOnce(Return(false)); + EXPECT_CALL(mock_matcher, match("abc.com.validation")).WillOnce(Return(false)); + checkConfigDump("{}", mock_matcher); time_system_.setSystemTime(std::chrono::milliseconds(1234567899000)); auto stek_secret_provider = secret_manager->findOrCreateTlsSessionTicketKeysContextProvider( @@ -777,6 +804,10 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerWarmingSecrets) { name: "abc.com.stek" )EOF"; checkConfigDump(updated_once_more_config_dump); + EXPECT_CALL(mock_matcher, match("abc.com")).WillOnce(Return(false)); + EXPECT_CALL(mock_matcher, match("abc.com.validation")).WillOnce(Return(false)); + EXPECT_CALL(mock_matcher, match("abc.com.stek")).WillOnce(Return(false)); + 
checkConfigDump("{}", mock_matcher); time_system_.setSystemTime(std::chrono::milliseconds(1234567900000)); auto generic_secret_provider = secret_manager->findOrCreateGenericSecretProvider( @@ -815,6 +846,11 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerWarmingSecrets) { name: "signing_key" )EOF"; checkConfigDump(config_dump_with_generic_secret); + EXPECT_CALL(mock_matcher, match("abc.com")).WillOnce(Return(false)); + EXPECT_CALL(mock_matcher, match("abc.com.validation")).WillOnce(Return(false)); + EXPECT_CALL(mock_matcher, match("abc.com.stek")).WillOnce(Return(false)); + EXPECT_CALL(mock_matcher, match("signing_key")).WillOnce(Return(false)); + checkConfigDump("{}", mock_matcher); } TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticSecrets) { @@ -889,6 +925,11 @@ name: "abc.com.nopassword" inline_string: "[redacted]" )EOF"; checkConfigDump(expected_config_dump); + StrictMock mock_matcher; + EXPECT_CALL(mock_matcher, match(testing::HasSubstr("abc.com"))) + .Times(2) + .WillRepeatedly(Return(false)); + checkConfigDump("{}", mock_matcher); } TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticValidationContext) { @@ -934,6 +975,9 @@ name: "abc.com.validation" inline_string: "DUMMY_INLINE_STRING_TRUSTED_CA" )EOF"; checkConfigDump(expected_config_dump); + StrictMock mock_matcher; + EXPECT_CALL(mock_matcher, match("abc.com.validation")).WillOnce(Return(false)); + checkConfigDump("{}", mock_matcher); } TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticSessionTicketsContext) { @@ -983,6 +1027,9 @@ name: "abc.com.stek" - inline_bytes: "W3JlZGFjdGVkXQ==" )EOF"; checkConfigDump(TestEnvironment::substitute(expected_config_dump)); + StrictMock mock_matcher; + EXPECT_CALL(mock_matcher, match("abc.com.stek")).WillOnce(Return(false)); + checkConfigDump("{}", mock_matcher); } TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticGenericSecret) { @@ -1009,6 +1056,9 @@ name: "signing_key" inline_bytes: "W3JlZGFjdGVkXQ==" )EOF"; 
checkConfigDump(TestEnvironment::substitute(expected_config_dump)); + StrictMock mock_matcher; + EXPECT_CALL(mock_matcher, match("signing_key")).WillOnce(Return(false)); + checkConfigDump("{}", mock_matcher); } } // namespace diff --git a/test/common/stream_info/stream_info_impl_test.cc b/test/common/stream_info/stream_info_impl_test.cc index 88b2c04fb38c9..482490ca7874d 100644 --- a/test/common/stream_info/stream_info_impl_test.cc +++ b/test/common/stream_info/stream_info_impl_test.cc @@ -180,11 +180,6 @@ TEST_F(StreamInfoImplTest, MiscSettersAndGetters) { EXPECT_EQ(1, stream_info.upstreamFilterState()->getDataReadOnly("test").access()); - EXPECT_EQ("", stream_info.requestedServerName()); - absl::string_view sni_name = "stubserver.org"; - stream_info.setRequestedServerName(sni_name); - EXPECT_EQ(std::string(sni_name), stream_info.requestedServerName()); - EXPECT_EQ(absl::nullopt, stream_info.upstreamClusterInfo()); Upstream::ClusterInfoConstSharedPtr cluster_info(new NiceMock()); stream_info.setUpstreamClusterInfo(cluster_info); diff --git a/test/common/stream_info/test_util.h b/test/common/stream_info/test_util.h index 8335d6317b749..39b95e2b76549 100644 --- a/test/common/stream_info/test_util.h +++ b/test/common/stream_info/test_util.h @@ -178,12 +178,6 @@ class TestStreamInfo : public StreamInfo::StreamInfo { upstream_filter_state_ = filter_state; } - void setRequestedServerName(const absl::string_view requested_server_name) override { - requested_server_name_ = std::string(requested_server_name); - } - - const std::string& requestedServerName() const override { return requested_server_name_; } - void setUpstreamTransportFailureReason(absl::string_view failure_reason) override { upstream_transport_failure_reason_ = std::string(failure_reason); } diff --git a/test/common/tcp/conn_pool_test.cc b/test/common/tcp/conn_pool_test.cc index 1399e589cc96f..c1febd25d1558 100644 --- a/test/common/tcp/conn_pool_test.cc +++ b/test/common/tcp/conn_pool_test.cc @@ -56,10 
+56,11 @@ struct ConnPoolCallbacks : public Tcp::ConnectionPool::Callbacks { conn_data_ = std::move(conn); conn_data_->addUpstreamCallbacks(callbacks_); host_ = host; + ssl_ = conn_data_->connection().streamInfo().downstreamSslConnection(); pool_ready_.ready(); } - void onPoolFailure(ConnectionPool::PoolFailureReason reason, + void onPoolFailure(ConnectionPool::PoolFailureReason reason, absl::string_view, Upstream::HostDescriptionConstSharedPtr host) override { reason_ = reason; host_ = host; @@ -72,6 +73,7 @@ struct ConnPoolCallbacks : public Tcp::ConnectionPool::Callbacks { ConnectionPool::ConnectionDataPtr conn_data_{}; absl::optional reason_; Upstream::HostDescriptionConstSharedPtr host_; + Ssl::ConnectionInfoConstSharedPtr ssl_; }; class TestActiveTcpClient : public ActiveTcpClient { @@ -99,7 +101,7 @@ class ConnPoolBase : public Tcp::ConnectionPool::Instance { ConnPoolBase(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host, NiceMock* upstream_ready_cb, Network::ConnectionSocket::OptionsSharedPtr options, - Network::TransportSocketOptionsSharedPtr transport_socket_options, + Network::TransportSocketOptionsConstSharedPtr transport_socket_options, bool test_new_connection_pool); void addDrainedCallback(DrainedCb cb) override { conn_pool_->addDrainedCallback(cb); } @@ -155,14 +157,14 @@ class ConnPoolBase : public Tcp::ConnectionPool::Instance { Network::ConnectionCallbacks* callbacks_ = nullptr; bool test_new_connection_pool_; Network::ConnectionSocket::OptionsSharedPtr options_; - Network::TransportSocketOptionsSharedPtr transport_socket_options_; + Network::TransportSocketOptionsConstSharedPtr transport_socket_options_; protected: class ConnPoolImplForTest : public ConnPoolImpl { public: ConnPoolImplForTest(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host, Network::ConnectionSocket::OptionsSharedPtr options, - Network::TransportSocketOptionsSharedPtr transport_socket_options, + Network::TransportSocketOptionsConstSharedPtr 
transport_socket_options, ConnPoolBase& parent) : ConnPoolImpl(dispatcher, host, Upstream::ResourcePriority::Default, options, transport_socket_options, state_), @@ -187,10 +189,11 @@ class ConnPoolBase : public Tcp::ConnectionPool::Instance { class OriginalConnPoolImplForTest : public OriginalConnPoolImpl { public: - OriginalConnPoolImplForTest(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host, - Network::ConnectionSocket::OptionsSharedPtr options, - Network::TransportSocketOptionsSharedPtr transport_socket_options, - ConnPoolBase& parent) + OriginalConnPoolImplForTest( + Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host, + Network::ConnectionSocket::OptionsSharedPtr options, + Network::TransportSocketOptionsConstSharedPtr transport_socket_options, + ConnPoolBase& parent) : OriginalConnPoolImpl(dispatcher, host, Upstream::ResourcePriority::Default, options, transport_socket_options), parent_(parent) {} @@ -229,7 +232,7 @@ class ConnPoolBase : public Tcp::ConnectionPool::Instance { ConnPoolBase::ConnPoolBase(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host, NiceMock* upstream_ready_cb, Network::ConnectionSocket::OptionsSharedPtr options, - Network::TransportSocketOptionsSharedPtr transport_socket_options, + Network::TransportSocketOptionsConstSharedPtr transport_socket_options, bool test_new_connection_pool) : mock_dispatcher_(dispatcher), mock_upstream_ready_cb_(upstream_ready_cb), test_new_connection_pool_(test_new_connection_pool), options_(options), @@ -285,7 +288,7 @@ class TcpConnPoolImplTest : public Event::TestUsingSimulatedTime, NiceMock* upstream_ready_cb_; Upstream::HostSharedPtr host_; Network::ConnectionSocket::OptionsSharedPtr options_; - Network::TransportSocketOptionsSharedPtr transport_socket_options_; + Network::TransportSocketOptionsConstSharedPtr transport_socket_options_; std::unique_ptr conn_pool_; NiceMock runtime_; }; @@ -320,7 +323,7 @@ class TcpConnPoolImplDestructorTest : public 
Event::TestUsingSimulatedTime, EXPECT_CALL(*connection_, connect()); EXPECT_CALL(*connection_, setConnectionStats(_)); EXPECT_CALL(*connection_, noDelay(true)); - EXPECT_CALL(*connection_, streamInfo()).Times(2); + EXPECT_CALL(*connection_, streamInfo()).Times(3); EXPECT_CALL(*connection_, id()).Times(AnyNumber()); EXPECT_CALL(*connection_, readDisable(_)).Times(AnyNumber()); @@ -337,6 +340,7 @@ class TcpConnPoolImplDestructorTest : public Event::TestUsingSimulatedTime, EXPECT_CALL(*connection_, ssl()).WillOnce(Return(ssl_)); connection_->raiseEvent(Network::ConnectionEvent::Connected); EXPECT_EQ(connection_->streamInfo().downstreamSslConnection(), ssl_); + EXPECT_EQ(callbacks_->ssl_, ssl_); } bool test_new_connection_pool_; diff --git a/test/common/tcp_proxy/config_test.cc b/test/common/tcp_proxy/config_test.cc index 5ea8c2c3a3e3f..d5c49016465c9 100644 --- a/test/common/tcp_proxy/config_test.cc +++ b/test/common/tcp_proxy/config_test.cc @@ -911,7 +911,7 @@ TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(UpstreamServerName)) { // override-server-name EXPECT_CALL(factory_context_.cluster_manager_.thread_local_cluster_, tcpConnPool(_, _)) .WillOnce(Invoke([](Upstream::ResourcePriority, Upstream::LoadBalancerContext* context) { - Network::TransportSocketOptionsSharedPtr transport_socket_options = + Network::TransportSocketOptionsConstSharedPtr transport_socket_options = context->upstreamTransportSocketOptions(); EXPECT_NE(transport_socket_options, nullptr); EXPECT_TRUE(transport_socket_options->serverNameOverride().has_value()); @@ -941,7 +941,7 @@ TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(ApplicationProtocols)) { // override-application-protocol EXPECT_CALL(factory_context_.cluster_manager_.thread_local_cluster_, tcpConnPool(_, _)) .WillOnce(Invoke([](Upstream::ResourcePriority, Upstream::LoadBalancerContext* context) { - Network::TransportSocketOptionsSharedPtr transport_socket_options = + Network::TransportSocketOptionsConstSharedPtr 
transport_socket_options = context->upstreamTransportSocketOptions(); EXPECT_NE(transport_socket_options, nullptr); EXPECT_FALSE(transport_socket_options->applicationProtocolListOverride().empty()); diff --git a/test/common/tcp_proxy/tcp_proxy_test_base.h b/test/common/tcp_proxy/tcp_proxy_test_base.h index cb1c62a9a59d5..37676f5bbceda 100644 --- a/test/common/tcp_proxy/tcp_proxy_test_base.h +++ b/test/common/tcp_proxy/tcp_proxy_test_base.h @@ -150,7 +150,7 @@ class TcpProxyTestBase : public testing::Test { void raiseEventUpstreamConnectFailed(uint32_t conn_index, ConnectionPool::PoolFailureReason reason) { - conn_pool_callbacks_.at(conn_index)->onPoolFailure(reason, upstream_hosts_.at(conn_index)); + conn_pool_callbacks_.at(conn_index)->onPoolFailure(reason, "", upstream_hosts_.at(conn_index)); } Tcp::ConnectionPool::Cancellable* onNewConnection(Tcp::ConnectionPool::Cancellable* connection) { diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index 16fd761aa7394..84c54f4dfd2eb 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -42,6 +42,7 @@ envoy_cc_test( ":test_cluster_manager", "//source/common/router:context_lib", "//source/extensions/transport_sockets/tls:config", + "//test/mocks/matcher:matcher_mocks", "//test/mocks/upstream:cds_api_mocks", "//test/mocks/upstream:cluster_priority_set_mocks", "//test/mocks/upstream:cluster_real_priority_set_mocks", @@ -95,6 +96,7 @@ envoy_cc_test( "//source/common/config:utility_lib", "//source/common/upstream:eds_lib", "//source/extensions/transport_sockets/raw_buffer:config", + "//source/extensions/transport_sockets/tls:config", "//source/server:transport_socket_config_lib", "//test/common/stats:stat_test_utility_lib", "//test/mocks/local_info:local_info_mocks", diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index 7fed2c6567b49..fa5f863ae53ff 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ 
b/test/common/upstream/cluster_manager_impl_test.cc @@ -11,6 +11,7 @@ #include "test/common/upstream/test_cluster_manager.h" #include "test/mocks/http/conn_pool.h" +#include "test/mocks/matcher/mocks.h" #include "test/mocks/upstream/cds_api.h" #include "test/mocks/upstream/cluster_priority_set.h" #include "test/mocks/upstream/cluster_real_priority_set.h" @@ -138,8 +139,10 @@ class ClusterManagerImplTest : public testing::Test { .value()); } - void checkConfigDump(const std::string& expected_dump_yaml) { - auto message_ptr = admin_.config_tracker_.config_tracker_callbacks_["clusters"](); + void checkConfigDump( + const std::string& expected_dump_yaml, + const Matchers::StringMatcher& name_matcher = Matchers::UniversalStringMatcher()) { + auto message_ptr = admin_.config_tracker_.config_tracker_callbacks_["clusters"](name_matcher); const auto& clusters_config_dump = dynamic_cast(*message_ptr); @@ -280,6 +283,15 @@ TEST_F(ClusterManagerImplTest, MultipleProtocolCluster) { dynamic_active_clusters: dynamic_warming_clusters: )EOF"); + + Matchers::MockStringMatcher mock_matcher; + EXPECT_CALL(mock_matcher, match("http12_cluster")).WillOnce(Return(false)); + checkConfigDump(R"EOF( +static_clusters: +dynamic_active_clusters: +dynamic_warming_clusters: +)EOF", + mock_matcher); } TEST_F(ClusterManagerImplTest, OutlierEventLog) { diff --git a/test/common/upstream/eds_test.cc b/test/common/upstream/eds_test.cc index 93a31a2b6095c..ad84f1678ddfa 100644 --- a/test/common/upstream/eds_test.cc +++ b/test/common/upstream/eds_test.cc @@ -56,6 +56,36 @@ class EdsTest : public testing::Test { Cluster::InitializePhase::Secondary); } + // Define a cluster with secure and unsecure (default) transport + // sockets. 
+ void resetClusterWithTransportSockets() { + resetCluster(R"EOF( + name: name + connect_timeout: 0.25s + type: EDS + lb_policy: ROUND_ROBIN + eds_cluster_config: + service_name: fare + eds_config: + api_config_source: + api_type: REST + cluster_names: + - eds + refresh_delay: 1s + transport_socket_matches: + - match: + secure: enabled + name: secure-mode + transport_socket: + name: envoy.transport_sockets.tls + - match: {} + name: default-mode + transport_socket: + name: envoy.transport_sockets.raw_buffer + )EOF", + Cluster::InitializePhase::Secondary); + } + void resetClusterDrainOnHostRemoval() { resetCluster(R"EOF( name: name @@ -117,7 +147,7 @@ class EdsTest : public testing::Test { bool initialized_{}; Stats::TestUtil::TestStore stats_; - Ssl::MockContextManager ssl_context_manager_; + NiceMock ssl_context_manager_; envoy::config::cluster::v3::Cluster eds_cluster_; NiceMock cm_; NiceMock dispatcher_; @@ -452,6 +482,51 @@ TEST_F(EdsTest, EndpointMetadata) { "v2"); } +// Test verifies that updating metadata updates +// data members dependent on metadata values. +// Specifically, it transport socket matcher has changed, +// the transport socket factory should also be updated. +TEST_F(EdsTest, EndpointMetadataWithTransportSocket) { + envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); + resetClusterWithTransportSockets(); + + auto health_checker = std::make_shared(); + EXPECT_CALL(*health_checker, start()); + EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_)).Times(2); + cluster_->setHealthChecker(health_checker); + + // Add single endpoint to the cluster. 
+ auto* endpoints = cluster_load_assignment.add_endpoints(); + auto* endpoint = endpoints->add_lb_endpoints(); + + auto* socket_address = endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address(); + socket_address->set_address("1.2.3.4"); + socket_address->set_port_value(80); + + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); + + auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); + ASSERT_EQ(hosts.size(), 1); + auto* upstream_host = hosts[0].get(); + + // Verify that default transport socket is raw (does not implement secure transport). + EXPECT_FALSE(upstream_host->transportSocketFactory().implementsSecureTransport()); + + // Create metadata with transport socket match pointing to secure mode. + auto metadata = new envoy::config::core::v3::Metadata(); + MetadataConstSharedPtr metadata_sharedptr(metadata); + Config::Metadata::mutableMetadataValue( + *metadata, Config::MetadataFilters::get().ENVOY_TRANSPORT_SOCKET_MATCH, "secure") + .set_string_value("enabled"); + + // Update metadata. + upstream_host->metadata(metadata_sharedptr); + + // Transport socket factory should point to tls, which implements secure transport. + EXPECT_TRUE(upstream_host->transportSocketFactory().implementsSecureTransport()); +} + // Validate that onConfigUpdate() updates endpoint health status. 
TEST_F(EdsTest, EndpointHealthStatus) { envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index 35e224aa28752..973eb95cacd0a 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ b/test/common/upstream/health_checker_impl_test.cc @@ -1074,7 +1074,7 @@ TEST_F(HttpHealthCheckerImplTest, ZeroRetryInterval) { } MATCHER_P(ApplicationProtocolListEq, expected, "") { - const Network::TransportSocketOptionsSharedPtr& options = arg; + const Network::TransportSocketOptionsConstSharedPtr& options = arg; EXPECT_EQ(options->applicationProtocolListOverride(), std::vector{expected}); return true; } diff --git a/test/common/upstream/logical_dns_cluster_test.cc b/test/common/upstream/logical_dns_cluster_test.cc index ce94bf6e2184a..d3c0e88df67a4 100644 --- a/test/common/upstream/logical_dns_cluster_test.cc +++ b/test/common/upstream/logical_dns_cluster_test.cc @@ -562,6 +562,40 @@ TEST_F(LogicalDnsClusterTest, Basic) { testBasicSetup(basic_yaml_load_assignment, "foo.bar.com", 443, 8000); } +TEST_F(LogicalDnsClusterTest, DontWaitForDNSOnInit) { + const std::string config = R"EOF( + name: name + type: LOGICAL_DNS + dns_refresh_rate: 4s + dns_failure_refresh_rate: + base_interval: 7s + max_interval: 10s + connect_timeout: 0.25s + lb_policy: ROUND_ROBIN + # Since the following expectResolve() requires Network::DnsLookupFamily::V4Only we need to set + # dns_lookup_family to V4_ONLY explicitly for v2 .yaml config. 
+ dns_lookup_family: V4_ONLY + wait_for_warm_on_init: false + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 + )EOF"; + + EXPECT_CALL(initialized_, ready()); + expectResolve(Network::DnsLookupFamily::V4Only, "foo.bar.com"); + setupFromV3Yaml(config); + + EXPECT_CALL(membership_updated_, ready()); + EXPECT_CALL(*resolve_timer_, enableTimer(std::chrono::milliseconds(4000), _)); + dns_callback_(Network::DnsResolver::ResolutionStatus::Success, + TestUtility::makeDnsResponse({"127.0.0.1", "127.0.0.2"})); +} + } // namespace } // namespace Upstream } // namespace Envoy diff --git a/test/common/upstream/test_cluster_manager.h b/test/common/upstream/test_cluster_manager.h index 03a7cf951d4c8..b0a30974f3b2c 100644 --- a/test/common/upstream/test_cluster_manager.h +++ b/test/common/upstream/test_cluster_manager.h @@ -82,7 +82,7 @@ class TestClusterManagerFactory : public ClusterManagerFactory { const absl::optional& alternate_protocol_options, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, TimeSource&, ClusterConnectivityState& state) override { return Http::ConnectionPool::InstancePtr{allocateConnPool_( host, alternate_protocol_options, options, transport_socket_options, state)}; @@ -91,7 +91,7 @@ class TestClusterManagerFactory : public ClusterManagerFactory { Tcp::ConnectionPool::InstancePtr allocateTcpConnPool(Event::Dispatcher&, HostConstSharedPtr host, ResourcePriority, const Network::ConnectionSocket::OptionsSharedPtr&, - Network::TransportSocketOptionsSharedPtr, + Network::TransportSocketOptionsConstSharedPtr, Upstream::ClusterConnectivityState&) override { return Tcp::ConnectionPool::InstancePtr{allocateTcpConnPool_(host)}; } @@ -123,7 +123,7 @@ class TestClusterManagerFactory : public ClusterManagerFactory { const 
absl::optional& alternate_protocol_options, Network::ConnectionSocket::OptionsSharedPtr, - Network::TransportSocketOptionsSharedPtr, ClusterConnectivityState&)); + Network::TransportSocketOptionsConstSharedPtr, ClusterConnectivityState&)); MOCK_METHOD(Tcp::ConnectionPool::Instance*, allocateTcpConnPool_, (HostConstSharedPtr host)); MOCK_METHOD((std::pair), clusterFromProto_, (const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cm, diff --git a/test/common/upstream/transport_socket_matcher_test.cc b/test/common/upstream/transport_socket_matcher_test.cc index caa0adc9c73e1..2bedc726d5e86 100644 --- a/test/common/upstream/transport_socket_matcher_test.cc +++ b/test/common/upstream/transport_socket_matcher_test.cc @@ -32,7 +32,7 @@ class FakeTransportSocketFactory : public Network::TransportSocketFactory { MOCK_METHOD(bool, implementsSecureTransport, (), (const)); MOCK_METHOD(bool, usesProxyProtocolOptions, (), (const)); MOCK_METHOD(Network::TransportSocketPtr, createTransportSocket, - (Network::TransportSocketOptionsSharedPtr), (const)); + (Network::TransportSocketOptionsConstSharedPtr), (const)); FakeTransportSocketFactory(std::string id) : id_(std::move(id)) {} std::string id() const { return id_; } @@ -48,7 +48,7 @@ class FooTransportSocketFactory MOCK_METHOD(bool, implementsSecureTransport, (), (const)); MOCK_METHOD(bool, usesProxyProtocolOptions, (), (const)); MOCK_METHOD(Network::TransportSocketPtr, createTransportSocket, - (Network::TransportSocketOptionsSharedPtr), (const)); + (Network::TransportSocketOptionsConstSharedPtr), (const)); Network::TransportSocketFactoryPtr createTransportSocketFactory(const Protobuf::Message& proto, diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index ed7586497ae4a..7bb0850425601 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ b/test/common/upstream/upstream_impl_test.cc @@ -278,6 +278,55 @@ TEST_F(StrictDnsClusterImplTest, ZeroHostsHealthChecker) { 
EXPECT_EQ(0UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size()); } +TEST_F(StrictDnsClusterImplTest, DontWaitForDNSOnInit) { + ResolverData resolver(*dns_resolver_, dispatcher_); + + const std::string yaml = R"EOF( + name: name + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + dns_refresh_rate: 4s + dns_failure_refresh_rate: + base_interval: 7s + max_interval: 10s + wait_for_warm_on_init: false + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.com + port_value: 443 + )EOF"; + + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); + Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( + "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() + : cluster_config.alt_stat_name())); + Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, + singleton_manager_, tls_, validation_visitor_, *api_, options_); + StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver_, factory_context, + std::move(scope), false); + + ReadyWatcher initialized; + + // Initialized without completing DNS resolution. + EXPECT_CALL(initialized, ready()); + cluster.initialize([&]() -> void { initialized.ready(); }); + + ReadyWatcher membership_updated; + auto priority_update_cb = cluster.prioritySet().addPriorityUpdateCb( + [&](uint32_t, const HostVector&, const HostVector&) -> void { membership_updated.ready(); }); + + EXPECT_CALL(*resolver.timer_, enableTimer(std::chrono::milliseconds(4000), _)); + EXPECT_CALL(membership_updated, ready()); + resolver.dns_callback_(Network::DnsResolver::ResolutionStatus::Success, + TestUtility::makeDnsResponse({"127.0.0.2", "127.0.0.1"})); +} + TEST_F(StrictDnsClusterImplTest, Basic) { // gmock matches in LIFO order which is why these are swapped. 
ResolverData resolver2(*dns_resolver_, dispatcher_); diff --git a/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc b/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc index 53b655b54c778..c91799947621c 100644 --- a/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc +++ b/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc @@ -47,6 +47,7 @@ TEST(UtilityResponseFlagsToAccessLogResponseFlagsTest, All) { common_access_log_expected.mutable_response_flags()->set_duration_timeout(true); common_access_log_expected.mutable_response_flags()->set_upstream_protocol_error(true); common_access_log_expected.mutable_response_flags()->set_no_cluster_found(true); + common_access_log_expected.mutable_response_flags()->set_overload_manager(true); EXPECT_EQ(common_access_log_expected.DebugString(), common_access_log.DebugString()); } diff --git a/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc b/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc index 42969c35dfa4e..54e012875513c 100644 --- a/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc @@ -389,7 +389,7 @@ response: {} ON_CALL(*connection_info, tlsVersion()).WillByDefault(ReturnRef(tlsVersion)); ON_CALL(*connection_info, ciphersuiteId()).WillByDefault(Return(0x2CC0)); stream_info.setDownstreamSslConnection(connection_info); - stream_info.requested_server_name_ = "sni"; + stream_info.downstream_address_provider_->setRequestedServerName("sni"); Http::TestRequestHeaderMapImpl request_headers{ {":method", "WHACKADOO"}, @@ -449,7 +449,7 @@ response: {} ON_CALL(*connection_info, tlsVersion()).WillByDefault(ReturnRef(tlsVersion)); ON_CALL(*connection_info, ciphersuiteId()).WillByDefault(Return(0x2F)); stream_info.setDownstreamSslConnection(connection_info); - stream_info.requested_server_name_ = "sni"; + 
stream_info.downstream_address_provider_->setRequestedServerName("sni"); Http::TestRequestHeaderMapImpl request_headers{ {":method", "WHACKADOO"}, @@ -499,7 +499,7 @@ response: {} ON_CALL(*connection_info, tlsVersion()).WillByDefault(ReturnRef(tlsVersion)); ON_CALL(*connection_info, ciphersuiteId()).WillByDefault(Return(0x2F)); stream_info.setDownstreamSslConnection(connection_info); - stream_info.requested_server_name_ = "sni"; + stream_info.downstream_address_provider_->setRequestedServerName("sni"); Http::TestRequestHeaderMapImpl request_headers{ {":method", "WHACKADOO"}, @@ -549,7 +549,7 @@ response: {} ON_CALL(*connection_info, tlsVersion()).WillByDefault(ReturnRef(tlsVersion)); ON_CALL(*connection_info, ciphersuiteId()).WillByDefault(Return(0x2F)); stream_info.setDownstreamSslConnection(connection_info); - stream_info.requested_server_name_ = "sni"; + stream_info.downstream_address_provider_->setRequestedServerName("sni"); Http::TestRequestHeaderMapImpl request_headers{ {":method", "WHACKADOO"}, @@ -599,7 +599,7 @@ response: {} ON_CALL(*connection_info, tlsVersion()).WillByDefault(ReturnRef(tlsVersion)); ON_CALL(*connection_info, ciphersuiteId()).WillByDefault(Return(0x2F)); stream_info.setDownstreamSslConnection(connection_info); - stream_info.requested_server_name_ = "sni"; + stream_info.downstream_address_provider_->setRequestedServerName("sni"); Http::TestRequestHeaderMapImpl request_headers{ {":method", "WHACKADOO"}, diff --git a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc index 0544523a3be1f..2f0550321a85a 100644 --- a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc +++ b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc @@ -254,34 +254,8 @@ connect_timeout: 0.25s filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem" )EOF"); - EXPECT_THROW_WITH_MESSAGE( - createCluster(yaml_config, false), EnvoyException, - 
"dynamic_forward_proxy cluster cannot configure 'sni' or 'verify_subject_alt_name'"); -} - -// Verify that using 'verify_subject_alt_name' causes a failure. -TEST_F(ClusterFactoryTest, DEPRECATED_FEATURE_TEST(InvalidVerifySubjectAltName)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml_config = TestEnvironment::substitute(R"EOF( -name: name -connect_timeout: 0.25s -cluster_type: - name: dynamic_forward_proxy - typed_config: - "@type": type.googleapis.com/envoy.config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig - dns_cache_config: - name: foo -tls_context: - common_tls_context: - validation_context: - trusted_ca: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem" - verify_subject_alt_name: [api.lyft.com] -)EOF"); - - EXPECT_THROW_WITH_MESSAGE( - createCluster(yaml_config, false), EnvoyException, - "dynamic_forward_proxy cluster cannot configure 'sni' or 'verify_subject_alt_name'"); + EXPECT_THROW_WITH_MESSAGE(createCluster(yaml_config, false), EnvoyException, + "dynamic_forward_proxy cluster cannot configure 'sni'"); } TEST_F(ClusterFactoryTest, InvalidUpstreamHttpProtocolOptions) { diff --git a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc index 25f4b42fc4c6d..215e391d5fb63 100644 --- a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc +++ b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc @@ -28,9 +28,17 @@ namespace { class DnsCacheImplTest : public testing::Test, public Event::TestUsingSimulatedTime { public: - void initialize() { + void initialize(std::vector preresolve_hostnames = {}, uint32_t max_hosts = 1024) { config_.set_name("foo"); config_.set_dns_lookup_family(envoy::config::cluster::v3::Cluster::V4_ONLY); + config_.mutable_max_hosts()->set_value(max_hosts); + if (!preresolve_hostnames.empty()) { + for (const auto& hostname : preresolve_hostnames) { + 
envoy::config::core::v3::SocketAddress* address = config_.add_preresolve_hostnames(); + address->set_address(hostname); + address->set_port_value(443); + } + } EXPECT_CALL(dispatcher_, isThreadSafe).WillRepeatedly(Return(true)); @@ -98,6 +106,34 @@ MATCHER_P(CustomDnsResolversSizeEquals, expected_resolvers, "") { return expected_resolvers.size() == arg.size(); } +TEST_F(DnsCacheImplTest, PreresolveSuccess) { + Network::DnsResolver::ResolveCb resolve_cb; + EXPECT_CALL(*resolver_, resolve("bar.baz.com", _, _)) + .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); + EXPECT_CALL( + update_callbacks_, + onDnsHostAddOrUpdate("bar.baz.com", DnsHostInfoEquals("10.0.0.1:443", "bar.baz.com", false))); + + initialize({"bar.baz.com"} /* preresolve_hostnames */); + + resolve_cb(Network::DnsResolver::ResolutionStatus::Success, + TestUtility::makeDnsResponse({"10.0.0.1"})); + checkStats(1 /* attempt */, 1 /* success */, 0 /* failure */, 1 /* address changed */, + 1 /* added */, 0 /* removed */, 1 /* num hosts */); + + MockLoadDnsCacheEntryCallbacks callbacks; + auto result = dns_cache_->loadDnsCacheEntry("bar.baz.com", 80, callbacks); + EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::InCache, result.status_); + EXPECT_EQ(result.handle_, nullptr); + EXPECT_NE(absl::nullopt, result.host_info_); +} + +TEST_F(DnsCacheImplTest, PreresolveFailure) { + EXPECT_THROW_WITH_MESSAGE( + initialize({"bar.baz.com"} /* preresolve_hostnames */, 0 /* max_hosts */), EnvoyException, + "DNS Cache [foo] configured with preresolve_hostnames=1 larger than max_hosts=0"); +} + // Basic successful resolution and then re-resolution. TEST_F(DnsCacheImplTest, ResolveSuccess) { initialize(); @@ -698,8 +734,7 @@ TEST_F(DnsCacheImplTest, InvalidPort) { // Max host overflow. 
TEST_F(DnsCacheImplTest, MaxHostOverflow) { - config_.mutable_max_hosts()->set_value(0); - initialize(); + initialize({} /* preresolve_hostnames */, 0 /* max_hosts */); InSequence s; MockLoadDnsCacheEntryCallbacks callbacks; diff --git a/test/extensions/common/wasm/wasm_test.cc b/test/extensions/common/wasm/wasm_test.cc index 1314503e3da0c..0d9d4e1b030cd 100644 --- a/test/extensions/common/wasm/wasm_test.cc +++ b/test/extensions/common/wasm/wasm_test.cc @@ -1418,7 +1418,8 @@ class WasmCommonContextTest } void setupContext() { - context_ = std::make_unique(wasm_->wasm().get(), root_context_->id(), plugin_); + context_ = + std::make_unique(wasm_->wasm().get(), root_context_->id(), plugin_handle_); context_->onCreate(); } diff --git a/test/extensions/filters/common/expr/context_test.cc b/test/extensions/filters/common/expr/context_test.cc index dc3e58b62bbb1..79475ef353c70 100644 --- a/test/extensions/filters/common/expr/context_test.cc +++ b/test/extensions/filters/common/expr/context_test.cc @@ -440,10 +440,10 @@ TEST(Context, ConnectionAttributes) { const std::string sni_name = "kittens.com"; info.downstream_address_provider_->setLocalAddress(local); info.downstream_address_provider_->setRemoteAddress(remote); + info.downstream_address_provider_->setRequestedServerName(sni_name); EXPECT_CALL(info, downstreamSslConnection()).WillRepeatedly(Return(downstream_ssl_info)); EXPECT_CALL(info, upstreamSslConnection()).WillRepeatedly(Return(upstream_ssl_info)); EXPECT_CALL(info, upstreamHost()).WillRepeatedly(Return(upstream_host)); - EXPECT_CALL(info, requestedServerName()).WillRepeatedly(ReturnRef(sni_name)); EXPECT_CALL(info, upstreamLocalAddress()).WillRepeatedly(ReturnRef(upstream_local_address)); const std::string upstream_transport_failure_reason = "ConnectionTermination"; EXPECT_CALL(info, upstreamTransportFailureReason()) diff --git a/test/extensions/filters/http/admission_control/admission_control_integration_test.cc 
b/test/extensions/filters/http/admission_control/admission_control_integration_test.cc index 11357d799626e..8fb07712f747b 100644 --- a/test/extensions/filters/http/admission_control/admission_control_integration_test.cc +++ b/test/extensions/filters/http/admission_control/admission_control_integration_test.cc @@ -8,6 +8,9 @@ namespace Envoy { namespace { +// For how this value was chosen, see https://github.com/envoyproxy/envoy/issues/17067. +constexpr double ALLOWED_ERROR = 0.10; + const std::string ADMISSION_CONTROL_CONFIG = R"EOF( name: envoy.filters.http.admission_control @@ -24,6 +27,10 @@ name: envoy.filters.http.admission_control default_value: value: 100.0 runtime_key: "foo.sr_threshold" + max_rejection_probability: + default_value: + value: 100.0 + runtime_key: "foo.mrp" enabled: default_value: true runtime_key: "foo.enabled" @@ -117,9 +124,9 @@ TEST_P(AdmissionControlIntegrationTest, HttpTest) { ++request_count; } - // Given the current throttling rate formula with an aggression of 1, it should result in a ~80% - // throttling rate (default max_rejection_probability). Allowing an error of 5%. - EXPECT_NEAR(throttle_count / request_count, 0.80, 0.05); + // Given the current throttling rate formula with an aggression of 2.0, it should result in a ~98% + // throttling rate. + EXPECT_NEAR(throttle_count / request_count, 0.98, ALLOWED_ERROR); // We now wait for the history to become stale. timeSystem().advanceTimeWait(std::chrono::seconds(120)); @@ -157,9 +164,9 @@ TEST_P(AdmissionControlIntegrationTest, GrpcTest) { ++request_count; } - // Given the current throttling rate formula with an aggression of 1, it should result in a ~80% - // throttling rate (default max_rejection_probability). Allowing an error of 5%. - EXPECT_NEAR(throttle_count / request_count, 0.80, 0.05); + // Given the current throttling rate formula with an aggression of 2.0, it should result in a ~98% + // throttling rate. 
+ EXPECT_NEAR(throttle_count / request_count, 0.98, ALLOWED_ERROR); // We now wait for the history to become stale. timeSystem().advanceTimeWait(std::chrono::seconds(120)); diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_stats b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_stats index 10704daac17bb..e5659cd8933ee 100644 --- a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_stats +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_stats @@ -1,6 +1,5 @@ config { name: "envoy.filters.http.grpc_stats" - typed_config: {} } data { headers { @@ -44,4 +43,4 @@ upstream_data { value: "0" } } -} \ No newline at end of file +} diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_decode_encode b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_decode_encode index d1a907e186fc1..237c455aa77db 100644 --- a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_decode_encode +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_decode_encode @@ -1,6 +1,5 @@ config { name: "envoy.filters.http.grpc_json_transcoder" - typed_config: {} } data { headers { @@ -47,4 +46,4 @@ upstream_data { value: "0" } } -} \ No newline at end of file +} diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_http_data b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_http_data index cf0e8282a0830..1ff5f4f54c3a6 100644 --- a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_http_data +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_http_data @@ -1,6 +1,5 @@ config { name: "envoy.filters.http.grpc_json_transcoder" - typed_config: {} } data { @@ -21,4 +20,4 @@ data { http_body { data: "{\"theme\": \"Children\"}" } -} \ No newline at end of file +} diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_proto_data 
b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_proto_data index 3adc75ba874e2..9f73cd049b5a8 100644 --- a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_proto_data +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_proto_data @@ -1,6 +1,5 @@ config { name: "envoy.filters.http.grpc_json_transcoder" - typed_config: {} } data { @@ -35,4 +34,4 @@ data { value: "0" } } -} \ No newline at end of file +} diff --git a/test/extensions/filters/http/jwt_authn/BUILD b/test/extensions/filters/http/jwt_authn/BUILD index e224b781035a9..b2ecffb6f36de 100644 --- a/test/extensions/filters/http/jwt_authn/BUILD +++ b/test/extensions/filters/http/jwt_authn/BUILD @@ -112,6 +112,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.jwt_authn", deps = [ ":mock_lib", + "//source/common/common:base64_lib", "//source/extensions/filters/http/common:jwks_fetcher_lib", "//source/extensions/filters/http/jwt_authn:authenticator_lib", "//source/extensions/filters/http/jwt_authn:filter_config_lib", diff --git a/test/extensions/filters/http/jwt_authn/authenticator_test.cc b/test/extensions/filters/http/jwt_authn/authenticator_test.cc index f5cbb8a66e327..0168f1ce58aab 100644 --- a/test/extensions/filters/http/jwt_authn/authenticator_test.cc +++ b/test/extensions/filters/http/jwt_authn/authenticator_test.cc @@ -112,6 +112,23 @@ TEST_F(AuthenticatorTest, TestOkJWTandCache) { EXPECT_EQ(0U, filter_config_->stats().jwks_fetch_failed_.value()); } +TEST_F(AuthenticatorTest, TestCompletePaddingInJwtPayload) { + (*proto_config_.mutable_providers())[std::string(ProviderName)].set_pad_forward_payload_header( + true); + createAuthenticator(); + EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)) + .WillOnce(Invoke([this](const envoy::config::core::v3::HttpUri&, Tracing::Span&, + JwksFetcher::JwksReceiver& receiver) { + receiver.onJwksSuccess(std::move(jwks_)); + })); + + Http::TestRequestHeaderMapImpl headers{{"Authorization", 
"Bearer " + std::string(GoodToken)}}; + + expectVerifyStatus(Status::Ok, headers); + + EXPECT_EQ(headers.get_("sec-istio-auth-userinfo"), ExpectedPayloadValueWithPadding); +} + // This test verifies the Jwt is forwarded if "forward" flag is set. TEST_F(AuthenticatorTest, TestForwardJwt) { // Config forward_jwt flag diff --git a/test/extensions/filters/http/jwt_authn/test_common.h b/test/extensions/filters/http/jwt_authn/test_common.h index 0f2478ee2a73b..13c083163eb14 100644 --- a/test/extensions/filters/http/jwt_authn/test_common.h +++ b/test/extensions/filters/http/jwt_authn/test_common.h @@ -178,6 +178,11 @@ const char ExpectedPayloadValue[] = "eyJpc3MiOiJodHRwczovL2V4YW1wbGUuY29tIiwic3V "xlLmNvbSIsImV4cCI6MjAwMTAwMTAwMSwiYXVkIjoiZXhhbXBsZV9zZXJ2" "aWNlIn0"; +const char ExpectedPayloadValueWithPadding[] = + "eyJpc3MiOiJodHRwczovL2V4YW1wbGUuY29tIiwic3ViIjoidGVzdEBleGFtcG" + "xlLmNvbSIsImV4cCI6MjAwMTAwMTAwMSwiYXVkIjoiZXhhbXBsZV9zZXJ2" + "aWNlIn0="; + // Base64 decoded Payload JSON const char ExpectedPayloadJSON[] = R"( { diff --git a/test/extensions/filters/http/lua/lua_filter_test.cc b/test/extensions/filters/http/lua/lua_filter_test.cc index d1529ac2061e1..82d1623ff02cd 100644 --- a/test/extensions/filters/http/lua/lua_filter_test.cc +++ b/test/extensions/filters/http/lua/lua_filter_test.cc @@ -1794,8 +1794,8 @@ TEST_F(LuaHttpFilterTest, GetRequestedServerName) { setup(SCRIPT); EXPECT_CALL(decoder_callbacks_, streamInfo()).WillOnce(ReturnRef(stream_info_)); - std::string server_name = "foo.example.com"; - EXPECT_CALL(stream_info_, requestedServerName()).WillOnce(ReturnRef(server_name)); + absl::string_view server_name = "foo.example.com"; + stream_info_.downstream_address_provider_->setRequestedServerName(server_name); Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("foo.example.com"))); diff --git a/test/extensions/filters/http/lua/wrappers_test.cc 
b/test/extensions/filters/http/lua/wrappers_test.cc index ca10e89bf79b9..7b7b940149062 100644 --- a/test/extensions/filters/http/lua/wrappers_test.cc +++ b/test/extensions/filters/http/lua/wrappers_test.cc @@ -310,7 +310,7 @@ TEST_F(LuaStreamInfoWrapperTest, ReturnRequestedServerName) { setup(SCRIPT); NiceMock stream_info; - stream_info.requested_server_name_ = "some.sni.io"; + stream_info.downstream_address_provider_->setRequestedServerName("some.sni.io"); Filters::Common::Lua::LuaDeathRef wrapper( StreamInfoWrapper::create(coroutine_->luaState(), stream_info), true); EXPECT_CALL(printer_, testPrint("some.sni.io")); diff --git a/test/extensions/filters/http/wasm/config_test.cc b/test/extensions/filters/http/wasm/config_test.cc index 346b2ebac2930..5f6312fa02e20 100644 --- a/test/extensions/filters/http/wasm/config_test.cc +++ b/test/extensions/filters/http/wasm/config_test.cc @@ -128,15 +128,29 @@ TEST_P(WasmFilterConfigTest, YamlLoadFromFileWasm) { envoy::extensions::filters::http::wasm::v3::Wasm proto_config; TestUtility::loadFromYaml(yaml, proto_config); - WasmFilterConfig factory; - Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context_); - EXPECT_CALL(init_watcher_, ready()); - context_.initManager().initialize(init_watcher_); - EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); - Http::MockFilterChainFactoryCallbacks filter_callback; - EXPECT_CALL(filter_callback, addStreamFilter(_)); - EXPECT_CALL(filter_callback, addAccessLogHandler(_)); - cb(filter_callback); + + // Intentionally we scope the factory here, and make the context outlive it. + // This case happens when the config is updated by ECDS, and + // we have to make sure that contexts still hold valid WasmVMs in these cases. 
+ std::shared_ptr context = nullptr; + { + WasmFilterConfig factory; + Http::FilterFactoryCb cb = + factory.createFilterFactoryFromProto(proto_config, "stats", context_); + EXPECT_CALL(init_watcher_, ready()); + context_.initManager().initialize(init_watcher_); + EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); + Http::MockFilterChainFactoryCallbacks filter_callback; + EXPECT_CALL(filter_callback, addStreamFilter(_)) + .WillOnce([&context](Http::StreamFilterSharedPtr filter) { + context = std::static_pointer_cast(filter); + }); + EXPECT_CALL(filter_callback, addAccessLogHandler(_)); + cb(filter_callback); + } + // Check if the context still holds a valid Wasm even after the factory is destroyed. + EXPECT_TRUE(context); + EXPECT_TRUE(context->wasm()); } TEST_P(WasmFilterConfigTest, YamlLoadFromFileWasmFailOpenOk) { diff --git a/test/extensions/filters/http/wasm/wasm_filter_test.cc b/test/extensions/filters/http/wasm/wasm_filter_test.cc index 98d8c1c2671d7..737b1aa381c3e 100644 --- a/test/extensions/filters/http/wasm/wasm_filter_test.cc +++ b/test/extensions/filters/http/wasm/wasm_filter_test.cc @@ -31,7 +31,7 @@ namespace Wasm { using Envoy::Extensions::Common::Wasm::CreateContextFn; using Envoy::Extensions::Common::Wasm::Plugin; -using Envoy::Extensions::Common::Wasm::PluginSharedPtr; +using Envoy::Extensions::Common::Wasm::PluginHandleSharedPtr; using Envoy::Extensions::Common::Wasm::Wasm; using Envoy::Extensions::Common::Wasm::WasmHandleSharedPtr; using proxy_wasm::ContextBase; @@ -40,9 +40,8 @@ using WasmFilterConfig = envoy::extensions::filters::http::wasm::v3::Wasm; class TestFilter : public Envoy::Extensions::Common::Wasm::Context { public: - TestFilter(Wasm* wasm, uint32_t root_context_id, - Envoy::Extensions::Common::Wasm::PluginSharedPtr plugin) - : Envoy::Extensions::Common::Wasm::Context(wasm, root_context_id, plugin) {} + TestFilter(Wasm* wasm, uint32_t root_context_id, PluginHandleSharedPtr plugin_handle) + : 
Envoy::Extensions::Common::Wasm::Context(wasm, root_context_id, plugin_handle) {} MOCK_CONTEXT_LOG_; }; @@ -1682,7 +1681,7 @@ TEST_P(WasmHttpFilterTest, Property) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, true)); StreamInfo::MockStreamInfo log_stream_info; request_stream_info_.route_name_ = "route12"; - request_stream_info_.requested_server_name_ = "w3.org"; + request_stream_info_.downstream_address_provider_->setRequestedServerName("w3.org"); NiceMock connection; EXPECT_CALL(connection, id()).WillRepeatedly(Return(4)); EXPECT_CALL(encoder_callbacks_, connection()).WillRepeatedly(Return(&connection)); @@ -1838,15 +1837,16 @@ TEST_P(WasmHttpFilterTest, PanicOnRequestHeaders) { } setupTest("panic"); setupFilter(); - Http::MockStreamDecoderFilterCallbacks decoder_callbacks; - filter().setDecoderFilterCallbacks(decoder_callbacks); - auto headers = Http::TestResponseHeaderMapImpl{{":status", "503"}}; - EXPECT_CALL(decoder_callbacks, encodeHeaders_(HeaderMapEqualRef(&headers), true)); - EXPECT_CALL(decoder_callbacks, + EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&headers), true)); + + // In the case of VM failure, failStream is called for both request and response stream types, + // so we need to make sure that we don't send the local reply twice. + EXPECT_CALL(decoder_callbacks_, sendLocalReply(Envoy::Http::Code::ServiceUnavailable, testing::Eq(""), _, testing::Eq(Grpc::Status::WellKnownGrpcStatus::Unavailable), testing::Eq("wasm_fail_stream"))); + EXPECT_CALL(encoder_callbacks_, sendLocalReply(_, _, _, _, _)).Times(0); // Create in-VM context. 
filter().onCreate(); @@ -1860,15 +1860,16 @@ TEST_P(WasmHttpFilterTest, PanicOnRequestBody) { } setupTest("panic"); setupFilter(); - Http::MockStreamDecoderFilterCallbacks decoder_callbacks; - filter().setDecoderFilterCallbacks(decoder_callbacks); - auto headers = Http::TestResponseHeaderMapImpl{{":status", "503"}}; - EXPECT_CALL(decoder_callbacks, encodeHeaders_(HeaderMapEqualRef(&headers), true)); - EXPECT_CALL(decoder_callbacks, + + // In the case of VM failure, failStream is called for both request and response stream types, + // so we need to make sure that we don't send the local reply twice. + EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&headers), true)); + EXPECT_CALL(decoder_callbacks_, sendLocalReply(Envoy::Http::Code::ServiceUnavailable, testing::Eq(""), _, testing::Eq(Grpc::Status::WellKnownGrpcStatus::Unavailable), testing::Eq("wasm_fail_stream"))); + EXPECT_CALL(encoder_callbacks_, sendLocalReply(_, _, _, _, _)).Times(0); // Create in-VM context. filter().onCreate(); @@ -1881,15 +1882,16 @@ TEST_P(WasmHttpFilterTest, PanicOnRequestTrailers) { } setupTest("panic"); setupFilter(); - Http::MockStreamDecoderFilterCallbacks decoder_callbacks; - filter().setDecoderFilterCallbacks(decoder_callbacks); - auto headers = Http::TestResponseHeaderMapImpl{{":status", "503"}}; - EXPECT_CALL(decoder_callbacks, encodeHeaders_(HeaderMapEqualRef(&headers), true)); - EXPECT_CALL(decoder_callbacks, + EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&headers), true)); + + // In the case of VM failure, failStream is called for both request and response stream types, + // so we need to make sure that we don't send the local reply twice. + EXPECT_CALL(decoder_callbacks_, sendLocalReply(Envoy::Http::Code::ServiceUnavailable, testing::Eq(""), _, testing::Eq(Grpc::Status::WellKnownGrpcStatus::Unavailable), testing::Eq("wasm_fail_stream"))); + EXPECT_CALL(encoder_callbacks_, sendLocalReply(_, _, _, _, _)).Times(0); // Create in-VM context. 
filter().onCreate(); @@ -1902,12 +1904,14 @@ TEST_P(WasmHttpFilterTest, PanicOnResponseHeaders) { } setupTest("panic"); setupFilter(); - Http::MockStreamEncoderFilterCallbacks encoder_callbacks; - filter().setEncoderFilterCallbacks(encoder_callbacks); - EXPECT_CALL(encoder_callbacks, + + // In the case of VM failure, failStream is called for both request and response stream types, + // so we need to make sure that we don't send the local reply twice. + EXPECT_CALL(decoder_callbacks_, sendLocalReply(Envoy::Http::Code::ServiceUnavailable, testing::Eq(""), _, testing::Eq(Grpc::Status::WellKnownGrpcStatus::Unavailable), testing::Eq("wasm_fail_stream"))); + EXPECT_CALL(encoder_callbacks_, sendLocalReply(_, _, _, _, _)).Times(0); // Create in-VM context. filter().onCreate(); @@ -1921,12 +1925,14 @@ TEST_P(WasmHttpFilterTest, PanicOnResponseBody) { } setupTest("panic"); setupFilter(); - Http::MockStreamEncoderFilterCallbacks encoder_callbacks; - filter().setEncoderFilterCallbacks(encoder_callbacks); - EXPECT_CALL(encoder_callbacks, + + // In the case of VM failure, failStream is called for both request and response stream types, + // so we need to make sure that we don't send the local reply twice. + EXPECT_CALL(decoder_callbacks_, sendLocalReply(Envoy::Http::Code::ServiceUnavailable, testing::Eq(""), _, testing::Eq(Grpc::Status::WellKnownGrpcStatus::Unavailable), testing::Eq("wasm_fail_stream"))); + EXPECT_CALL(encoder_callbacks_, sendLocalReply(_, _, _, _, _)).Times(0); // Create in-VM context. filter().onCreate(); @@ -1939,12 +1945,14 @@ TEST_P(WasmHttpFilterTest, PanicOnResponseTrailers) { } setupTest("panic"); setupFilter(); - Http::MockStreamEncoderFilterCallbacks encoder_callbacks; - filter().setEncoderFilterCallbacks(encoder_callbacks); - EXPECT_CALL(encoder_callbacks, + + // In the case of VM failure, failStream is called for both request and response stream types, + // so we need to make sure that we don't send the local reply twice. 
+ EXPECT_CALL(decoder_callbacks_, sendLocalReply(Envoy::Http::Code::ServiceUnavailable, testing::Eq(""), _, testing::Eq(Grpc::Status::WellKnownGrpcStatus::Unavailable), testing::Eq("wasm_fail_stream"))); + EXPECT_CALL(encoder_callbacks_, sendLocalReply(_, _, _, _, _)).Times(0); // Create in-VM context. filter().onCreate(); diff --git a/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.cc b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.cc index 89db1e5e51401..62fc696512081 100644 --- a/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.cc +++ b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.cc @@ -5,7 +5,7 @@ namespace Extensions { namespace ListenerFilters { void ListenerFilterFuzzer::fuzz( - Network::ListenerFilter& filter, + Network::ListenerFilterPtr filter, const test::extensions::filters::listener::FilterFuzzTestCase& input) { try { socket_.addressProvider().setLocalAddress( @@ -32,7 +32,7 @@ void ListenerFilterFuzzer::fuzz( testing::ReturnNew>())); } - filter.onAccept(cb_); + filter->onAccept(cb_); if (file_event_callback_ == nullptr) { // If filter does not call createFileEvent (i.e. 
original_dst and original_src) diff --git a/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h index fa98a6c552672..7caab46d77883 100644 --- a/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h +++ b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h @@ -23,7 +23,7 @@ class ListenerFilterFuzzer { ON_CALL(Const(cb_), dynamicMetadata()).WillByDefault(testing::ReturnRef(metadata_)); } - void fuzz(Network::ListenerFilter& filter, + void fuzz(Network::ListenerFilterPtr filter, const test::extensions::filters::listener::FilterFuzzTestCase& input); private: diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_fuzz_test.cc b/test/extensions/filters/listener/http_inspector/http_inspector_fuzz_test.cc index 624475651551d..d8ea3d2b2b4d8 100644 --- a/test/extensions/filters/listener/http_inspector/http_inspector_fuzz_test.cc +++ b/test/extensions/filters/listener/http_inspector/http_inspector_fuzz_test.cc @@ -22,7 +22,7 @@ DEFINE_PROTO_FUZZER(const test::extensions::filters::listener::FilterFuzzTestCas auto filter = std::make_unique(cfg); ListenerFilterFuzzer fuzzer; - fuzzer.fuzz(*filter, input); + fuzzer.fuzz(std::move(filter), input); } } // namespace HttpInspector diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_test.cc b/test/extensions/filters/listener/http_inspector/http_inspector_test.cc index 8499c8e3428d4..1104855f41060 100644 --- a/test/extensions/filters/listener/http_inspector/http_inspector_test.cc +++ b/test/extensions/filters/listener/http_inspector/http_inspector_test.cc @@ -31,7 +31,20 @@ class HttpInspectorTest : public testing::Test { HttpInspectorTest() : cfg_(std::make_shared(store_)), io_handle_(std::make_unique(42)) {} - ~HttpInspectorTest() override { io_handle_->close(); } + ~HttpInspectorTest() override { + filter_.reset(); + EXPECT_CALL(dispatcher_, + createFileEvent_(_, _, 
Event::PlatformDefaultTriggerType, + Event::FileReadyType::Read | Event::FileReadyType::Closed)) + .WillOnce(ReturnNew>()); + // This is used to test the FileEvent was reset by the listener filters. + // Otherwise the assertion inside `initializeFileEvent` will be triggered. + io_handle_->initializeFileEvent( + dispatcher_, [](uint32_t) -> void {}, Event::PlatformDefaultTriggerType, + Event::FileReadyType::Read | Event::FileReadyType::Closed); + io_handle_->resetFileEvents(); + io_handle_->close(); + } void init(bool include_inline_recv = true) { filter_ = std::make_unique(cfg_); diff --git a/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc b/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc index 49ccc85a71e3a..f8d4368b79813 100644 --- a/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc +++ b/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc @@ -20,7 +20,7 @@ DEFINE_PROTO_FUZZER(const test::extensions::filters::listener::FilterFuzzTestCas auto filter = std::make_unique(envoy::config::core::v3::TrafficDirection::UNSPECIFIED); ListenerFilterFuzzer fuzzer; - fuzzer.fuzz(*filter, input); + fuzzer.fuzz(std::move(filter), input); } } // namespace OriginalDst diff --git a/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc b/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc index 2e3c8dc5a646a..2b2f70ee016a5 100644 --- a/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc +++ b/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc @@ -21,7 +21,7 @@ DEFINE_PROTO_FUZZER( Config config(input.config()); auto filter = std::make_unique(config); ListenerFilterFuzzer fuzzer; - fuzzer.fuzz(*filter, input.fuzzed()); + fuzzer.fuzz(std::move(filter), input.fuzzed()); } } // namespace OriginalSrc diff --git a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_fuzz_test.cc
b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_fuzz_test.cc index 4e0cbced1409c..7416e92c8bba4 100644 --- a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_fuzz_test.cc +++ b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_fuzz_test.cc @@ -23,7 +23,7 @@ DEFINE_PROTO_FUZZER( auto filter = std::make_unique(std::move(cfg)); ListenerFilterFuzzer fuzzer; - fuzzer.fuzz(*filter, input.fuzzed()); + fuzzer.fuzz(std::move(filter), input.fuzzed()); } } // namespace ProxyProtocol diff --git a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc index ff3082ed5d042..b02c80e6b07c0 100644 --- a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc +++ b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc @@ -53,7 +53,7 @@ class ProxyProtocolTest : public testing::TestWithParam { public: ProxyProtocolTest() - : api_(Api::createApiForTest(stats_store_)), + : api_(Api::createApiForTest(stats_store_, time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), socket_(std::make_shared( Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true)), @@ -78,8 +78,10 @@ class ProxyProtocolTest : public testing::TestWithParamaddressProvider().remoteAddress()->ip()->addressAsString(), + "127.0.0.1"); + } else { + EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->ip()->addressAsString(), + "::1"); + } + EXPECT_EQ(stats_store_.counter("downstream_cx_total").value(), 1); + disconnect(); +} + TEST_P(ProxyProtocolTest, V1Basic) { connect(); write("PROXY TCP4 1.2.3.4 253.253.253.253 65535 1234\r\nmore data"); diff --git a/test/extensions/filters/listener/tls_inspector/tls_inspector_fuzz_test.cc b/test/extensions/filters/listener/tls_inspector/tls_inspector_fuzz_test.cc index fc4b4f1ae2620..b722a2c50ec54 100644 --- a/test/extensions/filters/listener/tls_inspector/tls_inspector_fuzz_test.cc 
+++ b/test/extensions/filters/listener/tls_inspector/tls_inspector_fuzz_test.cc @@ -31,7 +31,7 @@ DEFINE_PROTO_FUZZER( auto filter = std::make_unique(std::move(cfg)); ListenerFilterFuzzer fuzzer; - fuzzer.fuzz(*filter, input.fuzzed()); + fuzzer.fuzz(std::move(filter), input.fuzzed()); } } // namespace TlsInspector diff --git a/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc b/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc index 6e119c87dcd92..bb152489a569b 100644 --- a/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc +++ b/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc @@ -32,7 +32,20 @@ class TlsInspectorTest : public testing::TestWithParam(store_)), io_handle_(std::make_unique(42)) {} - ~TlsInspectorTest() override { io_handle_->close(); } + ~TlsInspectorTest() override { + filter_.reset(); + EXPECT_CALL(dispatcher_, + createFileEvent_(_, _, Event::PlatformDefaultTriggerType, + Event::FileReadyType::Read | Event::FileReadyType::Closed)) + .WillOnce(ReturnNew>()); + // This is used to test the FileEvent was reset by the listener filters. + // Otherwise the assertion inside `initializeFileEvent` will be triggered.
+ io_handle_->initializeFileEvent( + dispatcher_, [](uint32_t) -> void {}, Event::PlatformDefaultTriggerType, + Event::FileReadyType::Read | Event::FileReadyType::Closed); + io_handle_->resetFileEvents(); + io_handle_->close(); + } void init() { filter_ = std::make_unique(cfg_); diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index 2477db6bf4fc2..4bdd11bd6695b 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -1409,7 +1409,6 @@ stat_prefix: my_stat_prefix cluster: fake_cluster http_filters: - name: encoder-decoder-buffer-filter - typed_config: {} access_log: - name: accesslog typed_config: @@ -1439,7 +1438,6 @@ stat_prefix: my_stat_prefix cluster: fake_cluster http_filters: - name: encoder-decoder-buffer-filter - typed_config: {} access_log: - name: accesslog typed_config: @@ -1479,7 +1477,6 @@ stat_prefix: my_stat_prefix cluster: fake_cluster http_filters: - name: envoy.filters.http.router - typed_config: {} http2_protocol_options: hpack_table_size: 1024 custom_settings_parameters: { identifier: 3, value: 2048 } @@ -1508,7 +1505,6 @@ stat_prefix: my_stat_prefix cluster: fake_cluster http_filters: - name: encoder-decoder-buffer-filter - typed_config: {} http2_protocol_options: hpack_table_size: 2048 max_concurrent_streams: 4096 @@ -1542,7 +1538,6 @@ stat_prefix: my_stat_prefix cluster: fake_cluster http_filters: - name: envoy.filters.http.router - typed_config: {} http2_protocol_options: custom_settings_parameters: - { identifier: 8, value: 0 } @@ -1566,7 +1561,6 @@ stat_prefix: my_stat_prefix cluster: fake_cluster http_filters: - name: envoy.filters.http.router - typed_config: {} http2_protocol_options: allow_connect: true )EOF"; @@ -1590,7 +1584,6 @@ stat_prefix: my_stat_prefix cluster: fake_cluster http_filters: - name: 
encoder-decoder-buffer-filter - typed_config: {} http2_protocol_options: custom_settings_parameters: { identifier: 2, value: 1 } )EOF"; @@ -1615,7 +1608,6 @@ stat_prefix: my_stat_prefix cluster: fake_cluster http_filters: - name: encoder-decoder-buffer-filter - typed_config: {} http2_protocol_options: hpack_table_size: 2048 max_concurrent_streams: 4096 @@ -1648,7 +1640,6 @@ stat_prefix: my_stat_prefix cluster: fake_cluster http_filters: - name: envoy.filters.http.router - typed_config: {} http2_protocol_options: custom_settings_parameters: - { identifier: 10, value: 0 } diff --git a/test/extensions/filters/network/postgres_proxy/BUILD b/test/extensions/filters/network/postgres_proxy/BUILD index f121e6b178e2e..10a2680e00f95 100644 --- a/test/extensions/filters/network/postgres_proxy/BUILD +++ b/test/extensions/filters/network/postgres_proxy/BUILD @@ -19,6 +19,7 @@ envoy_extension_cc_test_library( extension_name = "envoy.filters.network.postgres_proxy", deps = [ "//source/common/buffer:buffer_lib", + "//source/extensions/filters/network/postgres_proxy:filter", ], ) diff --git a/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc b/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc index cc7f65cdcd035..3c6e05bf9f1fd 100644 --- a/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc +++ b/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc @@ -33,7 +33,7 @@ class PostgresProxyDecoderTestBase { PostgresProxyDecoderTestBase() { decoder_ = std::make_unique(&callbacks_); decoder_->initialize(); - decoder_->setStartup(false); + decoder_->state(DecoderImpl::State::InSyncState); } protected: @@ -60,6 +60,10 @@ class PostgresProxyFrontendEncrDecoderTest : public PostgresProxyDecoderTestBase class PostgresProxyBackendDecoderTest : public PostgresProxyDecoderTestBase, public ::testing::TestWithParam {}; +class PostgresProxyBackendStatementTest + : public PostgresProxyDecoderTestBase, + public 
::testing::TestWithParam> {}; + class PostgresProxyErrorTest : public PostgresProxyDecoderTestBase, public ::testing::TestWithParam> {}; @@ -75,7 +79,7 @@ class PostgresProxyNoticeTest // startup message the server should start using message format // with command as 1st byte. TEST_F(PostgresProxyDecoderTest, StartupMessage) { - decoder_->setStartup(true); + decoder_->state(DecoderImpl::State::InitState); buf_[0] = '\0'; // Startup message has the following structure: @@ -98,29 +102,25 @@ TEST_F(PostgresProxyDecoderTest, StartupMessage) { // Some other attribute data_.add("attribute"); // 9 bytes data_.add(buf_, 1); + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::NeedMoreData); data_.add("blah"); // 4 bytes + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::NeedMoreData); data_.add(buf_, 1); - decoder_->onData(data_, true); + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::ReadyForNext); ASSERT_THAT(data_.length(), 0); + // Decoder should move to InSyncState + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); // Verify parsing attributes ASSERT_THAT(decoder_->getAttributes().at("user"), "postgres"); ASSERT_THAT(decoder_->getAttributes().at("database"), "testdb"); // This attribute should not be found ASSERT_THAT(decoder_->getAttributes().find("no"), decoder_->getAttributes().end()); - - // Now feed normal message with 1bytes as command. - data_.add("P"); - // Add length. - data_.writeBEInt(6); // 4 bytes of length + 2 bytes of data. - data_.add("AB"); - decoder_->onData(data_, true); - ASSERT_THAT(data_.length(), 0); } // Test verifies that when Startup message does not carry // "database" attribute, it is derived from "user". 
TEST_F(PostgresProxyDecoderTest, StartupMessageNoAttr) { - decoder_->setStartup(true); + decoder_->state(DecoderImpl::State::InitState); buf_[0] = '\0'; // Startup message has the following structure: @@ -141,7 +141,8 @@ TEST_F(PostgresProxyDecoderTest, StartupMessageNoAttr) { data_.add(buf_, 1); data_.add("blah"); // 4 bytes data_.add(buf_, 1); - decoder_->onData(data_, true); + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); ASSERT_THAT(data_.length(), 0); // Verify parsing attributes @@ -151,53 +152,109 @@ TEST_F(PostgresProxyDecoderTest, StartupMessageNoAttr) { ASSERT_THAT(decoder_->getAttributes().find("no"), decoder_->getAttributes().end()); } +TEST_F(PostgresProxyDecoderTest, InvalidStartupMessage) { + decoder_->state(DecoderImpl::State::InitState); + + // Create a bogus message with incorrect syntax. + // Length is 10 bytes. + data_.writeBEInt(10); + for (auto i = 0; i < 6; i++) { + data_.writeBEInt(i); + } + + // Decoder should move to OutOfSync state. + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::OutOfSyncState); + ASSERT_THAT(data_.length(), 0); + + // All-zeros message. + data_.writeBEInt(0); + for (auto i = 0; i < 6; i++) { + data_.writeBEInt(0); + } + + // Decoder should move to OutOfSync state. + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::OutOfSyncState); + ASSERT_THAT(data_.length(), 0); +} + +// Test that decoder does not crash when it receives +// random data in InitState. +TEST_F(PostgresProxyDecoderTest, StartupMessageRandomData) { + srand(time(nullptr)); + for (auto i = 0; i < 10000; i++) { + decoder_->state(DecoderImpl::State::InSyncState); + // Generate random length. + uint32_t len = rand() % 20000; + // Now fill the buffer with random data. 
+ for (uint32_t j = 0; j < len; j++) { + data_.writeBEInt(rand() % 1024); + uint8_t data = static_cast(rand() % 256); + data_.writeBEInt(data); + } + // Feed the buffer to the decoder. It should not crash. + decoder_->onData(data_, true); + + // Reset the buffer for the next iteration. + data_.drain(data_.length()); + } +} + // Test processing messages which map 1:1 with buffer. // The buffer contains just a single entire message and // nothing more. TEST_F(PostgresProxyDecoderTest, ReadingBufferSingleMessages) { - + decoder_->state(DecoderImpl::State::InSyncState); // Feed empty buffer - should not crash. - decoder_->onData(data_, true); + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::NeedMoreData); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); // Put one byte. This is not enough to parse the message and that byte // should stay in the buffer. - data_.add("P"); - decoder_->onData(data_, true); + data_.add("H"); + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::NeedMoreData); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); ASSERT_THAT(data_.length(), 1); // Add length of 4 bytes. It would mean completely empty message. // but it should be consumed. data_.writeBEInt(4); - decoder_->onData(data_, true); + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); ASSERT_THAT(data_.length(), 0); // Create a message with 5 additional bytes. - data_.add("P"); + data_.add("d"); // Add length. data_.writeBEInt(9); // 4 bytes of length field + 5 of data. data_.add(buf_, 5); - decoder_->onData(data_, true); + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::ReadyForNext); ASSERT_THAT(data_.length(), 0); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); } // Test simulates situation when decoder is called with incomplete message. 
// The message should not be processed until the buffer is filled // with missing bytes. TEST_F(PostgresProxyDecoderTest, ReadingBufferLargeMessages) { + decoder_->state(DecoderImpl::State::InSyncState); // Fill the buffer with message of 100 bytes long // but the buffer contains only 98 bytes. // It should not be processed. - data_.add("P"); + data_.add("d"); // Add length. data_.writeBEInt(100); // This also includes length field data_.add(buf_, 94); - decoder_->onData(data_, true); + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::NeedMoreData); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); // The buffer contains command (1 byte), length (4 bytes) and 94 bytes of message. ASSERT_THAT(data_.length(), 99); // Add 2 missing bytes and feed again to decoder. data_.add("AB"); - decoder_->onData(data_, true); + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); ASSERT_THAT(data_.length(), 0); } @@ -205,14 +262,15 @@ TEST_F(PostgresProxyDecoderTest, ReadingBufferLargeMessages) { // message. Call to the decoder should consume only one message // at a time and only when the buffer contains the entire message. TEST_F(PostgresProxyDecoderTest, TwoMessagesInOneBuffer) { + decoder_->state(DecoderImpl::State::InSyncState); // Create the first message of 50 bytes long (+1 for command). - data_.add("P"); + data_.add("d"); // Add length. data_.writeBEInt(50); data_.add(buf_, 46); // Create the second message of 50 + 46 bytes (+1 for command). - data_.add("P"); + data_.add("d"); // Add length. data_.writeBEInt(96); data_.add(buf_, 46); @@ -223,49 +281,72 @@ TEST_F(PostgresProxyDecoderTest, TwoMessagesInOneBuffer) { // 2nd: command (1 byte), length (4 bytes), 92 bytes of data ASSERT_THAT(data_.length(), 148); // Process the first message. 
- decoder_->onData(data_, true); + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); ASSERT_THAT(data_.length(), 97); // Process the second message. - decoder_->onData(data_, true); + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); ASSERT_THAT(data_.length(), 0); } TEST_F(PostgresProxyDecoderTest, Unknown) { + decoder_->state(DecoderImpl::State::InSyncState); // Create invalid message. The first byte is invalid "=" // Message must be at least 5 bytes to be parsed. EXPECT_CALL(callbacks_, incMessagesUnknown()); createPostgresMsg(data_, "=", "some not important string which will be ignored anyways"); - decoder_->onData(data_, true); + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::ReadyForNext); + ASSERT_THAT(data_.length(), 0); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); +} + +// Test verifies that decoder goes into OutOfSyncState when +// it encounters a message with wrong syntax. +TEST_F(PostgresProxyDecoderTest, IncorrectMessages) { + decoder_->state(DecoderImpl::State::InSyncState); + + // Create incorrect message. Message syntax is + // 1 byte type ('f'), 4 bytes of length and zero terminated string. + data_.add("f"); + data_.writeBEInt(8); + // Do not write terminating zero for the string. + data_.add("test"); + + // The decoder will indicate that it is ready for more data, but + // will enter OutOfSyncState. + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::OutOfSyncState); } -// Test if each frontend command calls incMessagesFrontend() method. -TEST_P(PostgresProxyFrontendDecoderTest, FrontendInc) { +// Test if frontend command calls incMessagesFrontend() method.
+TEST_F(PostgresProxyFrontendDecoderTest, FrontendInc) { + decoder_->state(DecoderImpl::State::InSyncState); EXPECT_CALL(callbacks_, incMessagesFrontend()); - createPostgresMsg(data_, GetParam(), "SELECT 1;"); - decoder_->onData(data_, true); + createPostgresMsg(data_, "f", "some text"); + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); // Make sure that decoder releases memory used during message processing. ASSERT_TRUE(decoder_->getMessage().empty()); } -// Run the above test for each frontend message. -INSTANTIATE_TEST_SUITE_P(FrontEndMessagesTests, PostgresProxyFrontendDecoderTest, - ::testing::Values("B", "C", "d", "c", "f", "D", "E", "H", "F", "p", "P", - "p", "Q", "S", "X")); - // Test if X message triggers incRollback and sets proper state in transaction. TEST_F(PostgresProxyFrontendDecoderTest, TerminateMessage) { + decoder_->state(DecoderImpl::State::InSyncState); // Set decoder state NOT to be in_transaction. decoder_->getSession().setInTransaction(false); EXPECT_CALL(callbacks_, incTransactionsRollback()).Times(0); createPostgresMsg(data_, "X"); - decoder_->onData(data_, true); + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); // Now set the decoder to be in_transaction state. 
decoder_->getSession().setInTransaction(true); EXPECT_CALL(callbacks_, incTransactionsRollback()); createPostgresMsg(data_, "X"); - decoder_->onData(data_, true); + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); ASSERT_FALSE(decoder_->getSession().inTransaction()); } @@ -273,7 +354,8 @@ TEST_F(PostgresProxyFrontendDecoderTest, TerminateMessage) { TEST_F(PostgresProxyFrontendDecoderTest, QueryMessage) { EXPECT_CALL(callbacks_, processQuery); createPostgresMsg(data_, "Q", "SELECT * FROM whatever;"); - decoder_->onData(data_, true); + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); } // Parse message has optional Query name which may be in front of actual @@ -295,7 +377,8 @@ TEST_F(PostgresProxyFrontendDecoderTest, ParseMessage) { query_name.reserve(1); query_name += '\0'; createPostgresMsg(data_, "P", query_name + query + query_params); - decoder_->onData(data_, true); + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); // Message with optional name query_name query_name.clear(); @@ -303,21 +386,18 @@ TEST_F(PostgresProxyFrontendDecoderTest, ParseMessage) { query_name += "P0_8"; query_name += '\0'; createPostgresMsg(data_, "P", query_name + query + query_params); - decoder_->onData(data_, true); + ASSERT_THAT(decoder_->onData(data_, true), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); } -// Test if each backend command calls incMessagesBackend()) method. -TEST_P(PostgresProxyBackendDecoderTest, BackendInc) { +// Test if backend command calls incMessagesBackend()) method. 
+TEST_F(PostgresProxyBackendDecoderTest, BackendInc) { EXPECT_CALL(callbacks_, incMessagesBackend()); - createPostgresMsg(data_, GetParam(), "Some not important message"); - decoder_->onData(data_, false); + createPostgresMsg(data_, "I"); + ASSERT_THAT(decoder_->onData(data_, false), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); } -// Run the above test for each backend message. -INSTANTIATE_TEST_SUITE_P(BackendMessagesTests, PostgresProxyBackendDecoderTest, - ::testing::Values("R", "K", "2", "3", "C", "d", "c", "G", "H", "D", "I", - "E", "V", "v", "n", "N", "A", "t", "S", "1", "s", "Z", - "T")); // Test parsing backend messages. // The parser should react only to the first word until the space. TEST_F(PostgresProxyBackendDecoderTest, ParseStatement) { @@ -325,80 +405,93 @@ TEST_F(PostgresProxyBackendDecoderTest, ParseStatement) { // Rollback counter should be bumped up. EXPECT_CALL(callbacks_, incTransactionsRollback()); createPostgresMsg(data_, "C", "ROLLBACK 123"); - decoder_->onData(data_, false); + ASSERT_THAT(decoder_->onData(data_, false), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); data_.drain(data_.length()); // Now try just keyword without a space at the end. EXPECT_CALL(callbacks_, incTransactionsRollback()); createPostgresMsg(data_, "C", "ROLLBACK"); - decoder_->onData(data_, false); + ASSERT_THAT(decoder_->onData(data_, false), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); data_.drain(data_.length()); // Partial message should be ignored. 
EXPECT_CALL(callbacks_, incTransactionsRollback()).Times(0); EXPECT_CALL(callbacks_, incStatements(DecoderCallbacks::StatementType::Other)); createPostgresMsg(data_, "C", "ROLL"); - decoder_->onData(data_, false); + ASSERT_THAT(decoder_->onData(data_, false), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); data_.drain(data_.length()); // Keyword without a space should be ignored. EXPECT_CALL(callbacks_, incTransactionsRollback()).Times(0); EXPECT_CALL(callbacks_, incStatements(DecoderCallbacks::StatementType::Other)); createPostgresMsg(data_, "C", "ROLLBACK123"); - decoder_->onData(data_, false); + ASSERT_THAT(decoder_->onData(data_, false), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); data_.drain(data_.length()); } // Test Backend messages and make sure that they // trigger proper stats updates. TEST_F(PostgresProxyDecoderTest, Backend) { + decoder_->state(DecoderImpl::State::InSyncState); // C message EXPECT_CALL(callbacks_, incStatements(DecoderCallbacks::StatementType::Other)); createPostgresMsg(data_, "C", "BEGIN 123"); - decoder_->onData(data_, false); - data_.drain(data_.length()); + ASSERT_THAT(decoder_->onData(data_, false), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); + ASSERT_THAT(data_.length(), 0); ASSERT_TRUE(decoder_->getSession().inTransaction()); EXPECT_CALL(callbacks_, incStatements(DecoderCallbacks::StatementType::Other)); createPostgresMsg(data_, "C", "START TR"); - decoder_->onData(data_, false); - data_.drain(data_.length()); + ASSERT_THAT(decoder_->onData(data_, false), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); + ASSERT_THAT(data_.length(), 0); EXPECT_CALL(callbacks_, incStatements(DecoderCallbacks::StatementType::Other)); EXPECT_CALL(callbacks_, incTransactionsCommit()); createPostgresMsg(data_, "C", "COMMIT"); - 
decoder_->onData(data_, false); - data_.drain(data_.length()); + ASSERT_THAT(decoder_->onData(data_, false), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); + ASSERT_THAT(data_.length(), 0); EXPECT_CALL(callbacks_, incStatements(DecoderCallbacks::StatementType::Select)); EXPECT_CALL(callbacks_, incTransactionsCommit()); createPostgresMsg(data_, "C", "SELECT"); - decoder_->onData(data_, false); - data_.drain(data_.length()); + ASSERT_THAT(decoder_->onData(data_, false), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); + ASSERT_THAT(data_.length(), 0); EXPECT_CALL(callbacks_, incStatements(DecoderCallbacks::StatementType::Other)); EXPECT_CALL(callbacks_, incTransactionsRollback()); createPostgresMsg(data_, "C", "ROLLBACK"); - decoder_->onData(data_, false); - data_.drain(data_.length()); + ASSERT_THAT(decoder_->onData(data_, false), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); + ASSERT_THAT(data_.length(), 0); EXPECT_CALL(callbacks_, incStatements(DecoderCallbacks::StatementType::Insert)); EXPECT_CALL(callbacks_, incTransactionsCommit()); createPostgresMsg(data_, "C", "INSERT 1"); - decoder_->onData(data_, false); - data_.drain(data_.length()); + ASSERT_THAT(decoder_->onData(data_, false), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); + ASSERT_THAT(data_.length(), 0); EXPECT_CALL(callbacks_, incStatements(DecoderCallbacks::StatementType::Update)); EXPECT_CALL(callbacks_, incTransactionsCommit()); createPostgresMsg(data_, "C", "UPDATE 123"); - decoder_->onData(data_, false); - data_.drain(data_.length()); + ASSERT_THAT(decoder_->onData(data_, false), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); + ASSERT_THAT(data_.length(), 0); EXPECT_CALL(callbacks_, incStatements(DecoderCallbacks::StatementType::Delete)); 
EXPECT_CALL(callbacks_, incTransactionsCommit()); createPostgresMsg(data_, "C", "DELETE 88"); - decoder_->onData(data_, false); - data_.drain(data_.length()); + ASSERT_THAT(decoder_->onData(data_, false), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); + ASSERT_THAT(data_.length(), 0); } // Test checks deep inspection of the R message. @@ -412,7 +505,8 @@ TEST_F(PostgresProxyBackendDecoderTest, AuthenticationMsg) { // sessions must not be increased. EXPECT_CALL(callbacks_, incSessionsUnencrypted()).Times(0); createPostgresMsg(data_, "R", "blah blah"); - decoder_->onData(data_, false); + ASSERT_THAT(decoder_->onData(data_, false), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); data_.drain(data_.length()); // Create the correct payload which means that @@ -423,7 +517,8 @@ TEST_F(PostgresProxyBackendDecoderTest, AuthenticationMsg) { data_.writeBEInt(8); // Add 4-byte code. data_.writeBEInt(0); - decoder_->onData(data_, false); + ASSERT_THAT(decoder_->onData(data_, false), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); data_.drain(data_.length()); } @@ -432,7 +527,8 @@ TEST_F(PostgresProxyBackendDecoderTest, AuthenticationMsg) { TEST_P(PostgresProxyErrorTest, ParseErrorMsgs) { EXPECT_CALL(callbacks_, incErrors(std::get<1>(GetParam()))); createPostgresMsg(data_, "E", std::get<0>(GetParam())); - decoder_->onData(data_, false); + ASSERT_THAT(decoder_->onData(data_, false), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); } INSTANTIATE_TEST_SUITE_P( @@ -461,7 +557,8 @@ INSTANTIATE_TEST_SUITE_P( TEST_P(PostgresProxyNoticeTest, ParseNoticeMsgs) { EXPECT_CALL(callbacks_, incNotices(std::get<1>(GetParam()))); createPostgresMsg(data_, "N", std::get<0>(GetParam())); - decoder_->onData(data_, false); + ASSERT_THAT(decoder_->onData(data_, false), Decoder::Result::ReadyForNext); + 
ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); } INSTANTIATE_TEST_SUITE_P( @@ -478,10 +575,10 @@ INSTANTIATE_TEST_SUITE_P( // that protocol uses encryption. TEST_P(PostgresProxyFrontendEncrDecoderTest, EncyptedTraffic) { // Set decoder to wait for initial message. - decoder_->setStartup(true); + decoder_->state(DecoderImpl::State::InitState); // Initial state is no-encryption. - ASSERT_FALSE(decoder_->encrypted()); + // ASSERT_FALSE(decoder_->encrypted()); // Indicate that decoder should continue with processing the message. ON_CALL(callbacks_, onSSLRequest).WillByDefault(testing::Return(true)); @@ -493,8 +590,11 @@ TEST_P(PostgresProxyFrontendEncrDecoderTest, EncyptedTraffic) { // 1234 in the most significant 16 bits, and some code in the least significant 16 bits. // Add 4 bytes long code data_.writeBEInt(GetParam()); - decoder_->onData(data_, true); - ASSERT_TRUE(decoder_->encrypted()); + // Decoder should indicate that it is ready for mode data and entered + // encrypted state. + ASSERT_THAT(decoder_->onData(data_, false), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::EncryptedState); + // ASSERT_TRUE(decoder_->encrypted()); // Decoder should drain data. ASSERT_THAT(data_.length(), 0); @@ -503,7 +603,8 @@ TEST_P(PostgresProxyFrontendEncrDecoderTest, EncyptedTraffic) { EXPECT_CALL(callbacks_, incMessagesFrontend()).Times(0); createPostgresMsg(data_, "P", "Some message just to fill the payload."); - decoder_->onData(data_, true); + ASSERT_THAT(decoder_->onData(data_, false), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::EncryptedState); // Decoder should drain data. ASSERT_THAT(data_.length(), 0); } @@ -517,7 +618,7 @@ INSTANTIATE_TEST_SUITE_P(FrontendEncryptedMessagesTests, PostgresProxyFrontendEn // Test onSSLRequest callback. TEST_F(PostgresProxyDecoderTest, TerminateSSL) { // Set decoder to wait for initial message. 
- decoder_->setStartup(true); + decoder_->state(DecoderImpl::State::InitState); // Indicate that decoder should not continue with processing the message // because filter will try to terminate SSL session. @@ -528,9 +629,10 @@ TEST_F(PostgresProxyDecoderTest, TerminateSSL) { // 1234 in the most significant 16 bits, and some code in the least significant 16 bits. // Add 4 bytes long code data_.writeBEInt(80877103); - decoder_->onData(data_, true); + ASSERT_THAT(decoder_->onData(data_, false), Decoder::Result::Stopped); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InitState); - // Decoder should interpret the session as encrypted stream. + // Decoder should interpret the session as clear-text stream. ASSERT_FALSE(decoder_->encrypted()); } @@ -569,11 +671,10 @@ class FakeBuffer : public Buffer::Instance { // Test verifies that decoder calls Buffer::linearize method // for messages which have associated 'action'. TEST_F(PostgresProxyDecoderTest, Linearize) { + decoder_->state(DecoderImpl::State::InSyncState); testing::NiceMock fake_buf; uint8_t body[] = "test\0"; - decoder_->setStartup(false); - // Simulate that decoder reads message which needs processing. // Query 'Q' message's body is just string. // Message header is 5 bytes and body will contain string "test\0". @@ -600,7 +701,8 @@ TEST_F(PostgresProxyDecoderTest, Linearize) { // It should call "Buffer::linearize". EXPECT_CALL(fake_buf, linearize).WillOnce([&](uint32_t) -> void* { return body; }); - decoder_->onData(fake_buf, false); + ASSERT_THAT(decoder_->onData(fake_buf, false), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); // Simulate that decoder reads message which does not need processing. // BindComplete message has type '2' and empty body. @@ -622,7 +724,8 @@ TEST_F(PostgresProxyDecoderTest, Linearize) { // Make sure that decoder does not call linearize. 
EXPECT_CALL(fake_buf, linearize).Times(0); - decoder_->onData(fake_buf, false); + ASSERT_THAT(decoder_->onData(fake_buf, false), Decoder::Result::ReadyForNext); + ASSERT_THAT(decoder_->state(), DecoderImpl::State::InSyncState); } } // namespace PostgresProxy diff --git a/test/extensions/filters/network/postgres_proxy/postgres_filter_test.cc b/test/extensions/filters/network/postgres_proxy/postgres_filter_test.cc index 1214dc4c661e3..6564883727602 100644 --- a/test/extensions/filters/network/postgres_proxy/postgres_filter_test.cc +++ b/test/extensions/filters/network/postgres_proxy/postgres_filter_test.cc @@ -84,7 +84,7 @@ TEST_P(PostgresFilterTest, ReadData) { EXPECT_CALL(*decoderPtr, onData) .WillOnce(WithArgs<0, 1>(Invoke([](Buffer::Instance& data, bool) -> Decoder::Result { data.drain(data.length()); - return Decoder::ReadyForNext; + return Decoder::Result::ReadyForNext; }))); std::get<0>(GetParam())(filter_.get(), data_, false); ASSERT_THAT(std::get<1>(GetParam())(filter_.get()), 0); @@ -93,11 +93,11 @@ TEST_P(PostgresFilterTest, ReadData) { EXPECT_CALL(*decoderPtr, onData) .WillOnce(WithArgs<0, 1>(Invoke([](Buffer::Instance& data, bool) -> Decoder::Result { data.drain(100); - return Decoder::ReadyForNext; + return Decoder::Result::ReadyForNext; }))) .WillOnce(WithArgs<0, 1>(Invoke([](Buffer::Instance& data, bool) -> Decoder::Result { data.drain(156); - return Decoder::ReadyForNext; + return Decoder::Result::ReadyForNext; }))); std::get<0>(GetParam())(filter_.get(), data_, false); ASSERT_THAT(std::get<1>(GetParam())(filter_.get()), 0); @@ -108,15 +108,15 @@ TEST_P(PostgresFilterTest, ReadData) { EXPECT_CALL(*decoderPtr, onData) .WillOnce(WithArgs<0, 1>(Invoke([](Buffer::Instance& data, bool) -> Decoder::Result { data.drain(100); - return Decoder::ReadyForNext; + return Decoder::Result::ReadyForNext; }))) .WillOnce(WithArgs<0, 1>(Invoke([](Buffer::Instance& data, bool) -> Decoder::Result { data.drain(100); - return Decoder::ReadyForNext; + return 
Decoder::Result::ReadyForNext; }))) .WillOnce(WithArgs<0, 1>(Invoke([](Buffer::Instance& data, bool) -> Decoder::Result { data.drain(0); - return Decoder::NeedMoreData; + return Decoder::Result::NeedMoreData; }))); std::get<0>(GetParam())(filter_.get(), data_, false); ASSERT_THAT(std::get<1>(GetParam())(filter_.get()), 56); @@ -135,7 +135,7 @@ INSTANTIATE_TEST_SUITE_P(ProcessDataTests, PostgresFilterTest, // It expects that certain statistics are updated. TEST_F(PostgresFilterTest, BackendMsgsStats) { // pretend that startup message has been received. - static_cast(filter_->getDecoder())->setStartup(false); + static_cast(filter_->getDecoder())->state(DecoderImpl::State::InSyncState); // unknown message createPostgresMsg(data_, "=", "blah blah blah"); @@ -230,7 +230,7 @@ TEST_F(PostgresFilterTest, BackendMsgsStats) { // verifies that statistic counters are increased. TEST_F(PostgresFilterTest, ErrorMsgsStats) { // Pretend that startup message has been received. - static_cast(filter_->getDecoder())->setStartup(false); + static_cast(filter_->getDecoder())->state(DecoderImpl::State::InSyncState); createPostgresMsg(data_, "E", "SERRORVERRORC22012"); filter_->onWrite(data_, false); @@ -257,7 +257,7 @@ TEST_F(PostgresFilterTest, ErrorMsgsStats) { // that corresponding stats counters are updated. TEST_F(PostgresFilterTest, NoticeMsgsStats) { // Pretend that startup message has been received. - static_cast(filter_->getDecoder())->setStartup(false); + static_cast(filter_->getDecoder())->state(DecoderImpl::State::InSyncState); createPostgresMsg(data_, "N", "SblalalaC2345"); filter_->onWrite(data_, false); @@ -304,7 +304,7 @@ TEST_F(PostgresFilterTest, EncryptedSessionStats) { // Postgres metadata. TEST_F(PostgresFilterTest, MetadataIncorrectSQL) { // Pretend that startup message has been received. 
- static_cast(filter_->getDecoder())->setStartup(false); + static_cast(filter_->getDecoder())->state(DecoderImpl::State::InSyncState); setMetadata(); createPostgresMsg(data_, "Q", "BLAH blah blah"); @@ -322,7 +322,7 @@ TEST_F(PostgresFilterTest, MetadataIncorrectSQL) { // and it happens only when parse_sql flag is true. TEST_F(PostgresFilterTest, QueryMessageMetadata) { // Pretend that startup message has been received. - static_cast(filter_->getDecoder())->setStartup(false); + static_cast(filter_->getDecoder())->state(DecoderImpl::State::InSyncState); setMetadata(); // Disable creating parsing SQL and creating metadata. diff --git a/test/extensions/filters/network/postgres_proxy/postgres_message_test.cc b/test/extensions/filters/network/postgres_proxy/postgres_message_test.cc index ec7b8e1b713ba..9fb2d5c89277d 100644 --- a/test/extensions/filters/network/postgres_proxy/postgres_message_test.cc +++ b/test/extensions/filters/network/postgres_proxy/postgres_message_test.cc @@ -28,7 +28,17 @@ TYPED_TEST_SUITE(IntTest, IntTypes); TYPED_TEST(IntTest, BasicRead) { this->data_.template writeBEInt().get())>(12); uint64_t pos = 0; - uint64_t left = this->data_.length(); + uint64_t left; + // Simulate that message is too short. + left = sizeof(TypeParam) - 1; + ASSERT_THAT(Message::ValidationFailed, this->field_.validate(this->data_, 0, pos, left)); + // Single 4-byte int. Message length is correct. + left = sizeof(TypeParam); + ASSERT_THAT(Message::ValidationOK, this->field_.validate(this->data_, 0, pos, left)); + + // Read the value after successful validation. 
+ pos = 0; + left = sizeof(TypeParam); ASSERT_TRUE(this->field_.read(this->data_, pos, left)); ASSERT_THAT(this->field_.toString(), "[12]"); @@ -46,6 +56,10 @@ TYPED_TEST(IntTest, ReadWithLeftovers) { this->data_.template writeBEInt(11); uint64_t pos = 0; uint64_t left = this->data_.length(); + ASSERT_THAT(Message::ValidationOK, this->field_.validate(this->data_, 0, pos, left)); + + pos = 0; + left = this->data_.length(); ASSERT_TRUE(this->field_.read(this->data_, pos, left)); ASSERT_THAT(this->field_.toString(), "[12]"); // pos should be moved forward by the number of bytes read. @@ -59,8 +73,13 @@ TYPED_TEST(IntTest, ReadAtOffset) { // write 1 byte before the actual value. this->data_.template writeBEInt(11); this->data_.template writeBEInt().get())>(12); + uint64_t pos = 1; uint64_t left = this->data_.length() - 1; + ASSERT_THAT(Message::ValidationOK, this->field_.validate(this->data_, 1, pos, left)); + + pos = 1; + left = this->data_.length() - 1; ASSERT_TRUE(this->field_.read(this->data_, pos, left)); ASSERT_THAT(this->field_.toString(), "[12]"); // pos should be moved forward by the number of bytes read. @@ -73,8 +92,9 @@ TYPED_TEST(IntTest, NotEnoughData) { this->data_.template writeBEInt().get())>(12); // Start from offset 1. There is not enough data in the buffer for the required type. uint64_t pos = 1; - uint64_t left = this->data_.length() - pos; - ASSERT_FALSE(this->field_.read(this->data_, pos, left)); + uint64_t left = this->data_.length(); + + ASSERT_THAT(this->field_.validate(this->data_, 0, pos, left), Message::ValidationNeedMoreData); } // Byte1 should format content as char. 
@@ -86,6 +106,12 @@ TEST(Byte1, Formatting) { uint64_t pos = 0; uint64_t left = 1; + ASSERT_THAT(Message::ValidationOK, field.validate(data, 0, pos, left)); + ASSERT_THAT(pos, 1); + ASSERT_THAT(left, 0); + + pos = 0; + left = 1; ASSERT_TRUE(field.read(data, pos, left)); ASSERT_THAT(pos, 1); ASSERT_THAT(left, 0); @@ -99,9 +125,21 @@ TEST(StringType, SingleString) { Buffer::OwnedImpl data; data.add("test"); - data.writeBEInt(0); + // Passed length 3 is too short. uint64_t pos = 0; - uint64_t left = 5; + uint64_t left = 3; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationFailed); + // Correct length, but terminating zero is missing. + left = 5; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationNeedMoreData); + // Add terminating zero. + data.writeBEInt(0); + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationOK); + ASSERT_THAT(pos, 5); + ASSERT_THAT(left, 0); + + pos = 0; + left = 5; ASSERT_TRUE(field.read(data, pos, left)); ASSERT_THAT(pos, 5); ASSERT_THAT(left, 0); @@ -110,42 +148,6 @@ TEST(StringType, SingleString) { ASSERT_THAT(out, "[test]"); } -TEST(StringType, MultipleStrings) { - String field; - - // Add 3 strings. - Buffer::OwnedImpl data; - data.add("test1"); - data.writeBEInt(0); - data.add("test2"); - data.writeBEInt(0); - data.add("test3"); - data.writeBEInt(0); - uint64_t pos = 0; - uint64_t left = 3 * 6; - - // Read the first string. - ASSERT_TRUE(field.read(data, pos, left)); - ASSERT_THAT(pos, 1 * 6); - ASSERT_THAT(left, 2 * 6); - auto out = field.toString(); - ASSERT_THAT(out, "[test1]"); - - // Read the second string. - ASSERT_TRUE(field.read(data, pos, left)); - ASSERT_THAT(pos, 2 * 6); - ASSERT_THAT(left, 1 * 6); - out = field.toString(); - ASSERT_THAT(out, "[test2]"); - - // Read the third string. 
- ASSERT_TRUE(field.read(data, pos, left)); - ASSERT_THAT(pos, 3 * 6); - ASSERT_THAT(left, 0); - out = field.toString(); - ASSERT_THAT(out, "[test3]"); -} - TEST(StringType, NoTerminatingByte) { String field; @@ -153,7 +155,9 @@ TEST(StringType, NoTerminatingByte) { data.add("test"); uint64_t pos = 0; uint64_t left = 4; - ASSERT_FALSE(field.read(data, pos, left)); + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationFailed); + left = 5; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationNeedMoreData); } // ByteN type is always placed at the end of Postgres message. @@ -169,10 +173,31 @@ TEST(ByteN, BasicTest) { data.writeBEInt(i); } uint64_t pos = 0; - uint64_t left = 10; + uint64_t left; + + // Since ByteN structure does not contain length field, any + // value less than number of bytes in the buffer should + // pass validation. + pos = 0; + left = 0; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationOK); + ASSERT_THAT(pos, 0); + ASSERT_THAT(left, 0); + pos = 0; + left = 1; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationOK); + ASSERT_THAT(pos, 1); + ASSERT_THAT(left, 0); + pos = 0; + left = 4; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationOK); + ASSERT_THAT(pos, 4); + ASSERT_THAT(left, 0); + + pos = 0; + left = 10; ASSERT_TRUE(field.read(data, pos, left)); ASSERT_THAT(pos, 10); - // One byte should be left in the buffer. ASSERT_THAT(left, 0); auto out = field.toString(); @@ -189,7 +214,7 @@ TEST(ByteN, NotEnoughData) { } uint64_t pos = 0; uint64_t left = 11; - ASSERT_FALSE(field.read(data, pos, left)); + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationNeedMoreData); } TEST(ByteN, Empty) { @@ -199,6 +224,7 @@ TEST(ByteN, Empty) { // Write nothing to data buffer. 
uint64_t pos = 0; uint64_t left = 0; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationOK); ASSERT_TRUE(field.read(data, pos, left)); auto out = field.toString(); @@ -208,12 +234,49 @@ TEST(ByteN, Empty) { // VarByteN type. It contains 4 bytes length field with value which follows. TEST(VarByteN, BasicTest) { VarByteN field; - Buffer::OwnedImpl data; + + uint64_t pos = 0; + uint64_t left = 0; + // Simulate that message ended and VarByteN's length fields sticks past the + // message boundary. + data.writeBEInt(5); + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationFailed); + // Write VarByteN with length equal to zero. No value follows. - data.writeBEInt(0); + // Set structure length to be -1 (means no payload). + left = 4; + data.drain(data.length()); + data.writeBEInt(-1); + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationOK); + // The same for structure length 0. + pos = 0; + left = 4; + data.drain(data.length()); + data.writeBEInt(0); + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationOK); + + // Simulate that VarByteN would extend past message boundary. + data.drain(data.length()); + data.writeBEInt(30); + pos = 0; + left = 4; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationFailed); + + // Simulate that VarByteN length is 6, there are 6 bytes left to the + // message boundary, but buffer contains only 4 bytes. + data.drain(data.length()); + data.writeBEInt(6); + data.writeBEInt(16); + pos = 0; + left = 6; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationNeedMoreData); + + data.drain(data.length()); + // Write first value. + data.writeBEInt(0); - // Write value with 5 bytes. + // Write 2nd value with 5 bytes. data.writeBEInt(5); for (auto i = 0; i < 5; i++) { data.writeBEInt(10 + i); @@ -222,11 +285,15 @@ TEST(VarByteN, BasicTest) { // Write special case value with length -1. No value follows. 
data.writeBEInt(-1); - uint64_t pos = 0; - uint64_t left = 4 + 4 + 5 + 4; + pos = 0; + left = 4 + 4 + 5 + 4; uint64_t expected_left = left; - + uint64_t orig_pos = pos; + uint64_t orig_left = left; // Read the first value. + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationOK); + pos = orig_pos; + left = orig_left; ASSERT_TRUE(field.read(data, pos, left)); ASSERT_THAT(pos, 4); expected_left -= 4; @@ -235,6 +302,11 @@ TEST(VarByteN, BasicTest) { ASSERT_TRUE(out.find("0 bytes") != std::string::npos); // Read the second value. + orig_pos = pos; + orig_left = left; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationOK); + pos = orig_pos; + left = orig_left; ASSERT_TRUE(field.read(data, pos, left)); ASSERT_THAT(pos, 4 + 4 + 5); expected_left -= (4 + 5); @@ -244,6 +316,11 @@ TEST(VarByteN, BasicTest) { ASSERT_TRUE(out.find("10 11 12 13 14") != std::string::npos); // Read the third value. + orig_pos = pos; + orig_left = left; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationOK); + pos = orig_pos; + left = orig_left; ASSERT_TRUE(field.read(data, pos, left)); ASSERT_THAT(pos, 4 + 4 + 5 + 4); expected_left -= 4; @@ -252,47 +329,31 @@ TEST(VarByteN, BasicTest) { ASSERT_TRUE(out.find("-1 bytes") != std::string::npos); } -TEST(VarByteN, NotEnoughLengthData) { - VarByteN field; - - Buffer::OwnedImpl data; - // Write 3 bytes. Minimum for this type is 4 bytes of length. - data.writeBEInt(0); - data.writeBEInt(1); - data.writeBEInt(2); - - uint64_t pos = 0; - uint64_t left = 3; - ASSERT_FALSE(field.read(data, pos, left)); -} - -TEST(VarByteN, NotEnoughValueData) { - VarByteN field; - - Buffer::OwnedImpl data; - // Write length of the value to be 5 bytes, but supply only 4 bytes. - data.writeBEInt(5); - data.writeBEInt(0); - data.writeBEInt(1); - data.writeBEInt(2); - data.writeBEInt(3); - - uint64_t pos = 0; - uint64_t left = 5 + 4; - ASSERT_FALSE(field.read(data, pos, left)); -} - // Array composite type tests. 
TEST(Array, SingleInt) { Array field; Buffer::OwnedImpl data; - // Write the number of elements in the array. - data.writeBEInt(1); + // Simulate that message ends before the array. + uint64_t pos = 0; + uint64_t left = 1; + data.writeBEInt(1); + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationFailed); + + // Write the value of the element into the array. + data.drain(data.length()); + data.writeBEInt(1); data.writeBEInt(123); + // Simulate that message length end before end of array. + left = 5; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationFailed); - uint64_t pos = 0; - uint64_t left = 2 + 4; + left = 6; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationOK); + ASSERT_THAT(pos, 6); + ASSERT_THAT(left, 0); + pos = 0; + left = 6; ASSERT_TRUE(field.read(data, pos, left)); ASSERT_THAT(pos, 6); ASSERT_THAT(left, 0); @@ -306,14 +367,29 @@ TEST(Array, MultipleInts) { Array field; Buffer::OwnedImpl data; - // Write 3 elements into array. + // Write 3 as size of array, but add only 2 elements into array. data.writeBEInt(3); data.writeBEInt(211); data.writeBEInt(212); - data.writeBEInt(213); uint64_t pos = 0; uint64_t left = 2 + 3 * 1; + + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationNeedMoreData); + + // Add the third element. + data.writeBEInt(213); + + // Simulate that message ends before end of the array. 
+ left = 2 + 3 * 1 - 1; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationFailed); + + left = 2 + 3 * 1; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationOK); + ASSERT_THAT(pos, 5); + ASSERT_THAT(left, 0); + pos = 0; + left = 2 + 3 * 1; ASSERT_TRUE(field.read(data, pos, left)); ASSERT_THAT(pos, 5); ASSERT_THAT(left, 0); @@ -334,6 +410,11 @@ TEST(Array, Empty) { uint64_t pos = 0; uint64_t left = 2; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationOK); + ASSERT_THAT(pos, 2); + ASSERT_THAT(left, 0); + pos = 0; + left = 2; ASSERT_TRUE(field.read(data, pos, left)); ASSERT_THAT(pos, 2); ASSERT_THAT(left, 0); @@ -352,7 +433,7 @@ TEST(Array, NotEnoughDataForLength) { uint64_t pos = 0; uint64_t left = 1; - ASSERT_FALSE(field.read(data, pos, left)); + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationFailed); } // Test situation when there is not enough data in the buffer to read one of the elements @@ -370,7 +451,7 @@ TEST(Array, NotEnoughDataForValues) { uint64_t pos = 0; uint64_t left = 2 + 4 + 2; - ASSERT_FALSE(field.read(data, pos, left)); + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationFailed); } // Repeated composite type tests. @@ -382,15 +463,37 @@ TEST(Repeated, BasicTestWithStrings) { // It will be ignored. data.writeBEInt(101); data.writeBEInt(102); - // Now write 3 strings. Each terminated by zero byte. + uint64_t pos = 5; + uint64_t left = 5; + // Write the first string without terminating zero. data.add("test1"); - data.writeBEInt(0); + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationFailed); + left = 6; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationNeedMoreData); + // Add terminating zero. 
+ data.writeBEInt(0); + left = 5; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationFailed); + left = 7; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationNeedMoreData); + left = 6; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationOK); + // Add two additional strings data.add("test2"); data.writeBEInt(0); data.add("test3"); data.writeBEInt(0); - uint64_t pos = 5; - uint64_t left = 3 * 6; + pos = 5; + left = 3 * 6 - 1; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationFailed); + left = 3 * 6 + 1; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationNeedMoreData); + left = 3 * 6; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationOK); + ASSERT_THAT(pos, 5 + 3 * 6); + ASSERT_THAT(left, 0); + pos = 5; + left = 3 * 6; ASSERT_TRUE(field.read(data, pos, left)); ASSERT_THAT(pos, 5 + 3 * 6); ASSERT_THAT(left, 0); @@ -401,46 +504,6 @@ TEST(Repeated, BasicTestWithStrings) { ASSERT_TRUE(out.find("test3") != std::string::npos); } -// Test verifies that read fails when there is less -// bytes in the buffer than bytes needed to read to the end of the message. -TEST(Repeated, NotEnoughData) { - Repeated field; - - Buffer::OwnedImpl data; - // Write some data to simulate message header. - // It will be ignored. - data.writeBEInt(101); - data.writeBEInt(102); - data.add("test"); - - // "test" with terminating zero is 5 bytes. - // Set "left" to indicate that 6 bytes are needed. - uint64_t pos = 5; - uint64_t left = 5 + 6; - ASSERT_FALSE(field.read(data, pos, left)); -} - -// Test verifies that entire read fails when one of -// subordinate reads fails. -TEST(Repeated, NotEnoughDataForSecondString) { - Repeated field; - - Buffer::OwnedImpl data; - // Write some data to simulate message header. - // It will be ignored. - data.writeBEInt(101); - data.writeBEInt(102); - // Now write 3 strings. Each terminated by zero byte. 
- data.add("test1"); - data.writeBEInt(0); - data.add("test2"); - // Do not write terminating zero. - // Read should fail here. - uint64_t pos = 5; - uint64_t left = 6 + 5; - ASSERT_FALSE(field.read(data, pos, left)); -} - // Sequence composite type tests. TEST(Sequence, Int32SingleValue) { Sequence field; @@ -450,6 +513,11 @@ TEST(Sequence, Int32SingleValue) { uint64_t pos = 0; uint64_t left = 4; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationOK); + ASSERT_THAT(pos, 4); + ASSERT_THAT(left, 0); + pos = 0; + left = 4; ASSERT_TRUE(field.read(data, pos, left)); ASSERT_THAT(pos, 4); ASSERT_THAT(left, 0); @@ -466,6 +534,11 @@ TEST(Sequence, Int16SingleValue) { uint64_t pos = 0; uint64_t left = 2; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationOK); + ASSERT_THAT(pos, 2); + ASSERT_THAT(left, 0); + pos = 0; + left = 2; ASSERT_TRUE(field.read(data, pos, left)); ASSERT_THAT(pos, 2); ASSERT_THAT(left, 0); @@ -484,6 +557,11 @@ TEST(Sequence, BasicMultipleValues1) { uint64_t pos = 0; uint64_t left = 4 + 5; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationOK); + ASSERT_THAT(pos, 4 + 5); + ASSERT_THAT(left, 0); + pos = 0; + left = 4 + 5; ASSERT_TRUE(field.read(data, pos, left)); ASSERT_THAT(pos, 4 + 5); ASSERT_THAT(left, 0); @@ -503,6 +581,11 @@ TEST(Sequence, BasicMultipleValues2) { uint64_t pos = 0; uint64_t left = 4 + 2; uint64_t expected_pos = left; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationOK); + ASSERT_THAT(pos, expected_pos); + ASSERT_THAT(left, 0); + pos = 0; + left = 4 + 2; ASSERT_TRUE(field.read(data, pos, left)); ASSERT_THAT(pos, expected_pos); ASSERT_THAT(left, 0); @@ -524,6 +607,11 @@ TEST(Sequence, BasicMultipleValues3) { uint64_t pos = 0; uint64_t left = 4 + 2 + 4 + 2; uint64_t expected_pos = left; + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationOK); + ASSERT_THAT(pos, expected_pos); + ASSERT_THAT(left, 0); + pos = 0; + left = 4 + 2 + 4 + 2; 
ASSERT_TRUE(field.read(data, pos, left)); ASSERT_THAT(pos, expected_pos); ASSERT_THAT(left, 0); @@ -547,7 +635,7 @@ TEST(Sequence, NotEnoughData) { uint64_t pos = 0; uint64_t left = 4 + 4; - ASSERT_FALSE(field.read(data, pos, left)); + ASSERT_THAT(field.validate(data, 0, pos, left), Message::ValidationFailed); } // Tests for Message interface and helper function createMsgBodyReader. @@ -555,7 +643,16 @@ TEST(PostgresMessage, SingleFieldInt32) { std::unique_ptr msg = createMsgBodyReader(); Buffer::OwnedImpl data; + // Validation of empty message should complain that there + // is not enough data in the buffer. + ASSERT_THAT(msg->validate(data, 0, 4), Message::ValidationNeedMoreData); + data.writeBEInt(12); + + // Simulate that message is longer than In32. + ASSERT_THAT(msg->validate(data, 0, 5), Message::ValidationFailed); + + ASSERT_THAT(msg->validate(data, 0, 4), Message::ValidationOK); ASSERT_TRUE(msg->read(data, 4)); auto out = msg->toString(); ASSERT_THAT(out, "[12]"); @@ -565,7 +662,13 @@ TEST(PostgresMessage, SingleFieldInt16) { std::unique_ptr msg = createMsgBodyReader(); Buffer::OwnedImpl data; + + // Validation of empty message should complain that there + // is not enough data in the buffer. + ASSERT_THAT(msg->validate(data, 0, 2), Message::ValidationNeedMoreData); + data.writeBEInt(12); + ASSERT_THAT(msg->validate(data, 0, 2), Message::ValidationOK); ASSERT_TRUE(msg->read(data, 2)); auto out = msg->toString(); ASSERT_THAT(out, "[12]"); @@ -575,12 +678,18 @@ TEST(PostgresMessage, SingleByteN) { std::unique_ptr msg = createMsgBodyReader(); Buffer::OwnedImpl data; + // Validation of empty message should complain that there + // is not enough data in the buffer. 
+ ASSERT_THAT(msg->validate(data, 0, 4), Message::ValidationNeedMoreData); + data.writeBEInt(0); data.writeBEInt(1); data.writeBEInt(2); data.writeBEInt(3); data.writeBEInt(4); - ASSERT_TRUE(msg->read(data, 5 * 1)); + const uint64_t length = 5 * 1; + ASSERT_THAT(msg->validate(data, 0, length), Message::ValidationOK); + ASSERT_TRUE(msg->read(data, length)); auto out = msg->toString(); ASSERT_TRUE(out.find("0") != std::string::npos); // NOLINT ASSERT_TRUE(out.find("1") != std::string::npos); // NOLINT @@ -593,9 +702,16 @@ TEST(PostgresMessage, MultipleValues1) { std::unique_ptr msg = createMsgBodyReader(); Buffer::OwnedImpl data; + + // Validation of empty message should complain that there + // is not enough data in the buffer. + ASSERT_THAT(msg->validate(data, 0, 4), Message::ValidationNeedMoreData); + data.writeBEInt(12); data.writeBEInt(13); - ASSERT_TRUE(msg->read(data, 4 + 2)); + const uint64_t length = 4 + 2; + ASSERT_THAT(msg->validate(data, 0, length), Message::ValidationOK); + ASSERT_TRUE(msg->read(data, length)); auto out = msg->toString(); ASSERT_TRUE(out.find("12") != std::string::npos); ASSERT_TRUE(out.find("13") != std::string::npos); @@ -608,7 +724,9 @@ TEST(PostgresMessage, MultipleValues2) { data.writeBEInt(13); data.writeBEInt(14); data.writeBEInt(15); - ASSERT_TRUE(msg->read(data, 2 + 4 + 2)); + const uint64_t length = 2 + 4 + 2; + ASSERT_THAT(msg->validate(data, 0, length), Message::ValidationOK); + ASSERT_TRUE(msg->read(data, length)); auto out = msg->toString(); ASSERT_TRUE(out.find("13") != std::string::npos); ASSERT_TRUE(out.find("14") != std::string::npos); @@ -623,7 +741,9 @@ TEST(PostgresMessage, MultipleValues3) { data.writeBEInt(13); data.writeBEInt(14); data.writeBEInt(15); - ASSERT_TRUE(msg->read(data, 4 + 2 + 4 + 2)); + const uint64_t length = 4 + 2 + 4 + 2; + ASSERT_THAT(msg->validate(data, 0, length), Message::ValidationOK); + ASSERT_TRUE(msg->read(data, length)); auto out = msg->toString(); ASSERT_TRUE(out.find("12") != 
std::string::npos); ASSERT_TRUE(out.find("13") != std::string::npos); @@ -640,7 +760,9 @@ TEST(PostgresMessage, MultipleValues4) { data.writeBEInt(15); data.writeBEInt(16); data.writeBEInt(17); - ASSERT_TRUE(msg->read(data, 2 + 4 + 2 + 4 + 2)); + const uint64_t length = 2 + 4 + 2 + 4 + 2; + ASSERT_THAT(msg->validate(data, 0, length), Message::ValidationOK); + ASSERT_TRUE(msg->read(data, length)); auto out = msg->toString(); ASSERT_TRUE(out.find("13") != std::string::npos); ASSERT_TRUE(out.find("14") != std::string::npos); @@ -659,7 +781,9 @@ TEST(PostgresMessage, MultipleValues5) { data.writeBEInt(15); data.writeBEInt(16); data.writeBEInt(17); - ASSERT_TRUE(msg->read(data, 4 + 2 + 4 + 2 + 4 + 2)); + const uint64_t length = 4 + 2 + 4 + 2 + 4 + 2; + ASSERT_THAT(msg->validate(data, 0, length), Message::ValidationOK); + ASSERT_TRUE(msg->read(data, length)); auto out = msg->toString(); ASSERT_TRUE(out.find("12") != std::string::npos); ASSERT_TRUE(out.find("13") != std::string::npos); @@ -682,7 +806,9 @@ TEST(PostgresMessage, MultipleValues6) { data.writeBEInt(15); data.writeBEInt(16); data.writeBEInt(17); - ASSERT_TRUE(msg->read(data, 5 + 4 + 2 + 4 + 2 + 4 + 2)); + const uint64_t length = 5 + 4 + 2 + 4 + 2 + 4 + 2; + ASSERT_THAT(msg->validate(data, 0, length), Message::ValidationOK); + ASSERT_TRUE(msg->read(data, length)); auto out = msg->toString(); ASSERT_TRUE(out.find("test") != std::string::npos); ASSERT_TRUE(out.find("12") != std::string::npos); @@ -705,7 +831,9 @@ TEST(PostgresMessage, MultipleValues7) { data.writeBEInt(13); data.writeBEInt(14); data.writeBEInt(15); - ASSERT_TRUE(msg->read(data, 5 + 2 + 3 * 4)); + const uint64_t length = 5 + 2 + 3 * 4; + ASSERT_THAT(msg->validate(data, 0, length), Message::ValidationOK); + ASSERT_TRUE(msg->read(data, length)); auto out = msg->toString(); ASSERT_TRUE(out.find("test") != std::string::npos); ASSERT_TRUE(out.find("13") != std::string::npos); @@ -722,7 +850,9 @@ TEST(PostgresMessage, ArraySet1) { data.writeBEInt(13); 
data.writeBEInt(14); data.writeBEInt(15); - ASSERT_TRUE(msg->read(data, 2 + 3 * 2)); + const uint64_t length = 2 + 3 * 2; + ASSERT_THAT(msg->validate(data, 0, length), Message::ValidationOK); + ASSERT_TRUE(msg->read(data, length)); auto out = msg->toString(); ASSERT_TRUE(out.find("13") != std::string::npos); ASSERT_TRUE(out.find("14") != std::string::npos); @@ -745,8 +875,9 @@ TEST(PostgresMessage, ArraySet2) { // 16-bits value. data.writeBEInt(115); - - ASSERT_TRUE(msg->read(data, 2 + 4 + 5 + 2)); + const uint64_t length = 2 + 4 + 5 + 2; + ASSERT_THAT(msg->validate(data, 0, length), Message::ValidationOK); + ASSERT_TRUE(msg->read(data, length)); auto out = msg->toString(); ASSERT_TRUE(out.find("114") != std::string::npos); ASSERT_TRUE(out.find("115") != std::string::npos); @@ -774,8 +905,9 @@ TEST(PostgresMessage, ArraySet3) { // 16-bits value. data.writeBEInt(115); - - ASSERT_TRUE(msg->read(data, 2 + 3 * 2 + 2 + 4 + 5 + 2)); + const uint64_t length = 2 + 3 * 2 + 2 + 4 + 5 + 2; + ASSERT_THAT(msg->validate(data, 0, length), Message::ValidationOK); + ASSERT_TRUE(msg->read(data, length)); auto out = msg->toString(); ASSERT_TRUE(out.find("13") != std::string::npos); ASSERT_TRUE(out.find("115") != std::string::npos); @@ -799,8 +931,9 @@ TEST(PostgresMessage, ArraySet4) { data.writeBEInt(2); data.writeBEInt(113); data.writeBEInt(114); - - ASSERT_TRUE(msg->read(data, 2 + 4 + 5 + 2 + 2 * 2)); + const uint64_t length = 2 + 4 + 5 + 2 + 2 * 2; + ASSERT_THAT(msg->validate(data, 0, length), Message::ValidationOK); + ASSERT_TRUE(msg->read(data, length)); auto out = msg->toString(); ASSERT_TRUE(out.find("111") != std::string::npos); ASSERT_TRUE(out.find("114") != std::string::npos); @@ -830,8 +963,9 @@ TEST(PostgresMessage, ArraySet5) { data.writeBEInt(2); data.writeBEInt(113); data.writeBEInt(114); - - ASSERT_TRUE(msg->read(data, 2 + 3 * 2 + 2 + 4 + 5 + 2 + 2 * 2)); + const uint64_t length = 2 + 3 * 2 + 2 + 4 + 5 + 2 + 2 * 2; + ASSERT_THAT(msg->validate(data, 0, length), 
Message::ValidationOK); + ASSERT_TRUE(msg->read(data, length)); auto out = msg->toString(); ASSERT_TRUE(out.find("13") != std::string::npos); ASSERT_TRUE(out.find("114") != std::string::npos); @@ -867,7 +1001,9 @@ TEST(PostgresMessage, ArraySet6) { data.writeBEInt(113); data.writeBEInt(114); - ASSERT_TRUE(msg->read(data, 5 + 2 + 3 * 2 + 2 + 4 + 5 + 2 + 2 * 2)); + const uint64_t length = 5 + 2 + 3 * 2 + 2 + 4 + 5 + 2 + 2 * 2; + ASSERT_THAT(msg->validate(data, 0, length), Message::ValidationOK); + ASSERT_TRUE(msg->read(data, length)); auto out = msg->toString(); ASSERT_TRUE(out.find("test") != std::string::npos); ASSERT_TRUE(out.find("13") != std::string::npos); @@ -886,7 +1022,9 @@ TEST(PostgresMessage, Repeated1) { data.add("test3"); data.writeBEInt(0); - ASSERT_TRUE(msg->read(data, 3 * 6)); + const uint64_t length = 3 * 6; + ASSERT_THAT(msg->validate(data, 0, length), Message::ValidationOK); + ASSERT_TRUE(msg->read(data, length)); auto out = msg->toString(); ASSERT_TRUE(out.find("test1") != std::string::npos); ASSERT_TRUE(out.find("test2") != std::string::npos); @@ -906,7 +1044,9 @@ TEST(PostgresMessage, Repeated2) { data.add("test3"); data.writeBEInt(0); - ASSERT_TRUE(msg->read(data, 4 + 3 * 6)); + const uint64_t length = 4 + 3 * 6; + ASSERT_THAT(msg->validate(data, 0, length), Message::ValidationOK); + ASSERT_TRUE(msg->read(data, length)); auto out = msg->toString(); ASSERT_TRUE(out.find("115") != std::string::npos); ASSERT_TRUE(out.find("test1") != std::string::npos); @@ -922,7 +1062,27 @@ TEST(PostgresMessage, NotEnoughData) { data.writeBEInt(1); data.writeBEInt(2); - ASSERT_FALSE(msg->read(data, 3)); + ASSERT_THAT(msg->validate(data, 0, 4), Message::ValidationNeedMoreData); + ASSERT_THAT(msg->validate(data, 0, 2), Message::ValidationFailed); +} + +// Test checks validating a properly formatted message +// which starts at some offset in data buffer. 
+TEST(PostgresMessage, ValidateFromOffset) { + std::unique_ptr msg = createMsgBodyReader(); + Buffer::OwnedImpl data; + + // Write some data which should be skipped by validator. + data.add("skip"); + data.writeBEInt(0); + + // Write valid data according to message syntax. + data.writeBEInt(110); + data.add("test123"); + data.writeBEInt(0); + + // Skip first 5 bytes in the buffer. + ASSERT_THAT(msg->validate(data, 5, 4 + 8), Message::ValidationOK); } } // namespace PostgresProxy diff --git a/test/extensions/filters/network/postgres_proxy/postgres_test_utils.cc b/test/extensions/filters/network/postgres_proxy/postgres_test_utils.cc index 51819b1ad72e2..56450f5c5fe98 100644 --- a/test/extensions/filters/network/postgres_proxy/postgres_test_utils.cc +++ b/test/extensions/filters/network/postgres_proxy/postgres_test_utils.cc @@ -10,8 +10,11 @@ void createPostgresMsg(Buffer::Instance& data, std::string type, std::string pay data.drain(data.length()); ASSERT(1 == type.length()); data.add(type); - data.writeBEInt(4 + payload.length()); - data.add(payload); + data.writeBEInt(4 + (payload.empty() ? 
0 : (payload.length() + 1))); + if (!payload.empty()) { + data.add(payload); + data.writeBEInt(0); + } } } // namespace PostgresProxy diff --git a/test/extensions/filters/network/thrift_proxy/router_test.cc b/test/extensions/filters/network/thrift_proxy/router_test.cc index a072c214b57ab..ada3531938a74 100644 --- a/test/extensions/filters/network/thrift_proxy/router_test.cc +++ b/test/extensions/filters/network/thrift_proxy/router_test.cc @@ -435,6 +435,9 @@ TEST_F(ThriftRouterTest, PoolRemoteConnectionFailure) { EXPECT_THAT(app_ex.what(), ContainsRegex(".*connection failure.*")); EXPECT_TRUE(end_stream); })); + EXPECT_CALL( + context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_->outlier_detector_, + putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _)); context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.poolFailure( ConnectionPool::PoolFailureReason::RemoteConnectionFailure); } @@ -447,7 +450,9 @@ TEST_F(ThriftRouterTest, PoolLocalConnectionFailure) { EXPECT_EQ(1UL, context_.cluster_manager_.thread_local_cluster_.cluster_.info_->statsScope() .counterFromString("thrift.upstream_rq_call") .value()); - + EXPECT_CALL( + context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_->outlier_detector_, + putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _)); context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.poolFailure( ConnectionPool::PoolFailureReason::LocalConnectionFailure); } @@ -468,6 +473,9 @@ TEST_F(ThriftRouterTest, PoolTimeout) { EXPECT_THAT(app_ex.what(), ContainsRegex(".*connection failure.*")); EXPECT_TRUE(end_stream); })); + EXPECT_CALL( + context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_->outlier_detector_, + putResult(Upstream::Outlier::Result::LocalOriginTimeout, _)); context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.poolFailure( ConnectionPool::PoolFailureReason::Timeout); } @@ -680,6 +688,9 @@ TEST_F(ThriftRouterTest, 
UnexpectedUpstreamRemoteClose) { EXPECT_THAT(app_ex.what(), ContainsRegex(".*connection failure.*")); EXPECT_TRUE(end_stream); })); + EXPECT_CALL( + context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_->outlier_detector_, + putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _)); router_->onEvent(Network::ConnectionEvent::RemoteClose); } @@ -696,6 +707,9 @@ TEST_F(ThriftRouterTest, UnexpectedUpstreamLocalClose) { EXPECT_THAT(app_ex.what(), ContainsRegex(".*connection failure.*")); EXPECT_TRUE(end_stream); })); + EXPECT_CALL( + context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_->outlier_detector_, + putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _)); router_->onEvent(Network::ConnectionEvent::RemoteClose); } @@ -987,6 +1001,10 @@ TEST_P(ThriftRouterFieldTypeTest, OneWay) { initializeRouter(); startRequest(MessageType::Oneway); + + EXPECT_CALL( + context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_->outlier_detector_, + putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess, _)); connectUpstream(); sendTrivialStruct(field_type); completeRequest(); @@ -1005,9 +1023,17 @@ TEST_P(ThriftRouterFieldTypeTest, Call) { initializeRouter(); startRequest(MessageType::Call); + + EXPECT_CALL( + context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_->outlier_detector_, + putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess, _)); connectUpstream(); sendTrivialStruct(field_type); completeRequest(); + + EXPECT_CALL( + context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_->outlier_detector_, + putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _)); returnResponse(); destroyRouter(); @@ -1063,9 +1089,17 @@ TEST_P(ThriftRouterFieldTypeTest, Call_Error) { initializeRouter(); startRequest(MessageType::Call); + + EXPECT_CALL( + context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_->outlier_detector_, + 
putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess, _)); connectUpstream(); sendTrivialStruct(field_type); completeRequest(); + + EXPECT_CALL( + context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_->outlier_detector_, + putResult(Upstream::Outlier::Result::ExtOriginRequestFailed, _)); returnResponse(MessageType::Reply, false); destroyRouter(); @@ -1088,9 +1122,17 @@ TEST_P(ThriftRouterFieldTypeTest, Exception) { initializeRouter(); startRequest(MessageType::Call); + + EXPECT_CALL( + context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_->outlier_detector_, + putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess, _)); connectUpstream(); sendTrivialStruct(field_type); completeRequest(); + + EXPECT_CALL( + context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_->outlier_detector_, + putResult(Upstream::Outlier::Result::ExtOriginRequestFailed, _)); returnResponse(MessageType::Exception); destroyRouter(); diff --git a/test/extensions/filters/network/wasm/config_test.cc b/test/extensions/filters/network/wasm/config_test.cc index c7bb849f8ae6a..66132102bd929 100644 --- a/test/extensions/filters/network/wasm/config_test.cc +++ b/test/extensions/filters/network/wasm/config_test.cc @@ -81,14 +81,26 @@ TEST_P(WasmNetworkFilterConfigTest, YamlLoadFromFileWasm) { envoy::extensions::filters::network::wasm::v3::Wasm proto_config; TestUtility::loadFromYaml(yaml, proto_config); - WasmFilterConfig factory; - Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context_); - EXPECT_CALL(init_watcher_, ready()); - context_.initManager().initialize(init_watcher_); - EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); - Network::MockConnection connection; - EXPECT_CALL(connection, addFilter(_)); - cb(connection); + + // Intentionally we scope the factory here, and make the context outlive it. 
+ // This case happens when the config is updated by ECDS, and + // we have to make sure that contexts still hold valid WasmVMs in these cases. + std::shared_ptr context = nullptr; + { + WasmFilterConfig factory; + Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context_); + EXPECT_CALL(init_watcher_, ready()); + context_.initManager().initialize(init_watcher_); + EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); + Network::MockConnection connection; + EXPECT_CALL(connection, addFilter(_)).WillOnce([&context](Network::FilterSharedPtr filter) { + context = std::static_pointer_cast(filter); + }); + cb(connection); + } + // Check if the context still holds a valid Wasm even after the factory is destroyed. + EXPECT_TRUE(context); + EXPECT_TRUE(context->wasm()); } TEST_P(WasmNetworkFilterConfigTest, YamlLoadInlineWasm) { diff --git a/test/extensions/filters/network/wasm/wasm_filter_test.cc b/test/extensions/filters/network/wasm/wasm_filter_test.cc index 4203cc9de8fc9..7bd0eb5465932 100644 --- a/test/extensions/filters/network/wasm/wasm_filter_test.cc +++ b/test/extensions/filters/network/wasm/wasm_filter_test.cc @@ -28,8 +28,8 @@ using proxy_wasm::ContextBase; class TestFilter : public Context { public: - TestFilter(Wasm* wasm, uint32_t root_context_id, PluginSharedPtr plugin) - : Context(wasm, root_context_id, plugin) {} + TestFilter(Wasm* wasm, uint32_t root_context_id, PluginHandleSharedPtr plugin_handle) + : Context(wasm, root_context_id, plugin_handle) {} MOCK_CONTEXT_LOG_; void testClose() { onCloseTCP(); } diff --git a/test/extensions/matching/input_matchers/ip/BUILD b/test/extensions/matching/input_matchers/ip/BUILD new file mode 100644 index 0000000000000..ac9afab392a0f --- /dev/null +++ b/test/extensions/matching/input_matchers/ip/BUILD @@ -0,0 +1,31 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + 
"envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "config_test", + srcs = ["config_test.cc"], + extension_name = "envoy.matching.input_matchers.ip", + deps = [ + "//source/extensions/matching/input_matchers/ip:config", + "//test/mocks/server:factory_context_mocks", + ], +) + +envoy_extension_cc_test( + name = "matcher_test", + srcs = ["matcher_test.cc"], + extension_name = "envoy.matching.input_matchers.ip", + deps = [ + "//source/extensions/matching/input_matchers/ip:ip_lib", + ], +) diff --git a/test/extensions/matching/input_matchers/ip/config_test.cc b/test/extensions/matching/input_matchers/ip/config_test.cc new file mode 100644 index 0000000000000..82f8428622af8 --- /dev/null +++ b/test/extensions/matching/input_matchers/ip/config_test.cc @@ -0,0 +1,85 @@ +#include "source/extensions/matching/input_matchers/ip/config.h" + +#include "test/mocks/server/factory_context.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace Matching { +namespace InputMatchers { +namespace IP { + +TEST(ConfigTest, TestConfig) { + NiceMock context; + + const std::string yaml_string = R"EOF( + name: ip + typed_config: + "@type": type.googleapis.com/envoy.extensions.matching.input_matchers.ip.v3.Ip + cidr_ranges: + - address_prefix: 192.0.2.0 + prefix_len: 24 + stat_prefix: "test.ips_matcher" +)EOF"; + + envoy::config::core::v3::TypedExtensionConfig config; + TestUtility::loadFromYaml(yaml_string, config); + + Config factory; + auto message = Envoy::Config::Utility::translateAnyToFactoryConfig( + config.typed_config(), ProtobufMessage::getStrictValidationVisitor(), factory); + auto matcher = factory.createInputMatcherFactoryCb(*message, context); + EXPECT_NE(nullptr, matcher()); +} + +TEST(ConfigTest, InvalidConfigIP) { + NiceMock context; + + const std::string yaml_string = R"EOF( + name: ip + typed_config: + "@type": 
type.googleapis.com/envoy.extensions.matching.input_matchers.ip.v3.Ip + cidr_ranges: + - address_prefix: foo + prefix_len: 10 + stat_prefix: "test.ips_matcher" +)EOF"; + + envoy::config::core::v3::TypedExtensionConfig config; + TestUtility::loadFromYaml(yaml_string, config); + + Config factory; + auto message = Envoy::Config::Utility::translateAnyToFactoryConfig( + config.typed_config(), ProtobufMessage::getStrictValidationVisitor(), factory); + EXPECT_THROW_WITH_MESSAGE(factory.createInputMatcherFactoryCb(*message, context), EnvoyException, + "malformed IP address: foo"); +} + +TEST(ConfigTest, InvalidConfigStats) { + NiceMock context; + + const std::string yaml_string = R"EOF( + name: ip + typed_config: + "@type": type.googleapis.com/envoy.extensions.matching.input_matchers.ip.v3.Ip + cidr_ranges: + - address_prefix: 192.0.2.0 + prefix_len: 10 +)EOF"; + + envoy::config::core::v3::TypedExtensionConfig config; + TestUtility::loadFromYaml(yaml_string, config); + + Config factory; + auto message = Envoy::Config::Utility::translateAnyToFactoryConfig( + config.typed_config(), ProtobufMessage::getStrictValidationVisitor(), factory); + EXPECT_THROW_WITH_REGEX(factory.createInputMatcherFactoryCb(*message, context), EnvoyException, + "Proto constraint validation failed.*StatPrefix"); +} + +} // namespace IP +} // namespace InputMatchers +} // namespace Matching +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/matching/input_matchers/ip/matcher_test.cc b/test/extensions/matching/input_matchers/ip/matcher_test.cc new file mode 100644 index 0000000000000..cd91c92d90337 --- /dev/null +++ b/test/extensions/matching/input_matchers/ip/matcher_test.cc @@ -0,0 +1,87 @@ +#include "envoy/network/address.h" + +#include "source/extensions/matching/input_matchers/ip/matcher.h" + +#include "test/common/stats/stat_test_utility.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace Matching 
{ +namespace InputMatchers { +namespace IP { + +class MatcherTest : public testing::Test { +public: + void initialize(std::vector&& ranges) { + m_ = std::make_unique(std::move(ranges), stat_prefix_, scope_); + } + + Stats::TestUtil::TestStore scope_; + std::string stat_prefix_{"ipmatcher.test"}; + std::unique_ptr m_; +}; + +TEST_F(MatcherTest, TestV4) { + std::vector ranges; + ranges.emplace_back(Network::Address::CidrRange::create("192.0.2.0", 24)); + ranges.emplace_back(Network::Address::CidrRange::create("10.0.0.0", 24)); + initialize(std::move(ranges)); + EXPECT_FALSE(m_->match("192.0.1.255")); + EXPECT_TRUE(m_->match("192.0.2.0")); + EXPECT_TRUE(m_->match("192.0.2.1")); + EXPECT_TRUE(m_->match("192.0.2.255")); + EXPECT_FALSE(m_->match("9.255.255.255")); + EXPECT_TRUE(m_->match("10.0.0.0")); + EXPECT_TRUE(m_->match("10.0.0.255")); + EXPECT_FALSE(m_->match("10.0.1.0")); +} + +TEST_F(MatcherTest, TestV6) { + std::vector ranges; + ranges.emplace_back(Network::Address::CidrRange::create("::1/128")); + ranges.emplace_back(Network::Address::CidrRange::create("2001::/16")); + ranges.emplace_back(Network::Address::CidrRange::create("2002::/16")); + initialize(std::move(ranges)); + + EXPECT_FALSE(m_->match("::")); + EXPECT_TRUE(m_->match("::1")); + EXPECT_FALSE(m_->match("::2")); + + EXPECT_FALSE(m_->match("2000:ffff:ffff:ffff:ffff:ffff:ffff:ffff")); + EXPECT_TRUE(m_->match("2001::1")); + EXPECT_TRUE(m_->match("2001:ffff:ffff:ffff:ffff:ffff:ffff:ffff")); + EXPECT_TRUE(m_->match("2002::1")); + EXPECT_TRUE(m_->match("2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff")); + EXPECT_FALSE(m_->match("2003::")); +} + +TEST_F(MatcherTest, EmptyRanges) { + initialize(std::vector{}); + EXPECT_FALSE(m_->match("192.0.2.0")); +} + +TEST_F(MatcherTest, EmptyIP) { + std::vector ranges; + ranges.emplace_back(Network::Address::CidrRange::create("192.0.2.0", 24)); + initialize(std::move(ranges)); + EXPECT_FALSE(m_->match("")); + EXPECT_FALSE(m_->match(absl::optional{})); +} + +TEST_F(MatcherTest, 
InvalidIP) { + std::vector ranges; + ranges.emplace_back(Network::Address::CidrRange::create("192.0.2.0", 24)); + initialize(std::move(ranges)); + EXPECT_EQ(m_->stats()->ip_parsing_failed_.value(), 0); + EXPECT_FALSE(m_->match("foo")); + EXPECT_EQ(m_->stats()->ip_parsing_failed_.value(), 1); +} + +} // namespace IP +} // namespace InputMatchers +} // namespace Matching +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc b/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc index 769983b8e1437..dd83b5d695b6d 100644 --- a/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc +++ b/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc @@ -9,6 +9,8 @@ #include "test/mocks/thread_local/mocks.h" #include "test/test_common/simulated_time_system.h" +#include "io/prometheus/client/metrics.pb.h" + using namespace std::chrono_literals; using testing::_; using testing::InSequence; @@ -186,17 +188,43 @@ TEST_F(MetricsServiceSinkTest, ReportCountersValues) { // Test that verifies counters are reported as the delta between flushes when configured to do so. TEST_F(MetricsServiceSinkTest, ReportCountersAsDeltas) { - MetricsServiceSink - sink(streamer_, true, false); - addCounterToSnapshot("test_counter", 1, 100); + counter_storage_.back()->setTagExtractedName("tag-counter-name"); + counter_storage_.back()->setTags({{"a", "b"}}); - EXPECT_CALL(*streamer_, send(_)).WillOnce(Invoke([](MetricsPtr&& metrics) { - EXPECT_EQ(1, metrics->size()); - EXPECT_EQ(1, (*metrics)[0].metric(0).counter().value()); - })); - sink.flush(snapshot_); + { + // This test won't emit any labels. 
+ MetricsServiceSink + sink(streamer_, true, false); + + EXPECT_CALL(*streamer_, send(_)).WillOnce(Invoke([](MetricsPtr&& metrics) { + ASSERT_EQ(1, metrics->size()); + EXPECT_EQ("test_counter", (*metrics)[0].name()); + + const auto& metric = (*metrics)[0].metric(0); + EXPECT_EQ(1, metric.counter().value()); + EXPECT_EQ(0, metric.label().size()); + })); + sink.flush(snapshot_); + } + + { + // This test will emit labels. + MetricsServiceSink + sink(streamer_, true, true); + + EXPECT_CALL(*streamer_, send(_)).WillOnce(Invoke([](MetricsPtr&& metrics) { + ASSERT_EQ(1, metrics->size()); + EXPECT_EQ("tag-counter-name", (*metrics)[0].name()); + + const auto& metric = (*metrics)[0].metric(0); + EXPECT_EQ(1, metric.counter().value()); + EXPECT_EQ(1, metric.label().size()); + })); + sink.flush(snapshot_); + } } // Test the behavior of tag emission based on the emit_tags_as_label flag. @@ -217,7 +245,7 @@ TEST_F(MetricsServiceSinkTest, ReportMetricsWithTags) { // When the emit_tags flag is false, we don't emit the tags and use the full name. MetricsServiceSink - sink(streamer_, true, false); + sink(streamer_, false, false); EXPECT_CALL(*streamer_, send(_)).WillOnce(Invoke([](MetricsPtr&& metrics) { EXPECT_EQ(4, metrics->size()); @@ -244,7 +272,7 @@ TEST_F(MetricsServiceSinkTest, ReportMetricsWithTags) { // When the emit_tags flag is true, we emit the tags as labels and use the tag extracted name. 
MetricsServiceSink - sink(streamer_, true, true); + sink(streamer_, false, true); EXPECT_CALL(*streamer_, send(_)).WillOnce(Invoke([&expected_label_pair](MetricsPtr&& metrics) { EXPECT_EQ(4, metrics->size()); diff --git a/test/extensions/stats_sinks/wasm/wasm_stat_sink_test.cc b/test/extensions/stats_sinks/wasm/wasm_stat_sink_test.cc index 4bda7d8e9d970..a254fc7767f9a 100644 --- a/test/extensions/stats_sinks/wasm/wasm_stat_sink_test.cc +++ b/test/extensions/stats_sinks/wasm/wasm_stat_sink_test.cc @@ -44,7 +44,8 @@ class WasmCommonContextTest }); } void setupContext() { - context_ = std::make_unique(wasm_->wasm().get(), root_context_->id(), plugin_); + context_ = + std::make_unique(wasm_->wasm().get(), root_context_->id(), plugin_handle_); context_->onCreate(); } diff --git a/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_test.cc b/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_test.cc index eed0cc8ee1f7e..47cc4937f0f4b 100644 --- a/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_test.cc +++ b/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_test.cc @@ -35,7 +35,7 @@ namespace { class ProxyProtocolTest : public testing::Test { public: void initialize(ProxyProtocolConfig_Version version, - Network::TransportSocketOptionsSharedPtr socket_options) { + Network::TransportSocketOptionsConstSharedPtr socket_options) { auto inner_socket = std::make_unique>(); inner_socket_ = inner_socket.get(); ON_CALL(transport_callbacks_, ioHandle()).WillByDefault(ReturnRef(io_handle_)); @@ -253,7 +253,7 @@ TEST_F(ProxyProtocolTest, V1IPV4DownstreamAddresses) { new Network::Address::Ipv4Instance("202.168.0.13", 52000)); auto dst_addr = Network::Address::InstanceConstSharedPtr( new Network::Address::Ipv4Instance("174.2.2.222", 80)); - Network::TransportSocketOptionsSharedPtr socket_options = + Network::TransportSocketOptionsConstSharedPtr socket_options = std::make_shared( "", std::vector{}, std::vector{}, std::vector{}, 
absl::optional( @@ -285,7 +285,7 @@ TEST_F(ProxyProtocolTest, V1IPV6DownstreamAddresses) { Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv6Instance("1::2:3", 52000)); auto dst_addr = Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv6Instance("a:b:c:d::", 80)); - Network::TransportSocketOptionsSharedPtr socket_options = + Network::TransportSocketOptionsConstSharedPtr socket_options = std::make_shared( "", std::vector{}, std::vector{}, std::vector{}, absl::optional( @@ -362,7 +362,7 @@ TEST_F(ProxyProtocolTest, V2IPV4DownstreamAddresses) { Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv4Instance("1.2.3.4", 773)); auto dst_addr = Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv4Instance("0.1.1.2", 513)); - Network::TransportSocketOptionsSharedPtr socket_options = + Network::TransportSocketOptionsConstSharedPtr socket_options = std::make_shared( "", std::vector{}, std::vector{}, std::vector{}, absl::optional( @@ -394,7 +394,7 @@ TEST_F(ProxyProtocolTest, V2IPV6DownstreamAddresses) { Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv6Instance("1:2:3::4", 8)); auto dst_addr = Network::Address::InstanceConstSharedPtr( new Network::Address::Ipv6Instance("1:100:200:3::", 2)); - Network::TransportSocketOptionsSharedPtr socket_options = + Network::TransportSocketOptionsConstSharedPtr socket_options = std::make_shared( "", std::vector{}, std::vector{}, std::vector{}, absl::optional( @@ -426,7 +426,7 @@ TEST_F(ProxyProtocolTest, OnConnectedCallsInnerOnConnected) { Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv6Instance("1:2:3::4", 8)); auto dst_addr = Network::Address::InstanceConstSharedPtr( new Network::Address::Ipv6Instance("1:100:200:3::", 2)); - Network::TransportSocketOptionsSharedPtr socket_options = + Network::TransportSocketOptionsConstSharedPtr socket_options = std::make_shared( "", std::vector{}, std::vector{}, std::vector{}, absl::optional( diff --git 
a/test/extensions/transport_sockets/starttls/starttls_integration_test.cc b/test/extensions/transport_sockets/starttls/starttls_integration_test.cc index 9ee6e9ad3b3f9..597c0939d3123 100644 --- a/test/extensions/transport_sockets/starttls/starttls_integration_test.cc +++ b/test/extensions/transport_sockets/starttls/starttls_integration_test.cc @@ -176,7 +176,7 @@ class StartTlsIntegrationTest : public testing::TestWithParam below_low, std::function above_high, diff --git a/test/extensions/transport_sockets/starttls/starttls_socket_test.cc b/test/extensions/transport_sockets/starttls/starttls_socket_test.cc index 4a69c58f17a32..9c9fc51d7f3fb 100644 --- a/test/extensions/transport_sockets/starttls/starttls_socket_test.cc +++ b/test/extensions/transport_sockets/starttls/starttls_socket_test.cc @@ -25,7 +25,7 @@ class StartTlsTransportSocketMock : public Network::MockTransportSocket { }; TEST(StartTlsTest, BasicSwitch) { - Network::TransportSocketOptionsSharedPtr options = + Network::TransportSocketOptionsConstSharedPtr options = std::make_shared(); NiceMock transport_callbacks; NiceMock* raw_socket = new NiceMock; diff --git a/test/extensions/transport_sockets/tls/cert_validator/default_validator_test.cc b/test/extensions/transport_sockets/tls/cert_validator/default_validator_test.cc index 44f8ff0e4e355..ec6c3753d9b2b 100644 --- a/test/extensions/transport_sockets/tls/cert_validator/default_validator_test.cc +++ b/test/extensions/transport_sockets/tls/cert_validator/default_validator_test.cc @@ -116,6 +116,36 @@ TEST(DefaultCertValidatorTest, TestMatchSubjectAltNameNotMatched) { EXPECT_FALSE(DefaultCertValidator::matchSubjectAltName(cert.get(), subject_alt_name_matchers)); } +TEST(DefaultCertValidatorTest, TestCertificateVerificationWithSANMatcher) { + Stats::TestUtil::TestStore test_store; + SslStats stats = generateSslStats(test_store); + // Create the default validator object. 
+ auto default_validator = + std::make_unique( + /*CertificateValidationContextConfig=*/nullptr, stats, + Event::GlobalTimeSystem().timeSystem()); + + bssl::UniquePtr cert = readCertFromFile(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem")); + envoy::type::matcher::v3::StringMatcher matcher; + matcher.MergeFrom(TestUtility::createRegexMatcher(".*.example.com")); + std::vector san_matchers; + san_matchers.push_back(Matchers::StringMatcherImpl(matcher)); + // Verify the certificate with correct SAN regex matcher. + EXPECT_EQ(default_validator->verifyCertificate(cert.get(), /*verify_san_list=*/{}, san_matchers), + Envoy::Ssl::ClientValidationStatus::Validated); + EXPECT_EQ(stats.fail_verify_san_.value(), 0); + + matcher.MergeFrom(TestUtility::createExactMatcher("hello.example.com")); + std::vector invalid_san_matchers; + invalid_san_matchers.push_back(Matchers::StringMatcherImpl(matcher)); + // Verify the certificate with incorrect SAN exact matcher. 
+ EXPECT_EQ(default_validator->verifyCertificate(cert.get(), /*verify_san_list=*/{}, + invalid_san_matchers), + Envoy::Ssl::ClientValidationStatus::Failed); + EXPECT_EQ(stats.fail_verify_san_.value(), 1); +} + } // namespace Tls } // namespace TransportSockets } // namespace Extensions diff --git a/test/extensions/transport_sockets/tls/cert_validator/test_common.h b/test/extensions/transport_sockets/tls/cert_validator/test_common.h index c0cc7f28425fd..b958f17272075 100644 --- a/test/extensions/transport_sockets/tls/cert_validator/test_common.h +++ b/test/extensions/transport_sockets/tls/cert_validator/test_common.h @@ -47,9 +47,6 @@ class TestCertificateValidationContextConfig const std::string& certificateRevocationListPath() const final { CONSTRUCT_ON_FIRST_USE(std::string, ""); } - const std::vector& verifySubjectAltNameList() const override { - CONSTRUCT_ON_FIRST_USE(std::vector, {}); - } const std::vector& subjectAltNameMatchers() const override { return san_matchers_; diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index 432216c5daa09..61812d034b551 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -567,13 +567,13 @@ class TestUtilOptionsV2 : public TestUtilOptionsBase { const std::string& expectedALPNProtocol() const { return expected_alpn_protocol_; } - TestUtilOptionsV2& - setTransportSocketOptions(Network::TransportSocketOptionsSharedPtr transport_socket_options) { + TestUtilOptionsV2& setTransportSocketOptions( + Network::TransportSocketOptionsConstSharedPtr transport_socket_options) { transport_socket_options_ = transport_socket_options; return *this; } - Network::TransportSocketOptionsSharedPtr transportSocketOptions() const { + Network::TransportSocketOptionsConstSharedPtr transportSocketOptions() const { return transport_socket_options_; } @@ -598,7 +598,7 @@ class TestUtilOptionsV2 : 
public TestUtilOptionsBase { std::string expected_server_cert_digest_; std::string expected_requested_server_name_; std::string expected_alpn_protocol_; - Network::TransportSocketOptionsSharedPtr transport_socket_options_; + Network::TransportSocketOptionsConstSharedPtr transport_socket_options_; std::string expected_transport_failure_reason_contains_; }; @@ -4589,7 +4589,7 @@ TEST_P(SslSocketTest, OverrideRequestedServerName) { envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client; client.set_sni("lyft.com"); - Network::TransportSocketOptionsSharedPtr transport_socket_options( + Network::TransportSocketOptionsConstSharedPtr transport_socket_options( new Network::TransportSocketOptionsImpl("example.com")); TestUtilOptionsV2 test_options(listener, client, true, GetParam()); @@ -4611,7 +4611,7 @@ TEST_P(SslSocketTest, OverrideRequestedServerNameWithoutSniInUpstreamTlsContext) envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client; - Network::TransportSocketOptionsSharedPtr transport_socket_options( + Network::TransportSocketOptionsConstSharedPtr transport_socket_options( new Network::TransportSocketOptionsImpl("example.com")); TestUtilOptionsV2 test_options(listener, client, true, GetParam()); testUtilV2(test_options.setExpectedRequestedServerName("example.com") @@ -4879,7 +4879,7 @@ class SslReadBufferLimitTest : public SslSocketTest { dispatcher_ = api_->allocateDispatcher("test_thread", Buffer::WatermarkFactoryPtr{factory}); // By default, expect 4 buffers to be created - the client and server read and write buffers. 
- EXPECT_CALL(*factory, create_(_, _, _)) + EXPECT_CALL(*factory, createBuffer_(_, _, _)) .Times(4) .WillOnce(Invoke([&](std::function below_low, std::function above_high, std::function above_overflow) -> Buffer::Instance* { diff --git a/test/extensions/upstreams/http/tcp/upstream_request_test.cc b/test/extensions/upstreams/http/tcp/upstream_request_test.cc index 1c18cb0c0770e..4ceaa9f3dddac 100644 --- a/test/extensions/upstreams/http/tcp/upstream_request_test.cc +++ b/test/extensions/upstreams/http/tcp/upstream_request_test.cc @@ -68,9 +68,9 @@ TEST_F(TcpConnPoolTest, OnPoolFailure) { EXPECT_CALL(mock_pool_, newConnection(_)).WillOnce(Return(&cancellable_)); conn_pool_->newStream(&mock_generic_callbacks_); - EXPECT_CALL(mock_generic_callbacks_, onPoolFailure(_, _, _)); + EXPECT_CALL(mock_generic_callbacks_, onPoolFailure(_, "foo", _)); conn_pool_->onPoolFailure(Envoy::Tcp::ConnectionPool::PoolFailureReason::LocalConnectionFailure, - host_); + "foo", host_); // Make sure that the pool failure nulled out the pending request. 
EXPECT_FALSE(conn_pool_->cancelAnyPendingStream()); diff --git a/test/fuzz/utility.h b/test/fuzz/utility.h index 002eab8571a74..b478b9eab1faf 100644 --- a/test/fuzz/utility.h +++ b/test/fuzz/utility.h @@ -152,7 +152,6 @@ inline std::unique_ptr fromStreamInfo(const test::fuzz::StreamIn if (stream_info.has_response_code()) { test_stream_info->response_code_ = stream_info.response_code().value(); } - test_stream_info->setRequestedServerName(stream_info.requested_server_name()); auto upstream_host = std::make_shared>(); auto upstream_metadata = std::make_shared( replaceInvalidStringValues(stream_info.upstream_metadata())); @@ -168,6 +167,8 @@ inline std::unique_ptr fromStreamInfo(const test::fuzz::StreamIn test_stream_info->upstream_local_address_ = upstream_local_address; test_stream_info->downstream_address_provider_ = std::make_shared(address, address); + test_stream_info->downstream_address_provider_->setRequestedServerName( + stream_info.requested_server_name()); auto connection_info = std::make_shared>(); ON_CALL(*connection_info, subjectPeerCertificate()) .WillByDefault(testing::ReturnRef(TestSubjectPeer)); diff --git a/test/integration/BUILD b/test/integration/BUILD index 5d4083e23c05f..7e6d0990dbf6a 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -30,6 +30,7 @@ envoy_cc_test_library( ], deps = [ ":http_integration_lib", + "//source/common/common:matchers_lib", "//source/common/config:protobuf_link_hacks", "//source/common/config:version_converter_lib", "//source/common/protobuf:utility_lib", diff --git a/test/integration/ads_integration.cc b/test/integration/ads_integration.cc index dfa3b81539b94..f356c7cd6cc25 100644 --- a/test/integration/ads_integration.cc +++ b/test/integration/ads_integration.cc @@ -8,6 +8,7 @@ #include "envoy/config/route/v3/route.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" +#include "source/common/common/matchers.h" #include "source/common/config/protobuf_link_hacks.h" #include 
"source/common/protobuf/protobuf.h" #include "source/common/protobuf/utility.h" @@ -259,20 +260,20 @@ void AdsIntegrationTest::testBasicFlow() { } envoy::admin::v3::ClustersConfigDump AdsIntegrationTest::getClustersConfigDump() { - auto message_ptr = - test_server_->server().admin().getConfigTracker().getCallbacksMap().at("clusters")(); + auto message_ptr = test_server_->server().admin().getConfigTracker().getCallbacksMap().at( + "clusters")(Matchers::UniversalStringMatcher()); return dynamic_cast(*message_ptr); } envoy::admin::v3::ListenersConfigDump AdsIntegrationTest::getListenersConfigDump() { - auto message_ptr = - test_server_->server().admin().getConfigTracker().getCallbacksMap().at("listeners")(); + auto message_ptr = test_server_->server().admin().getConfigTracker().getCallbacksMap().at( + "listeners")(Matchers::UniversalStringMatcher()); return dynamic_cast(*message_ptr); } envoy::admin::v3::RoutesConfigDump AdsIntegrationTest::getRoutesConfigDump() { - auto message_ptr = - test_server_->server().admin().getConfigTracker().getCallbacksMap().at("routes")(); + auto message_ptr = test_server_->server().admin().getConfigTracker().getCallbacksMap().at( + "routes")(Matchers::UniversalStringMatcher()); return dynamic_cast(*message_ptr); } diff --git a/test/integration/base_integration_test.cc b/test/integration/base_integration_test.cc index d74b71962a032..26c12716cfcb2 100644 --- a/test/integration/base_integration_test.cc +++ b/test/integration/base_integration_test.cc @@ -60,7 +60,7 @@ BaseIntegrationTest::BaseIntegrationTest(const InstanceConstSharedPtrFn& upstrea // complex test hooks to the server and/or spin waiting on stats, neither of which I think are // necessary right now. 
timeSystem().realSleepDoNotUseWithoutScrutiny(std::chrono::milliseconds(10)); - ON_CALL(*mock_buffer_factory_, create_(_, _, _)) + ON_CALL(*mock_buffer_factory_, createBuffer_(_, _, _)) .WillByDefault(Invoke([](std::function below_low, std::function above_high, std::function above_overflow) -> Buffer::Instance* { return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); @@ -323,7 +323,7 @@ void BaseIntegrationTest::registerTestServerPorts(const std::vector std::string getListenerDetails(Envoy::Server::Instance& server) { const auto& cbs_maps = server.admin().getConfigTracker().getCallbacksMap(); - ProtobufTypes::MessagePtr details = cbs_maps.at("listeners")(); + ProtobufTypes::MessagePtr details = cbs_maps.at("listeners")(Matchers::UniversalStringMatcher()); auto listener_info = Protobuf::down_cast(*details); return MessageUtil::getYamlStringFromMessage(listener_info.dynamic_listeners(0).error_state()); } @@ -428,15 +428,16 @@ size_t entryIndex(const std::string& file, uint32_t entry) { std::string BaseIntegrationTest::waitForAccessLog(const std::string& filename, uint32_t entry) { // Wait a max of 1s for logs to flush to disk. + std::string contents; for (int i = 0; i < 1000; ++i) { - std::string contents = TestEnvironment::readFileToStringForTest(filename); + contents = TestEnvironment::readFileToStringForTest(filename); size_t index = entryIndex(contents, entry); if (contents.length() > index) { return contents.substr(index); } absl::SleepFor(absl::Milliseconds(1)); } - RELEASE_ASSERT(0, "Timed out waiting for access log"); + RELEASE_ASSERT(0, absl::StrCat("Timed out waiting for access log. 
Found: ", contents)); return ""; } diff --git a/test/integration/integration_admin_test.cc b/test/integration/integration_admin_test.cc index 65da9c1fb119a..b694e98c55ec8 100644 --- a/test/integration/integration_admin_test.cc +++ b/test/integration/integration_admin_test.cc @@ -398,6 +398,17 @@ TEST_P(IntegrationAdminTest, Admin) { TestUtility::loadFromJson(response->body(), config_dump_with_eds); EXPECT_EQ(7, config_dump_with_eds.configs_size()); + EXPECT_EQ("200", request("admin", "GET", "/config_dump?name_regex=route_config_0", response)); + EXPECT_EQ("application/json", ContentType(response)); + envoy::admin::v3::ConfigDump name_filtered_config_dump; + TestUtility::loadFromJson(response->body(), name_filtered_config_dump); + EXPECT_EQ(6, config_dump.configs_size()); + + // SecretsConfigDump should have been totally filtered away. + secret_config_dump.Clear(); + name_filtered_config_dump.configs(5).UnpackTo(&secret_config_dump); + EXPECT_EQ(secret_config_dump.static_secrets().size(), 0); + // Validate that the "inboundonly" does not stop the default listener. 
response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "POST", "/drain_listeners?inboundonly", "", diff --git a/test/integration/integration_tcp_client.cc b/test/integration/integration_tcp_client.cc index a6535265c00f3..2396d74c06da4 100644 --- a/test/integration/integration_tcp_client.cc +++ b/test/integration/integration_tcp_client.cc @@ -41,7 +41,7 @@ IntegrationTcpClient::IntegrationTcpClient( Network::Address::InstanceConstSharedPtr source_address) : payload_reader_(new WaitForPayloadReader(dispatcher)), callbacks_(new ConnectionCallbacks(*this)) { - EXPECT_CALL(factory, create_(_, _, _)) + EXPECT_CALL(factory, createBuffer_(_, _, _)) .Times(AtLeast(1)) .WillOnce(Invoke([&](std::function below_low, std::function above_high, std::function above_overflow) -> Buffer::Instance* { diff --git a/test/integration/listener_filter_integration_test.cc b/test/integration/listener_filter_integration_test.cc index 558109f8fde34..ccd5f360a77ab 100644 --- a/test/integration/listener_filter_integration_test.cc +++ b/test/integration/listener_filter_integration_test.cc @@ -48,20 +48,35 @@ class ListenerFilterIntegrationTest : public testing::TestWithParam listener_filter_disabled = absl::nullopt) { + void initializeWithListenerFilter(bool ssl_client, + absl::optional listener_filter_disabled = absl::nullopt) { config_helper_.renameListener("echo"); std::string tls_inspector_config = ConfigHelper::tlsInspectorFilter(); if (listener_filter_disabled.has_value()) { tls_inspector_config = appendMatcher(tls_inspector_config, listener_filter_disabled.value()); } config_helper_.addListenerFilter(tls_inspector_config); - config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { - auto* filter_chain = - bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0); - auto* alpn = filter_chain->mutable_filter_chain_match()->add_application_protocols(); - *alpn = "envoyalpn"; + + 
config_helper_.addConfigModifier([ssl_client]( + envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + if (ssl_client) { + auto* filter_chain = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0); + auto* alpn = filter_chain->mutable_filter_chain_match()->add_application_protocols(); + *alpn = "envoyalpn"; + } + auto* timeout = bootstrap.mutable_static_resources() + ->mutable_listeners(0) + ->mutable_listener_filters_timeout(); + timeout->MergeFrom(ProtobufUtil::TimeUtil::MillisecondsToDuration(1000)); + bootstrap.mutable_static_resources() + ->mutable_listeners(0) + ->set_continue_on_listener_filters_timeout(true); }); - config_helper_.addSslConfig(); + if (ssl_client) { + config_helper_.addSslConfig(); + } + useListenerAccessLog("%RESPONSE_CODE_DETAILS%"); BaseIntegrationTest::initialize(); @@ -69,23 +84,28 @@ class ListenerFilterIntegrationTest : public testing::TestWithParam(timeSystem()); } - void setupConnections(bool listener_filter_disabled, bool expect_connection_open) { - initializeWithListenerFilter(listener_filter_disabled); + void setupConnections(bool listener_filter_disabled, bool expect_connection_open, + bool ssl_client) { + initializeWithListenerFilter(ssl_client, listener_filter_disabled); // Set up the SSL client. 
Network::Address::InstanceConstSharedPtr address = Ssl::getSslAddress(version_, lookupPort("echo")); context_ = Ssl::createClientSslTransportSocketFactory({}, *context_manager_, *api_); - ssl_client_ = dispatcher_->createClientConnection( - address, Network::Address::InstanceConstSharedPtr(), - context_->createTransportSocket( - // nullptr - std::make_shared( - absl::string_view(""), std::vector(), - std::vector{"envoyalpn"})), - nullptr); - ssl_client_->addConnectionCallbacks(connect_callbacks_); - ssl_client_->connect(); + Network::TransportSocketPtr transport_socket; + if (ssl_client) { + transport_socket = + context_->createTransportSocket(std::make_shared( + absl::string_view(""), std::vector(), + std::vector{"envoyalpn"})); + } else { + auto transport_socket_factory = std::make_unique(); + transport_socket = transport_socket_factory->createTransportSocket(nullptr); + } + client_ = dispatcher_->createClientConnection( + address, Network::Address::InstanceConstSharedPtr(), std::move(transport_socket), nullptr); + client_->addConnectionCallbacks(connect_callbacks_); + client_->connect(); while (!connect_callbacks_.connected() && !connect_callbacks_.closed()) { dispatcher_->run(Event::Dispatcher::RunType::NonBlock); } @@ -98,27 +118,45 @@ class ListenerFilterIntegrationTest : public testing::TestWithParam context_manager_; Network::TransportSocketFactoryPtr context_; ConnectionStatusCallbacks connect_callbacks_; testing::NiceMock secret_manager_; - Network::ClientConnectionPtr ssl_client_; + Network::ClientConnectionPtr client_; }; // Each listener filter is enabled by default. 
TEST_P(ListenerFilterIntegrationTest, AllListenerFiltersAreEnabledByDefault) { - setupConnections(/*listener_filter_disabled=*/false, /*expect_connection_open=*/true); - ssl_client_->close(Network::ConnectionCloseType::NoFlush); + setupConnections(/*listener_filter_disabled=*/false, /*expect_connection_open=*/true, + /*ssl_client=*/true); + client_->close(Network::ConnectionCloseType::NoFlush); EXPECT_THAT(waitForAccessLog(listener_access_log_name_), testing::Eq("-")); } // The tls_inspector is disabled. The ALPN won't be sniffed out and no filter chain is matched. TEST_P(ListenerFilterIntegrationTest, DisabledTlsInspectorFailsFilterChainFind) { - setupConnections(/*listener_filter_disabled=*/true, /*expect_connection_open=*/false); + setupConnections(/*listener_filter_disabled=*/true, /*expect_connection_open=*/false, + /*ssl_client=*/true); EXPECT_THAT(waitForAccessLog(listener_access_log_name_), testing::Eq(StreamInfo::ResponseCodeDetails::get().FilterChainNotFound)); } +// trigger the tls inspect filter timeout, and continue create new connection after timeout +TEST_P(ListenerFilterIntegrationTest, ContinueOnListenerTimeout) { + setupConnections(/*listener_filter_disabled=*/false, /*expect_connection_open=*/true, + /*ssl_client=*/false); + // The length of tls hello message is defined as `TLS_MAX_CLIENT_HELLO = 64 * 1024` + // if tls inspect filter doesn't read the max length of hello message data, it + // will continue wait. Then the listener filter timeout timer will be triggered. + Buffer::OwnedImpl buffer("fake data"); + client_->write(buffer, false); + // the timeout is set as one seconds, sleep 5 to trigger the timeout. 
+ timeSystem().advanceTimeWaitImpl(std::chrono::milliseconds(2000)); + client_->close(Network::ConnectionCloseType::NoFlush); + EXPECT_THAT(waitForAccessLog(listener_access_log_name_), testing::Eq("-")); +} + INSTANTIATE_TEST_SUITE_P(IpVersions, ListenerFilterIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); diff --git a/test/integration/proxy_proto_integration_test.cc b/test/integration/proxy_proto_integration_test.cc index dc683f6772050..83d983edbc069 100644 --- a/test/integration/proxy_proto_integration_test.cc +++ b/test/integration/proxy_proto_integration_test.cc @@ -275,4 +275,117 @@ TEST_P(ProxyProtoTcpIntegrationTest, AccessLog) { EXPECT_EQ(log_result, "remote=1.2.3.4:12345 local=254.254.254.254:1234"); } +ProxyProtoFilterChainMatchIntegrationTest::ProxyProtoFilterChainMatchIntegrationTest() { + useListenerAccessLog("%FILTER_CHAIN_NAME% %RESPONSE_CODE_DETAILS%"); + config_helper_.skipPortUsageValidation(); + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + // This test doesn't need to deal with upstream connections at all, so make sure none occur. 
+ bootstrap.mutable_static_resources()->mutable_clusters(0)->clear_load_assignment(); + + auto* orig_filter_chain = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0); + for (unsigned i = 0; i < 3; i++) { + *bootstrap.mutable_static_resources()->mutable_listeners(0)->add_filter_chains() = + *orig_filter_chain; + } + + auto setPrefix = [](auto* prefix, const std::string& ip, uint32_t length) { + prefix->set_address_prefix(ip); + prefix->mutable_prefix_len()->set_value(length); + }; + + { + auto* filter_chain = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0); + filter_chain->set_name("directsource_localhost_and_source_1.2.3.0/24"); + + setPrefix(filter_chain->mutable_filter_chain_match()->add_direct_source_prefix_ranges(), + "127.0.0.1", 8); + setPrefix(filter_chain->mutable_filter_chain_match()->add_direct_source_prefix_ranges(), + "::1", 128); + + setPrefix(filter_chain->mutable_filter_chain_match()->add_source_prefix_ranges(), "1.2.3.0", + 24); + } + + { + auto* filter_chain = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(1); + filter_chain->set_name("wrong_directsource_and_source_1.2.3.0/24"); + setPrefix(filter_chain->mutable_filter_chain_match()->add_direct_source_prefix_ranges(), + "1.1.1.1", 32); + setPrefix(filter_chain->mutable_filter_chain_match()->add_direct_source_prefix_ranges(), + "eeee::1", 128); + + setPrefix(filter_chain->mutable_filter_chain_match()->add_source_prefix_ranges(), "1.2.3.0", + 24); + } + + { + auto* filter_chain = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(2); + filter_chain->set_name("wrong_directsource_and_source_5.5.5.5.32"); + setPrefix(filter_chain->mutable_filter_chain_match()->add_direct_source_prefix_ranges(), + "1.1.1.1", 32); + setPrefix(filter_chain->mutable_filter_chain_match()->add_direct_source_prefix_ranges(), + "eeee::1", 128); + + 
setPrefix(filter_chain->mutable_filter_chain_match()->add_source_prefix_ranges(), "5.5.5.5", + 32); + } + + { + auto* filter_chain = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(3); + filter_chain->set_name("no_direct_source_and_source_6.6.6.6/32"); + + setPrefix(filter_chain->mutable_filter_chain_match()->add_source_prefix_ranges(), "6.6.6.6", + 32); + } + }); +} + +INSTANTIATE_TEST_SUITE_P(IpVersions, ProxyProtoFilterChainMatchIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +// Validate that source IP and direct source IP match correctly. +TEST_P(ProxyProtoFilterChainMatchIntegrationTest, MatchDirectSourceAndSource) { + initialize(); + + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + ASSERT_TRUE(tcp_client->write("PROXY TCP4 1.2.3.4 254.254.254.254 12345 1234\r\nhello", false)); + tcp_client->waitForDisconnect(); + + EXPECT_THAT(waitForAccessLog(listener_access_log_name_), + testing::HasSubstr("directsource_localhost_and_source_1.2.3.0/24 -")); +} + +// Test that a mismatched direct source prevents matching a filter chain with a matching source. +TEST_P(ProxyProtoFilterChainMatchIntegrationTest, MismatchDirectSourceButMatchSource) { + initialize(); + + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + ASSERT_TRUE(tcp_client->write("PROXY TCP4 5.5.5.5 254.254.254.254 12345 1234\r\nhello", false)); + tcp_client->waitForDisconnect(); + + EXPECT_THAT(waitForAccessLog(listener_access_log_name_), + testing::HasSubstr( + absl::StrCat("- ", StreamInfo::ResponseCodeDetails::get().FilterChainNotFound))); +} + +// Test that a more specific direct source match prevents matching a filter chain with a less +// specific direct source match but matching source. 
+TEST_P(ProxyProtoFilterChainMatchIntegrationTest, MoreSpecificDirectSource) { + initialize(); + + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + ASSERT_TRUE(tcp_client->write("PROXY TCP4 6.6.6.6 254.254.254.254 12345 1234\r\nhello", false)); + tcp_client->waitForDisconnect(); + + EXPECT_THAT(waitForAccessLog(listener_access_log_name_), + testing::HasSubstr( + absl::StrCat("- ", StreamInfo::ResponseCodeDetails::get().FilterChainNotFound))); +} + } // namespace Envoy diff --git a/test/integration/proxy_proto_integration_test.h b/test/integration/proxy_proto_integration_test.h index cfce0d5534616..224b6b168eb82 100644 --- a/test/integration/proxy_proto_integration_test.h +++ b/test/integration/proxy_proto_integration_test.h @@ -24,4 +24,9 @@ class ProxyProtoTcpIntegrationTest : public testing::TestWithParamwaitForCounterEq("http3.quic_version_50", 2u); + } else if (GetParam().second == QuicVersionType::GquicTls) { + test_server_->waitForCounterEq("http3.quic_version_51", 2u); + } else { + test_server_->waitForCounterEq("http3.quic_version_rfc_v1", 2u); + } } // Ensure multiple quic connections work, regardless of platform BPF support diff --git a/test/integration/server.h b/test/integration/server.h index 1f6a2b95039ae..21d94ed44a9ad 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -51,8 +51,8 @@ createTestOptionsImpl(const std::string& config_path, const std::string& config_ class TestComponentFactory : public ComponentFactory { public: Server::DrainManagerPtr createDrainManager(Server::Instance& server) override { - return Server::DrainManagerPtr{ - new Server::DrainManagerImpl(server, envoy::config::listener::v3::Listener::MODIFY_ONLY)}; + return Server::DrainManagerPtr{new Server::DrainManagerImpl( + server, envoy::config::listener::v3::Listener::MODIFY_ONLY, server.dispatcher())}; } Runtime::LoaderPtr createRuntime(Server::Instance& server, Server::Configuration::Initial& config) override { @@ -499,8 
+499,8 @@ class IntegrationTestServer : public Logger::Loggable, // Server::ComponentFactory Server::DrainManagerPtr createDrainManager(Server::Instance& server) override { - drain_manager_ = - new Server::DrainManagerImpl(server, envoy::config::listener::v3::Listener::MODIFY_ONLY); + drain_manager_ = new Server::DrainManagerImpl( + server, envoy::config::listener::v3::Listener::MODIFY_ONLY, server.dispatcher()); return Server::DrainManagerPtr{drain_manager_}; } Runtime::LoaderPtr createRuntime(Server::Instance& server, diff --git a/test/integration/tcp_conn_pool_integration_test.cc b/test/integration/tcp_conn_pool_integration_test.cc index a04f1f8f4d0e2..faea32638e77b 100644 --- a/test/integration/tcp_conn_pool_integration_test.cc +++ b/test/integration/tcp_conn_pool_integration_test.cc @@ -47,7 +47,7 @@ class TestFilter : public Network::ReadFilter { Request(TestFilter& parent, Buffer::Instance& data) : parent_(parent) { data_.move(data); } // Tcp::ConnectionPool::Callbacks - void onPoolFailure(ConnectionPool::PoolFailureReason, + void onPoolFailure(ConnectionPool::PoolFailureReason, absl::string_view, Upstream::HostDescriptionConstSharedPtr) override { ASSERT(false); } diff --git a/test/integration/tcp_proxy_integration_test.cc b/test/integration/tcp_proxy_integration_test.cc index 687b6ab9241a6..8062772d73e39 100644 --- a/test/integration/tcp_proxy_integration_test.cc +++ b/test/integration/tcp_proxy_integration_test.cc @@ -1180,7 +1180,7 @@ void TcpProxySslIntegrationTest::setupConnections() { // Set up the mock buffer factory so the newly created SSL client will have a mock write // buffer. This allows us to track the bytes actually written to the socket. 
- EXPECT_CALL(*mock_buffer_factory_, create_(_, _, _)) + EXPECT_CALL(*mock_buffer_factory_, createBuffer_(_, _, _)) .Times(AtLeast(1)) .WillOnce(Invoke([&](std::function below_low, std::function above_high, std::function above_overflow) -> Buffer::Instance* { diff --git a/test/integration/tracked_watermark_buffer.cc b/test/integration/tracked_watermark_buffer.cc index 61c74f79ac755..1a14d888a1d52 100644 --- a/test/integration/tracked_watermark_buffer.cc +++ b/test/integration/tracked_watermark_buffer.cc @@ -14,9 +14,9 @@ TrackedWatermarkBufferFactory::~TrackedWatermarkBufferFactory() { } Buffer::InstancePtr -TrackedWatermarkBufferFactory::create(std::function below_low_watermark, - std::function above_high_watermark, - std::function above_overflow_watermark) { +TrackedWatermarkBufferFactory::createBuffer(std::function below_low_watermark, + std::function above_high_watermark, + std::function above_overflow_watermark) { absl::MutexLock lock(&mutex_); uint64_t idx = next_idx_++; ++active_buffer_count_; diff --git a/test/integration/tracked_watermark_buffer.h b/test/integration/tracked_watermark_buffer.h index b3402d3c643bd..999e955037fc6 100644 --- a/test/integration/tracked_watermark_buffer.h +++ b/test/integration/tracked_watermark_buffer.h @@ -64,9 +64,9 @@ class TrackedWatermarkBufferFactory : public Buffer::WatermarkFactory { TrackedWatermarkBufferFactory() = default; ~TrackedWatermarkBufferFactory() override; // Buffer::WatermarkFactory - Buffer::InstancePtr create(std::function below_low_watermark, - std::function above_high_watermark, - std::function above_overflow_watermark) override; + Buffer::InstancePtr createBuffer(std::function below_low_watermark, + std::function above_high_watermark, + std::function above_overflow_watermark) override; // Number of buffers created. 
uint64_t numBuffersCreated() const; diff --git a/test/integration/tracked_watermark_buffer_test.cc b/test/integration/tracked_watermark_buffer_test.cc index c6579bfcddbc1..734fa5cd31aee 100644 --- a/test/integration/tracked_watermark_buffer_test.cc +++ b/test/integration/tracked_watermark_buffer_test.cc @@ -35,15 +35,16 @@ TEST_F(TrackedWatermarkBufferTest, WatermarkFunctions) { ReadyWatcher overflow_watermark; ReadyWatcher now; - auto buffer = factory_.create([&]() { low_watermark.ready(); }, [&]() { high_watermark.ready(); }, - [&]() { overflow_watermark.ready(); }); + auto buffer = + factory_.createBuffer([&]() { low_watermark.ready(); }, [&]() { high_watermark.ready(); }, + [&]() { overflow_watermark.ready(); }); // Test highWatermarkRange EXPECT_THAT(factory_.highWatermarkRange(), Pair(0, 0)); buffer->setWatermarks(100); EXPECT_THAT(factory_.highWatermarkRange(), Pair(100, 100)); - auto buffer2 = factory_.create([]() {}, []() {}, []() {}); + auto buffer2 = factory_.createBuffer([]() {}, []() {}, []() {}); EXPECT_THAT(factory_.highWatermarkRange(), Pair(100, 0)); buffer2->setWatermarks(200); @@ -64,9 +65,9 @@ TEST_F(TrackedWatermarkBufferTest, WatermarkFunctions) { } TEST_F(TrackedWatermarkBufferTest, BufferSizes) { - auto buffer = factory_.create([]() {}, []() {}, []() {}); + auto buffer = factory_.createBuffer([]() {}, []() {}, []() {}); buffer->setWatermarks(100); - auto buffer2 = factory_.create([]() {}, []() {}, []() {}); + auto buffer2 = factory_.createBuffer([]() {}, []() {}, []() {}); EXPECT_EQ(2, factory_.numBuffersCreated()); EXPECT_EQ(2, factory_.numBuffersActive()); // Add some bytes to the buffers, and verify max and sum(max). 
@@ -109,9 +110,9 @@ TEST_F(TrackedWatermarkBufferTest, BufferSizes) { } TEST_F(TrackedWatermarkBufferTest, WaitUntilTotalBufferedExceeds) { - auto buffer1 = factory_.create([]() {}, []() {}, []() {}); - auto buffer2 = factory_.create([]() {}, []() {}, []() {}); - auto buffer3 = factory_.create([]() {}, []() {}, []() {}); + auto buffer1 = factory_.createBuffer([]() {}, []() {}, []() {}); + auto buffer2 = factory_.createBuffer([]() {}, []() {}, []() {}); + auto buffer3 = factory_.createBuffer([]() {}, []() {}, []() {}); auto thread1 = Thread::threadFactoryForTest().createThread([&]() { buffer1->add("a"); }); auto thread2 = Thread::threadFactoryForTest().createThread([&]() { buffer2->add("b"); }); @@ -127,9 +128,9 @@ TEST_F(TrackedWatermarkBufferTest, WaitUntilTotalBufferedExceeds) { } TEST_F(TrackedWatermarkBufferTest, TracksNumberOfBuffersActivelyBound) { - auto buffer1 = factory_.create([]() {}, []() {}, []() {}); - auto buffer2 = factory_.create([]() {}, []() {}, []() {}); - auto buffer3 = factory_.create([]() {}, []() {}, []() {}); + auto buffer1 = factory_.createBuffer([]() {}, []() {}, []() {}); + auto buffer2 = factory_.createBuffer([]() {}, []() {}, []() {}); + auto buffer3 = factory_.createBuffer([]() {}, []() {}, []() {}); BufferMemoryAccountSharedPtr account = std::make_shared(); ASSERT_TRUE(factory_.waitUntilExpectedNumberOfAccountsAndBoundBuffers(0, 0)); @@ -152,9 +153,9 @@ TEST_F(TrackedWatermarkBufferTest, TracksNumberOfBuffersActivelyBound) { } TEST_F(TrackedWatermarkBufferTest, TracksNumberOfAccountsActive) { - auto buffer1 = factory_.create([]() {}, []() {}, []() {}); - auto buffer2 = factory_.create([]() {}, []() {}, []() {}); - auto buffer3 = factory_.create([]() {}, []() {}, []() {}); + auto buffer1 = factory_.createBuffer([]() {}, []() {}, []() {}); + auto buffer2 = factory_.createBuffer([]() {}, []() {}, []() {}); + auto buffer3 = factory_.createBuffer([]() {}, []() {}, []() {}); BufferMemoryAccountSharedPtr account1 = std::make_shared(); 
ASSERT_TRUE(factory_.waitUntilExpectedNumberOfAccountsAndBoundBuffers(0, 0)); @@ -179,8 +180,8 @@ TEST_F(TrackedWatermarkBufferTest, TracksNumberOfAccountsActive) { } TEST_F(TrackedWatermarkBufferTest, WaitForExpectedAccountBalanceShouldReturnTrueWhenConditionsMet) { - auto buffer1 = factory_.create([]() {}, []() {}, []() {}); - auto buffer2 = factory_.create([]() {}, []() {}, []() {}); + auto buffer1 = factory_.createBuffer([]() {}, []() {}, []() {}); + auto buffer2 = factory_.createBuffer([]() {}, []() {}, []() {}); BufferMemoryAccountSharedPtr account1 = std::make_shared(); BufferMemoryAccountSharedPtr account2 = std::make_shared(); buffer1->bindAccount(account1); diff --git a/test/mocks/buffer/mocks.h b/test/mocks/buffer/mocks.h index cf6b7ef90d869..22a215206939b 100644 --- a/test/mocks/buffer/mocks.h +++ b/test/mocks/buffer/mocks.h @@ -2,6 +2,8 @@ #include +#include "envoy/buffer/buffer.h" + #include "source/common/buffer/buffer_impl.h" #include "source/common/buffer/watermark_buffer.h" #include "source/common/network/io_socket_error_impl.h" @@ -74,14 +76,15 @@ class MockBufferFactory : public Buffer::WatermarkFactory { MockBufferFactory(); ~MockBufferFactory() override; - Buffer::InstancePtr create(std::function below_low, std::function above_high, - std::function above_overflow) override { - auto buffer = Buffer::InstancePtr{create_(below_low, above_high, above_overflow)}; + Buffer::InstancePtr createBuffer(std::function below_low, + std::function above_high, + std::function above_overflow) override { + auto buffer = Buffer::InstancePtr{createBuffer_(below_low, above_high, above_overflow)}; ASSERT(buffer != nullptr); return buffer; } - MOCK_METHOD(Buffer::Instance*, create_, + MOCK_METHOD(Buffer::Instance*, createBuffer_, (std::function below_low, std::function above_high, std::function above_overflow)); }; diff --git a/test/mocks/event/mocks.cc b/test/mocks/event/mocks.cc index 0c64b69d97027..f70fc602d5480 100644 --- a/test/mocks/event/mocks.cc +++ 
b/test/mocks/event/mocks.cc @@ -29,7 +29,7 @@ MockDispatcher::MockDispatcher(const std::string& name) : name_(name) { .WillByDefault(ReturnNew>()); ON_CALL(*this, post(_)).WillByDefault(Invoke([](PostCb cb) -> void { cb(); })); - ON_CALL(buffer_factory_, create_(_, _, _)) + ON_CALL(buffer_factory_, createBuffer_(_, _, _)) .WillByDefault(Invoke([](std::function below_low, std::function above_high, std::function above_overflow) -> Buffer::Instance* { return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); diff --git a/test/mocks/matcher/BUILD b/test/mocks/matcher/BUILD index 0d9f3e97bda91..f6eb9386d93e8 100644 --- a/test/mocks/matcher/BUILD +++ b/test/mocks/matcher/BUILD @@ -12,6 +12,7 @@ envoy_cc_mock( name = "matcher_mocks", hdrs = ["mocks.h"], deps = [ + "//source/common/common:matchers_lib", "//source/common/matcher:matcher_lib", ], ) diff --git a/test/mocks/matcher/mocks.h b/test/mocks/matcher/mocks.h index ab2aee120ed08..46483563ed62c 100644 --- a/test/mocks/matcher/mocks.h +++ b/test/mocks/matcher/mocks.h @@ -1,5 +1,6 @@ #pragma once +#include "source/common/common/matchers.h" #include "source/common/matcher/matcher.h" #include "gmock/gmock.h" @@ -18,4 +19,11 @@ class MockMatchTreeValidationVisitor : public MatchTreeValidationVisitor address_provider_; bool is_closed_; }; diff --git a/test/mocks/network/transport_socket.h b/test/mocks/network/transport_socket.h index 2e9e906f36b11..ebb9e26c28904 100644 --- a/test/mocks/network/transport_socket.h +++ b/test/mocks/network/transport_socket.h @@ -39,7 +39,7 @@ class MockTransportSocketFactory : public TransportSocketFactory { MOCK_METHOD(bool, implementsSecureTransport, (), (const)); MOCK_METHOD(bool, usesProxyProtocolOptions, (), (const)); MOCK_METHOD(bool, supportsAlpn, (), (const)); - MOCK_METHOD(TransportSocketPtr, createTransportSocket, (TransportSocketOptionsSharedPtr), + MOCK_METHOD(TransportSocketPtr, createTransportSocket, (TransportSocketOptionsConstSharedPtr), (const)); }; diff --git 
a/test/mocks/server/drain_manager.h b/test/mocks/server/drain_manager.h index dc0331b05876c..883930091b1ef 100644 --- a/test/mocks/server/drain_manager.h +++ b/test/mocks/server/drain_manager.h @@ -3,8 +3,11 @@ #include #include #include +#include #include +#include +#include "envoy/event/dispatcher.h" #include "envoy/server/drain_manager.h" #include "gmock/gmock.h" @@ -16,11 +19,17 @@ class MockDrainManager : public DrainManager { MockDrainManager(); ~MockDrainManager() override; - // Server::DrainManager - MOCK_METHOD(bool, drainClose, (), (const)); + // Network::DrainManager + MOCK_METHOD(DrainManagerPtr, createChildManager, + (Event::Dispatcher&, envoy::config::listener::v3::Listener::DrainType), (override)); + MOCK_METHOD(DrainManagerPtr, createChildManager, (Event::Dispatcher&), (override)); MOCK_METHOD(bool, draining, (), (const)); - MOCK_METHOD(void, startDrainSequence, (std::function completion)); MOCK_METHOD(void, startParentShutdownSequence, ()); + MOCK_METHOD(void, startDrainSequence, (std::function completion)); + + // Network::DrainDecision + MOCK_METHOD(bool, drainClose, (), (const)); + MOCK_METHOD(Common::CallbackHandlePtr, addOnDrainCloseCb, (DrainCloseCb cb), (const, override)); std::function drain_sequence_completion_; }; diff --git a/test/mocks/ssl/mocks.h b/test/mocks/ssl/mocks.h index 94cf360a3e706..43ad275fce277 100644 --- a/test/mocks/ssl/mocks.h +++ b/test/mocks/ssl/mocks.h @@ -156,7 +156,6 @@ class MockCertificateValidationContextConfig : public CertificateValidationConte MOCK_METHOD(const std::string&, caCertPath, (), (const)); MOCK_METHOD(const std::string&, certificateRevocationList, (), (const)); MOCK_METHOD(const std::string&, certificateRevocationListPath, (), (const)); - MOCK_METHOD(const std::vector&, verifySubjectAltNameList, (), (const)); MOCK_METHOD(const std::vector&, subjectAltNameMatchers, (), (const)); MOCK_METHOD(const std::vector&, verifyCertificateHashList, (), (const)); diff --git a/test/mocks/stream_info/mocks.cc 
b/test/mocks/stream_info/mocks.cc index 3907bb515f131..a2b7c768162d3 100644 --- a/test/mocks/stream_info/mocks.cc +++ b/test/mocks/stream_info/mocks.cc @@ -112,11 +112,6 @@ MockStreamInfo::MockStreamInfo() .WillByDefault(Invoke([this](const FilterStateSharedPtr& filter_state) { upstream_filter_state_ = filter_state; })); - ON_CALL(*this, setRequestedServerName(_)) - .WillByDefault(Invoke([this](const absl::string_view requested_server_name) { - requested_server_name_ = std::string(requested_server_name); - })); - ON_CALL(*this, requestedServerName()).WillByDefault(ReturnRef(requested_server_name_)); ON_CALL(*this, setRouteName(_)).WillByDefault(Invoke([this](const absl::string_view route_name) { route_name_ = std::string(route_name); })); diff --git a/test/mocks/stream_info/mocks.h b/test/mocks/stream_info/mocks.h index 1a0a81b36d9a5..dbc2fec6c5bd4 100644 --- a/test/mocks/stream_info/mocks.h +++ b/test/mocks/stream_info/mocks.h @@ -80,8 +80,6 @@ class MockStreamInfo : public StreamInfo { MOCK_METHOD(const FilterState&, filterState, (), (const)); MOCK_METHOD(const FilterStateSharedPtr&, upstreamFilterState, (), (const)); MOCK_METHOD(void, setUpstreamFilterState, (const FilterStateSharedPtr&)); - MOCK_METHOD(void, setRequestedServerName, (const absl::string_view)); - MOCK_METHOD(const std::string&, requestedServerName, (), (const)); MOCK_METHOD(void, setUpstreamTransportFailureReason, (absl::string_view)); MOCK_METHOD(const std::string&, upstreamTransportFailureReason, (), (const)); MOCK_METHOD(void, setRequestHeaders, (const Http::RequestHeaderMap&)); @@ -127,7 +125,6 @@ class MockStreamInfo : public StreamInfo { std::shared_ptr downstream_address_provider_; Ssl::ConnectionInfoConstSharedPtr downstream_connection_info_; Ssl::ConnectionInfoConstSharedPtr upstream_connection_info_; - std::string requested_server_name_; std::string route_name_; std::string upstream_transport_failure_reason_; std::string filter_chain_name_; diff --git a/test/mocks/tcp/mocks.cc 
b/test/mocks/tcp/mocks.cc index 75b07cf8cd5f8..d6828f046a147 100644 --- a/test/mocks/tcp/mocks.cc +++ b/test/mocks/tcp/mocks.cc @@ -41,9 +41,9 @@ void MockInstance::poolFailure(PoolFailureReason reason, bool host_null) { callbacks_.pop_front(); handles_.pop_front(); if (host_null) { - cb->onPoolFailure(reason, nullptr); + cb->onPoolFailure(reason, "", nullptr); } else { - cb->onPoolFailure(reason, host_); + cb->onPoolFailure(reason, "", host_); } } diff --git a/test/mocks/tcp/mocks.h b/test/mocks/tcp/mocks.h index 04f0426eb303a..6b486918cb99c 100644 --- a/test/mocks/tcp/mocks.h +++ b/test/mocks/tcp/mocks.h @@ -17,7 +17,8 @@ namespace ConnectionPool { class MockCallbacks : public Callbacks { MOCK_METHOD(void, onPoolFailure, - (PoolFailureReason reason, Upstream::HostDescriptionConstSharedPtr host)); + (PoolFailureReason reason, absl::string_view details, + Upstream::HostDescriptionConstSharedPtr host)); MOCK_METHOD(void, onPoolReady, (ConnectionDataPtr && conn, Upstream::HostDescriptionConstSharedPtr host)); }; diff --git a/test/mocks/upstream/cluster_manager_factory.h b/test/mocks/upstream/cluster_manager_factory.h index ffa11b5e7546e..a9354c97998ed 100644 --- a/test/mocks/upstream/cluster_manager_factory.h +++ b/test/mocks/upstream/cluster_manager_factory.h @@ -26,13 +26,13 @@ class MockClusterManagerFactory : public ClusterManagerFactory { const absl::optional& alternate_protocol_options, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, TimeSource& source, ClusterConnectivityState& state)); MOCK_METHOD(Tcp::ConnectionPool::InstancePtr, allocateTcpConnPool, (Event::Dispatcher & dispatcher, HostConstSharedPtr host, ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsSharedPtr, ClusterConnectivityState& state)); + 
Network::TransportSocketOptionsConstSharedPtr, ClusterConnectivityState& state)); MOCK_METHOD((std::pair), clusterFromProto, (const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cm, diff --git a/test/mocks/upstream/host.h b/test/mocks/upstream/host.h index 211698283782d..cd6cc8e21bc44 100644 --- a/test/mocks/upstream/host.h +++ b/test/mocks/upstream/host.h @@ -130,16 +130,17 @@ class MockHost : public Host { MockHost(); ~MockHost() override; - CreateConnectionData createConnection(Event::Dispatcher& dispatcher, - const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsSharedPtr) const override { + CreateConnectionData + createConnection(Event::Dispatcher& dispatcher, + const Network::ConnectionSocket::OptionsSharedPtr& options, + Network::TransportSocketOptionsConstSharedPtr) const override { MockCreateConnectionData data = createConnection_(dispatcher, options); return {Network::ClientConnectionPtr{data.connection_}, data.host_description_}; } CreateConnectionData createHealthCheckConnection(Event::Dispatcher& dispatcher, - Network::TransportSocketOptionsSharedPtr, + Network::TransportSocketOptionsConstSharedPtr, const envoy::config::core::v3::Metadata*) const override { MockCreateConnectionData data = createConnection_(dispatcher, nullptr); return {Network::ClientConnectionPtr{data.connection_}, data.host_description_}; diff --git a/test/mocks/upstream/load_balancer_context.h b/test/mocks/upstream/load_balancer_context.h index 553ae4e98e2d3..06959126953f5 100644 --- a/test/mocks/upstream/load_balancer_context.h +++ b/test/mocks/upstream/load_balancer_context.h @@ -21,7 +21,7 @@ class MockLoadBalancerContext : public LoadBalancerContext { MOCK_METHOD(bool, shouldSelectAnotherHost, (const Host&)); MOCK_METHOD(uint32_t, hostSelectionRetryCount, (), (const)); MOCK_METHOD(Network::Socket::OptionsSharedPtr, upstreamSocketOptions, (), (const)); - MOCK_METHOD(Network::TransportSocketOptionsSharedPtr, 
upstreamTransportSocketOptions, (), + MOCK_METHOD(Network::TransportSocketOptionsConstSharedPtr, upstreamTransportSocketOptions, (), (const)); private: diff --git a/test/server/BUILD b/test/server/BUILD index acfa8ec40fc7a..70fa142dc5dfa 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -105,6 +105,7 @@ envoy_cc_test( "//source/server:connection_handler_lib", "//test/mocks/access_log:access_log_mocks", "//test/mocks/api:api_mocks", + "//test/mocks/network:io_handle_mocks", "//test/mocks/network:network_mocks", "//test/test_common:network_utility_lib", "//test/test_common:threadsafe_singleton_injector_lib", @@ -116,6 +117,7 @@ envoy_cc_test( srcs = ["drain_manager_impl_test.cc"], deps = [ "//source/server:drain_manager_lib", + "//test/mocks/event:event_mocks", "//test/mocks/server:instance_mocks", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", ], @@ -260,6 +262,7 @@ envoy_cc_test_library( "//source/common/init:manager_lib", "//source/server:listener_manager_lib", "//test/mocks/init:init_mocks", + "//test/mocks/matcher:matcher_mocks", "//test/mocks/network:network_mocks", "//test/mocks/server:drain_manager_mocks", "//test/mocks/server:guard_dog_mocks", diff --git a/test/server/active_tcp_listener_test.cc b/test/server/active_tcp_listener_test.cc index 9d15d452475b6..baaf57421ec08 100644 --- a/test/server/active_tcp_listener_test.cc +++ b/test/server/active_tcp_listener_test.cc @@ -12,6 +12,7 @@ #include "test/mocks/api/mocks.h" #include "test/mocks/common.h" +#include "test/mocks/network/io_handle.h" #include "test/mocks/network/mocks.h" #include "test/test_common/network_utility.h" @@ -37,6 +38,7 @@ class MockTcpConnectionHandler : public Network::TcpConnectionHandler, MOCK_METHOD(Network::BalancedConnectionHandlerOptRef, getBalancedHandlerByAddress, (const Network::Address::Instance& address)); }; + class ActiveTcpListenerTest : public testing::Test, protected Logger::Loggable { public: ActiveTcpListenerTest() { @@ -60,6 +62,62 @@ class 
ActiveTcpListenerTest : public testing::Test, protected Logger::Loggable> listener_filter_matcher_; }; +TEST_F(ActiveTcpListenerTest, PopulateSNIWhenActiveTcpSocketTimeout) { + NiceMock balancer; + EXPECT_CALL(listener_config_, connectionBalancer()).WillRepeatedly(ReturnRef(balancer)); + EXPECT_CALL(listener_config_, listenerScope).Times(testing::AnyNumber()); + EXPECT_CALL(listener_config_, listenerFiltersTimeout()) + .WillOnce(Return(std::chrono::milliseconds(1000))); + EXPECT_CALL(listener_config_, continueOnListenerFiltersTimeout()); + EXPECT_CALL(listener_config_, openConnections()).WillRepeatedly(ReturnRef(resource_limit_)); + + auto listener = std::make_unique>(); + EXPECT_CALL(*listener, onDestroy()); + + auto* test_filter = new NiceMock(); + EXPECT_CALL(*test_filter, destroy_()); + EXPECT_CALL(listener_config_, filterChainFactory()) + .WillRepeatedly(ReturnRef(filter_chain_factory_)); + + // add a filter to stop the filter iteration. + EXPECT_CALL(filter_chain_factory_, createListenerFilterChain(_)) + .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool { + manager.addAcceptFilter(nullptr, Network::ListenerFilterPtr{test_filter}); + return true; + })); + EXPECT_CALL(*test_filter, onAccept(_)) + .WillOnce(Invoke([](Network::ListenerFilterCallbacks&) -> Network::FilterStatus { + return Network::FilterStatus::StopIteration; + })); + + auto active_listener = + std::make_unique(conn_handler_, std::move(listener), listener_config_); + + absl::string_view server_name = "envoy.io"; + auto accepted_socket = std::make_unique>(); + accepted_socket->address_provider_->setRequestedServerName(server_name); + + // fake the socket is open. 
+ NiceMock io_handle; + EXPECT_CALL(*accepted_socket, ioHandle()).WillOnce(ReturnRef(io_handle)); + EXPECT_CALL(io_handle, isOpen()).WillOnce(Return(true)); + + EXPECT_CALL(balancer, pickTargetHandler(_)) + .WillOnce(testing::DoAll( + testing::WithArg<0>(Invoke([](auto& target) { target.incNumConnections(); })), + ReturnRef(*active_listener))); + + // calling the onAcceptWorker() to create the ActiveTcpSocket. + active_listener->onAcceptWorker(std::move(accepted_socket), false, false); + // get the ActiveTcpSocket pointer before unlink() removed from the link-list. + ActiveTcpSocket* tcp_socket = active_listener->sockets_.front().get(); + // trigger the onTimeout event manually, since the timer is fake. + active_listener->sockets_.front()->onTimeout(); + + EXPECT_EQ(server_name, + tcp_socket->stream_info_->downstreamAddressProvider().requestedServerName()); +} + // Verify that the server connection with recovered address is rebalanced at redirected listener. TEST_F(ActiveTcpListenerTest, RedirectedRebalancer) { NiceMock listener_config1; diff --git a/test/server/admin/BUILD b/test/server/admin/BUILD index bd564a1540c6c..d5c121c5c653c 100644 --- a/test/server/admin/BUILD +++ b/test/server/admin/BUILD @@ -61,6 +61,7 @@ envoy_cc_test( ":admin_instance_lib", "//source/common/stats:thread_local_store_lib", "//source/server/admin:stats_handler_lib", + "//test/mocks/server:admin_stream_mocks", "//test/test_common:logging_lib", "//test/test_common:utility_lib", ], @@ -140,6 +141,7 @@ envoy_cc_test( name = "config_tracker_impl_test", srcs = ["config_tracker_impl_test.cc"], deps = [ + "//source/common/common:matchers_lib", "//source/server/admin:config_tracker_lib", "//test/mocks:common_lib", ], diff --git a/test/server/admin/config_dump_handler_test.cc b/test/server/admin/config_dump_handler_test.cc index 6075dffa4a673..7bd46281445bf 100644 --- a/test/server/admin/config_dump_handler_test.cc +++ b/test/server/admin/config_dump_handler_test.cc @@ -39,7 +39,7 @@ void 
addHostInfo(NiceMock& host, const std::string& hostname TEST_P(AdminInstanceTest, ConfigDump) { Buffer::OwnedImpl response; Http::TestResponseHeaderMapImpl header_map; - auto entry = admin_.getConfigTracker().add("foo", [] { + auto entry = admin_.getConfigTracker().add("foo", [](const Matchers::StringMatcher&) { auto msg = std::make_unique(); msg->set_value("bar"); return msg; @@ -60,26 +60,29 @@ TEST_P(AdminInstanceTest, ConfigDump) { TEST_P(AdminInstanceTest, ConfigDumpMaintainsOrder) { // Add configs in random order and validate config_dump dumps in the order. - auto bootstrap_entry = admin_.getConfigTracker().add("bootstrap", [] { - auto msg = std::make_unique(); - msg->set_value("bootstrap_config"); - return msg; - }); - auto route_entry = admin_.getConfigTracker().add("routes", [] { + auto bootstrap_entry = + admin_.getConfigTracker().add("bootstrap", [](const Matchers::StringMatcher&) { + auto msg = std::make_unique(); + msg->set_value("bootstrap_config"); + return msg; + }); + auto route_entry = admin_.getConfigTracker().add("routes", [](const Matchers::StringMatcher&) { auto msg = std::make_unique(); msg->set_value("routes_config"); return msg; }); - auto listener_entry = admin_.getConfigTracker().add("listeners", [] { - auto msg = std::make_unique(); - msg->set_value("listeners_config"); - return msg; - }); - auto cluster_entry = admin_.getConfigTracker().add("clusters", [] { - auto msg = std::make_unique(); - msg->set_value("clusters_config"); - return msg; - }); + auto listener_entry = + admin_.getConfigTracker().add("listeners", [](const Matchers::StringMatcher&) { + auto msg = std::make_unique(); + msg->set_value("listeners_config"); + return msg; + }); + auto cluster_entry = + admin_.getConfigTracker().add("clusters", [](const Matchers::StringMatcher&) { + auto msg = std::make_unique(); + msg->set_value("clusters_config"); + return msg; + }); const std::string expected_json = R"EOF({ "configs": [ { @@ -369,7 +372,7 @@ TEST_P(AdminInstanceTest, 
ConfigDumpWithLocalityEndpoint) { TEST_P(AdminInstanceTest, ConfigDumpFiltersByResource) { Buffer::OwnedImpl response; Http::TestResponseHeaderMapImpl header_map; - auto listeners = admin_.getConfigTracker().add("listeners", [] { + auto listeners = admin_.getConfigTracker().add("listeners", [](const Matchers::StringMatcher&) { auto msg = std::make_unique(); auto dyn_listener = msg->add_dynamic_listeners(); dyn_listener->set_name("foo"); @@ -394,10 +397,11 @@ TEST_P(AdminInstanceTest, ConfigDumpFiltersByResource) { EXPECT_EQ(expected_json, output); } -// Test that using the resource query parameter filters the config dump including EDS. -// We add both static and dynamic endpoint config to the dump, but expect only -// dynamic in the JSON with ?resource=dynamic_endpoint_configs. -TEST_P(AdminInstanceTest, ConfigDumpWithEndpointFiltersByResource) { +// Test that using the resource and name_regex query parameters filter the config dump including +// EDS. We add both static and dynamic endpoint config to the dump, but expect only dynamic in the +// JSON with ?resource=dynamic_endpoint_configs, and only the one named `fake_cluster_2` with +// ?name_regex=fake_cluster_2. +TEST_P(AdminInstanceTest, ConfigDumpWithEndpointFiltersByResourceAndName) { Upstream::ClusterManager::ClusterInfoMaps cluster_maps; ON_CALL(server_.cluster_manager_, clusters()).WillByDefault(ReturnPointee(&cluster_maps)); @@ -479,6 +483,57 @@ TEST_P(AdminInstanceTest, ConfigDumpWithEndpointFiltersByResource) { } )EOF"; EXPECT_EQ(expected_json, output); + + // Check that endpoints dump uses the name_matcher. 
+ Buffer::OwnedImpl response2; + EXPECT_EQ(Http::Code::OK, getCallback("/config_dump?include_eds=true&name_regex=fake_cluster_2", + header_map, response2)); + const std::string expected_json2 = R"EOF({ + "configs": [ + { + "@type": "type.googleapis.com/envoy.admin.v3.EndpointsConfigDump", + "static_endpoint_configs": [ + { + "endpoint_config": { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "cluster_name": "fake_cluster_2", + "endpoints": [ + { + "locality": {}, + "lb_endpoints": [ + { + "endpoint": { + "address": { + "socket_address": { + "address": "1.2.3.5", + "port_value": 8 + } + }, + "health_check_config": { + "port_value": 1, + "hostname": "test_hostname_healthcheck" + }, + "hostname": "boo.com" + }, + "health_status": "HEALTHY", + "metadata": {}, + "load_balancing_weight": 3 + } + ], + "priority": 4 + } + ], + "policy": { + "overprovisioning_factor": 140 + } + } + } + ] + } + ] +} +)EOF"; + EXPECT_EQ(expected_json2, response2.toString()); } // Test that using the mask query parameter filters the config dump. 
@@ -487,7 +542,7 @@ TEST_P(AdminInstanceTest, ConfigDumpWithEndpointFiltersByResource) { TEST_P(AdminInstanceTest, ConfigDumpFiltersByMask) { Buffer::OwnedImpl response; Http::TestResponseHeaderMapImpl header_map; - auto listeners = admin_.getConfigTracker().add("listeners", [] { + auto listeners = admin_.getConfigTracker().add("listeners", [](const Matchers::StringMatcher&) { auto msg = std::make_unique(); auto dyn_listener = msg->add_dynamic_listeners(); dyn_listener->set_name("foo"); @@ -516,7 +571,49 @@ TEST_P(AdminInstanceTest, ConfigDumpFiltersByMask) { EXPECT_EQ(expected_json, output); } -ProtobufTypes::MessagePtr testDumpClustersConfig() { +TEST_P(AdminInstanceTest, ConfigDumpFiltersByNameRegex) { + Buffer::OwnedImpl response; + Http::TestResponseHeaderMapImpl header_map; + auto listeners = + admin_.getConfigTracker().add("listeners", [](const Matchers::StringMatcher& name_matcher) { + auto msg = std::make_unique(); + if (name_matcher.match("bar")) { + auto dyn_listener = msg->add_dynamic_listeners(); + dyn_listener->set_name("bar"); + } + if (name_matcher.match("foo")) { + auto dyn_listener = msg->add_dynamic_listeners(); + dyn_listener->set_name("foo"); + } + return msg; + }); + const std::string expected_json = R"EOF({ + "configs": [ + { + "@type": "type.googleapis.com/envoy.admin.v3.ListenersConfigDump", + "dynamic_listeners": [ + { + "name": "bar" + } + ] + } + ] +} +)EOF"; + EXPECT_EQ(Http::Code::OK, getCallback("/config_dump?name_regex=.*a.*", header_map, response)); + std::string output = response.toString(); + EXPECT_EQ(expected_json, output); +} + +TEST_P(AdminInstanceTest, InvalidRegexIsBadRequest) { + Buffer::OwnedImpl response; + Http::TestResponseHeaderMapImpl header_map; + EXPECT_EQ(Http::Code::BadRequest, getCallback("/config_dump?name_regex=[", header_map, response)); + std::string output = response.toString(); + EXPECT_THAT(output, testing::HasSubstr("Error while parsing name_regex")); +} + +ProtobufTypes::MessagePtr 
testDumpClustersConfig(const Matchers::StringMatcher&) { auto msg = std::make_unique(); auto* static_cluster = msg->add_static_clusters(); envoy::config::cluster::v3::Cluster inner_cluster; @@ -564,24 +661,16 @@ TEST_P(AdminInstanceTest, ConfigDumpFiltersByResourceAndMask) { EXPECT_EQ(expected_json, output); } -// Test that no fields are present in the JSON output if there is no intersection between the fields +// Test that BadRequest is returned if there is no intersection between the fields // of the config dump and the fields present in the mask query parameter. TEST_P(AdminInstanceTest, ConfigDumpNonExistentMask) { Buffer::OwnedImpl response; Http::TestResponseHeaderMapImpl header_map; auto clusters = admin_.getConfigTracker().add("clusters", testDumpClustersConfig); - const std::string expected_json = R"EOF({ - "configs": [ - { - "@type": "type.googleapis.com/envoy.admin.v3.ClustersConfigDump.StaticCluster" - } - ] -} -)EOF"; - EXPECT_EQ(Http::Code::OK, + EXPECT_EQ(Http::Code::BadRequest, getCallback("/config_dump?resource=static_clusters&mask=bad", header_map, response)); std::string output = response.toString(); - EXPECT_EQ(expected_json, output); + EXPECT_THAT(output, testing::HasSubstr("could not be successfully used")); } // Test that a 404 Not found is returned if a non-existent resource is passed in as the @@ -589,7 +678,7 @@ TEST_P(AdminInstanceTest, ConfigDumpNonExistentMask) { TEST_P(AdminInstanceTest, ConfigDumpNonExistentResource) { Buffer::OwnedImpl response; Http::TestResponseHeaderMapImpl header_map; - auto listeners = admin_.getConfigTracker().add("listeners", [] { + auto listeners = admin_.getConfigTracker().add("listeners", [](const Matchers::StringMatcher&) { auto msg = std::make_unique(); msg->set_value("listeners_config"); return msg; @@ -602,7 +691,7 @@ TEST_P(AdminInstanceTest, ConfigDumpNonExistentResource) { TEST_P(AdminInstanceTest, ConfigDumpResourceNotRepeated) { Buffer::OwnedImpl response; Http::TestResponseHeaderMapImpl 
header_map; - auto clusters = admin_.getConfigTracker().add("clusters", [] { + auto clusters = admin_.getConfigTracker().add("clusters", [](const Matchers::StringMatcher&) { auto msg = std::make_unique(); msg->set_version_info("foo"); return msg; @@ -611,5 +700,55 @@ TEST_P(AdminInstanceTest, ConfigDumpResourceNotRepeated) { getCallback("/config_dump?resource=version_info", header_map, response)); } +TEST_P(AdminInstanceTest, InvalidFieldMaskWithResourceDoesNotCrash) { + Buffer::OwnedImpl response; + Http::TestResponseHeaderMapImpl header_map; + auto clusters = admin_.getConfigTracker().add("clusters", [](const Matchers::StringMatcher&) { + auto msg = std::make_unique(); + auto* static_cluster = msg->add_static_clusters(); + envoy::config::cluster::v3::Cluster inner_cluster; + inner_cluster.add_transport_socket_matches()->set_name("match1"); + inner_cluster.add_transport_socket_matches()->set_name("match2"); + static_cluster->mutable_cluster()->PackFrom(inner_cluster); + return msg; + }); + + // `transport_socket_matches` is a repeated field, and cannot be indexed through in a FieldMask. 
+ EXPECT_EQ(Http::Code::BadRequest, + getCallback( + "/config_dump?resource=static_clusters&mask=cluster.transport_socket_matches.name", + header_map, response)); + EXPECT_EQ("FieldMask paths: \"cluster.transport_socket_matches.name\"\n could not be " + "successfully used.", + response.toString()); + EXPECT_EQ(header_map.ContentType()->value().getStringView(), + Http::Headers::get().ContentTypeValues.Text); + EXPECT_EQ(header_map.get(Http::Headers::get().XContentTypeOptions)[0]->value(), + Http::Headers::get().XContentTypeOptionValues.Nosniff); +} + +TEST_P(AdminInstanceTest, InvalidFieldMaskWithoutResourceDoesNotCrash) { + Buffer::OwnedImpl response; + Http::TestResponseHeaderMapImpl header_map; + auto bootstrap = admin_.getConfigTracker().add("bootstrap", [](const Matchers::StringMatcher&) { + auto msg = std::make_unique(); + auto* bootstrap = msg->mutable_bootstrap(); + bootstrap->mutable_node()->add_extensions()->set_name("ext1"); + bootstrap->mutable_node()->add_extensions()->set_name("ext2"); + return msg; + }); + + // `extensions` is a repeated field, and cannot be indexed through in a FieldMask. 
+ EXPECT_EQ(Http::Code::BadRequest, + getCallback("/config_dump?mask=bootstrap.node.extensions.name", header_map, response)); + EXPECT_EQ("FieldMask paths: \"bootstrap.node.extensions.name\"\n could not be " + "successfully used.", + response.toString()); + EXPECT_EQ(header_map.ContentType()->value().getStringView(), + Http::Headers::get().ContentTypeValues.Text); + EXPECT_EQ(header_map.get(Http::Headers::get().XContentTypeOptions)[0]->value(), + Http::Headers::get().XContentTypeOptionValues.Nosniff); +} + } // namespace Server } // namespace Envoy diff --git a/test/server/admin/config_tracker_impl_test.cc b/test/server/admin/config_tracker_impl_test.cc index a666d59a1fbdd..8835c53188295 100644 --- a/test/server/admin/config_tracker_impl_test.cc +++ b/test/server/admin/config_tracker_impl_test.cc @@ -1,3 +1,4 @@ +#include "source/common/common/matchers.h" #include "source/server/admin/config_tracker_impl.h" #include "test/mocks/common.h" @@ -11,7 +12,7 @@ class ConfigTrackerImplTest : public testing::Test { public: ConfigTrackerImplTest() : cbs_map(tracker.getCallbacksMap()) { EXPECT_TRUE(cbs_map.empty()); - test_cb = [this] { + test_cb = [this](const Matchers::StringMatcher&) { called = true; return test_msg(); }; @@ -33,7 +34,7 @@ TEST_F(ConfigTrackerImplTest, Basic) { auto entry_owner = tracker.add("test_key", test_cb); EXPECT_EQ(1, cbs_map.size()); EXPECT_NE(nullptr, entry_owner); - EXPECT_NE(nullptr, cbs_map.begin()->second()); + EXPECT_NE(nullptr, cbs_map.begin()->second(Matchers::UniversalStringMatcher())); EXPECT_TRUE(called); } @@ -61,8 +62,8 @@ TEST_F(ConfigTrackerImplTest, AddDuplicate) { TEST_F(ConfigTrackerImplTest, OperationsWithinCallback) { ConfigTracker::EntryOwnerPtr owner1, owner2; - owner1 = tracker.add("test_key", [&] { - owner2 = tracker.add("test_key2", [&] { + owner1 = tracker.add("test_key", [&](const Matchers::StringMatcher&) { + owner2 = tracker.add("test_key2", [&](const Matchers::StringMatcher&) { owner1.reset(); return test_msg(); }); 
@@ -70,10 +71,10 @@ TEST_F(ConfigTrackerImplTest, OperationsWithinCallback) { }); EXPECT_EQ(1, cbs_map.size()); EXPECT_NE(nullptr, owner1); - EXPECT_NE(nullptr, cbs_map.at("test_key")()); + EXPECT_NE(nullptr, cbs_map.at("test_key")(Matchers::UniversalStringMatcher())); EXPECT_EQ(2, cbs_map.size()); EXPECT_NE(nullptr, owner2); - EXPECT_NE(nullptr, cbs_map.at("test_key2")()); + EXPECT_NE(nullptr, cbs_map.at("test_key2")(Matchers::UniversalStringMatcher())); EXPECT_EQ(1, cbs_map.size()); EXPECT_EQ(0, cbs_map.count("test_key")); } diff --git a/test/server/admin/stats_handler_test.cc b/test/server/admin/stats_handler_test.cc index cf1dc13118936..f07c79b8fb467 100644 --- a/test/server/admin/stats_handler_test.cc +++ b/test/server/admin/stats_handler_test.cc @@ -3,6 +3,8 @@ #include "source/common/stats/thread_local_store.h" #include "source/server/admin/stats_handler.h" +#include "test/mocks/server/admin_stream.h" +#include "test/mocks/server/instance.h" #include "test/server/admin/admin_instance.h" #include "test/test_common/logging.h" #include "test/test_common/utility.h" @@ -50,6 +52,188 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, AdminStatsTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); +TEST_P(AdminStatsTest, HandlerStatsInvalidFormat) { + const std::string url = "/stats?format=blergh"; + Http::TestResponseHeaderMapImpl response_headers; + Buffer::OwnedImpl data; + MockAdminStream admin_stream; + Configuration::MockStatsConfig stats_config; + EXPECT_CALL(stats_config, flushOnAdmin()).WillRepeatedly(testing::Return(false)); + MockInstance instance; + EXPECT_CALL(instance, stats()).WillRepeatedly(testing::ReturnRef(*store_)); + EXPECT_CALL(instance, statsConfig()).WillRepeatedly(testing::ReturnRef(stats_config)); + StatsHandler handler(instance); + Http::Code code = handler.handlerStats(url, response_headers, data, admin_stream); + EXPECT_EQ(Http::Code::NotFound, code); + EXPECT_EQ("usage: /stats?format=json or 
/stats?format=prometheus \n\n", data.toString()); +} + +TEST_P(AdminStatsTest, HandlerStatsPlainText) { + const std::string url = "/stats"; + Http::TestResponseHeaderMapImpl response_headers; + Buffer::OwnedImpl data; + MockAdminStream admin_stream; + Configuration::MockStatsConfig stats_config; + EXPECT_CALL(stats_config, flushOnAdmin()).WillRepeatedly(testing::Return(false)); + MockInstance instance; + store_->initializeThreading(main_thread_dispatcher_, tls_); + EXPECT_CALL(instance, stats()).WillRepeatedly(testing::ReturnRef(*store_)); + EXPECT_CALL(instance, statsConfig()).WillRepeatedly(testing::ReturnRef(stats_config)); + StatsHandler handler(instance); + + Stats::Counter& c1 = store_->counterFromString("c1"); + Stats::Counter& c2 = store_->counterFromString("c2"); + + c1.add(10); + c2.add(20); + + Stats::TextReadout& t = store_->textReadoutFromString("t"); + t.set("hello world"); + + Stats::Histogram& h1 = store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified); + Stats::Histogram& h2 = store_->histogramFromString("h2", Stats::Histogram::Unit::Unspecified); + + EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 200)); + h1.recordValue(200); + + EXPECT_CALL(sink_, onHistogramComplete(Ref(h2), 100)); + h2.recordValue(100); + + store_->mergeHistograms([]() -> void {}); + + Http::Code code = handler.handlerStats(url, response_headers, data, admin_stream); + EXPECT_EQ(Http::Code::OK, code); + EXPECT_EQ("t: \"hello world\"\n" + "c1: 10\n" + "c2: 20\n" + "h1: P0(200.0,200.0) P25(202.5,202.5) P50(205.0,205.0) P75(207.5,207.5) " + "P90(209.0,209.0) P95(209.5,209.5) P99(209.9,209.9) P99.5(209.95,209.95) " + "P99.9(209.99,209.99) P100(210.0,210.0)\n" + "h2: P0(100.0,100.0) P25(102.5,102.5) P50(105.0,105.0) P75(107.5,107.5) " + "P90(109.0,109.0) P95(109.5,109.5) P99(109.9,109.9) P99.5(109.95,109.95) " + "P99.9(109.99,109.99) P100(110.0,110.0)\n", + data.toString()); + + shutdownThreading(); +} + +TEST_P(AdminStatsTest, HandlerStatsJson) { + const 
std::string url = "/stats?format=json"; + Http::TestResponseHeaderMapImpl response_headers; + Buffer::OwnedImpl data; + MockAdminStream admin_stream; + Configuration::MockStatsConfig stats_config; + EXPECT_CALL(stats_config, flushOnAdmin()).WillRepeatedly(testing::Return(false)); + MockInstance instance; + store_->initializeThreading(main_thread_dispatcher_, tls_); + EXPECT_CALL(instance, stats()).WillRepeatedly(testing::ReturnRef(*store_)); + EXPECT_CALL(instance, statsConfig()).WillRepeatedly(testing::ReturnRef(stats_config)); + StatsHandler handler(instance); + + Stats::Counter& c1 = store_->counterFromString("c1"); + Stats::Counter& c2 = store_->counterFromString("c2"); + + c1.add(10); + c2.add(20); + + Stats::TextReadout& t = store_->textReadoutFromString("t"); + t.set("hello world"); + + Stats::Histogram& h = store_->histogramFromString("h", Stats::Histogram::Unit::Unspecified); + + EXPECT_CALL(sink_, onHistogramComplete(Ref(h), 200)); + h.recordValue(200); + + store_->mergeHistograms([]() -> void {}); + + Http::Code code = handler.handlerStats(url, response_headers, data, admin_stream); + EXPECT_EQ(Http::Code::OK, code); + + const std::string expected_json_old = R"EOF({ + "stats": [ + { + "name":"t", + "value":"hello world" + }, + { + "name":"c1", + "value":10, + }, + { + "name":"c2", + "value":20 + }, + { + "histograms": { + "supported_quantiles": [ + 0.0, + 25.0, + 50.0, + 75.0, + 90.0, + 95.0, + 99.0, + 99.5, + 99.9, + 100.0 + ], + "computed_quantiles": [ + { + "name":"h", + "values": [ + { + "cumulative":200, + "interval":200 + }, + { + "cumulative":202.5, + "interval":202.5 + }, + { + "cumulative":205, + "interval":205 + }, + { + "cumulative":207.5, + "interval":207.5 + }, + { + "cumulative":209, + "interval":209 + }, + { + "cumulative":209.5, + "interval":209.5 + }, + { + "cumulative":209.9, + "interval":209.9 + }, + { + "cumulative":209.95, + "interval":209.95 + }, + { + "cumulative":209.99, + "interval":209.99 + }, + { + "cumulative":210, + 
"interval":210 + } + ] + }, + ] + } + } + ] +})EOF"; + + EXPECT_THAT(expected_json_old, JsonStringEq(data.toString())); + + shutdownThreading(); +} + TEST_P(AdminStatsTest, StatsAsJson) { InSequence s; store_->initializeThreading(main_thread_dispatcher_, tls_); diff --git a/test/server/config_validation/BUILD b/test/server/config_validation/BUILD index 2292906cddb94..51ebda4ae37c4 100644 --- a/test/server/config_validation/BUILD +++ b/test/server/config_validation/BUILD @@ -133,6 +133,7 @@ envoy_cc_test_library( deps = [ ":xds_fuzz_proto_cc_proto", ":xds_verifier_lib", + "//source/common/common:matchers_lib", "//test/fuzz:utility_lib", "//test/integration:http_integration_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", diff --git a/test/server/config_validation/xds_fuzz.cc b/test/server/config_validation/xds_fuzz.cc index 3b4d23f4b1575..653acd8db012b 100644 --- a/test/server/config_validation/xds_fuzz.cc +++ b/test/server/config_validation/xds_fuzz.cc @@ -6,6 +6,8 @@ #include "envoy/config/listener/v3/listener.pb.h" #include "envoy/config/route/v3/route.pb.h" +#include "source/common/common/matchers.h" + namespace Envoy { // Helper functions to build API responses. 
@@ -380,8 +382,8 @@ void XdsFuzzTest::verifyState() { } envoy::admin::v3::ListenersConfigDump XdsFuzzTest::getListenersConfigDump() { - auto message_ptr = - test_server_->server().admin().getConfigTracker().getCallbacksMap().at("listeners")(); + auto message_ptr = test_server_->server().admin().getConfigTracker().getCallbacksMap().at( + "listeners")(Matchers::UniversalStringMatcher()); return dynamic_cast(*message_ptr); } @@ -393,7 +395,7 @@ std::vector XdsFuzzTest::getRoutes return {}; } - auto message_ptr = map.at("routes")(); + auto message_ptr = map.at("routes")(Matchers::UniversalStringMatcher()); auto dump = dynamic_cast(*message_ptr); // Since the route config dump gives the RouteConfigurations as an Any, go through and cast them diff --git a/test/server/drain_manager_impl_test.cc b/test/server/drain_manager_impl_test.cc index eadddc978abe1..d0a2a63b6c1e8 100644 --- a/test/server/drain_manager_impl_test.cc +++ b/test/server/drain_manager_impl_test.cc @@ -1,5 +1,6 @@ #include +#include "envoy/common/callback.h" #include "envoy/config/listener/v3/listener.pb.h" #include "source/server/drain_manager_impl.h" @@ -10,7 +11,10 @@ #include "gtest/gtest.h" using testing::_; +using testing::AllOf; +using testing::Ge; using testing::InSequence; +using testing::Le; using testing::Return; namespace Envoy { @@ -34,7 +38,8 @@ class DrainManagerImplTest : public Event::TestUsingSimulatedTime, TEST_F(DrainManagerImplTest, Default) { InSequence s; - DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::DEFAULT); + DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::DEFAULT, + server_.dispatcher()); // Test parent shutdown. 
Event::MockTimer* shutdown_timer = new Event::MockTimer(&server_.dispatcher_); @@ -62,7 +67,8 @@ TEST_F(DrainManagerImplTest, Default) { TEST_F(DrainManagerImplTest, ModifyOnly) { InSequence s; - DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::MODIFY_ONLY); + DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::MODIFY_ONLY, + server_.dispatcher()); EXPECT_CALL(server_, healthCheckFailed()).Times(0); // Listener check will short-circuit EXPECT_FALSE(drain_manager.drainClose()); @@ -75,7 +81,8 @@ TEST_P(DrainManagerImplTest, DrainDeadline) { : Server::DrainStrategy::Immediate)); // TODO(auni53): Add integration tests for this once TestDrainManager is // removed. - DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::DEFAULT); + DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::DEFAULT, + server_.dispatcher()); // Ensure drainClose() behaviour is determined by the deadline. drain_manager.startDrainSequence([] {}); @@ -120,7 +127,8 @@ TEST_P(DrainManagerImplTest, DrainDeadlineProbability) { ON_CALL(server_.api_.random_, random()).WillByDefault(Return(4)); ON_CALL(server_.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(3))); - DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::DEFAULT); + DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::DEFAULT, + server_.dispatcher()); EXPECT_CALL(server_, healthCheckFailed()).WillOnce(Return(true)); EXPECT_TRUE(drain_manager.drainClose()); @@ -151,8 +159,410 @@ TEST_P(DrainManagerImplTest, DrainDeadlineProbability) { } } +TEST_P(DrainManagerImplTest, OnDrainCallbacks) { + constexpr int num_cbs = 20; + const bool drain_gradually = GetParam(); + ON_CALL(server_.options_, drainStrategy()) + .WillByDefault(Return(drain_gradually ? 
Server::DrainStrategy::Gradual + : Server::DrainStrategy::Immediate)); + ON_CALL(server_.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(4))); + + DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::DEFAULT, + server_.dispatcher()); + + { + // Register callbacks (store in array to keep in scope for test) + std::array, num_cbs> cbs; + std::array cb_handles; + for (auto i = 0; i < num_cbs; i++) { + auto& cb = cbs[i]; + if (drain_gradually) { + auto step = 1000 / num_cbs; + EXPECT_CALL(cb, Call(_)).WillRepeatedly(Invoke([i, step](std::chrono::milliseconds delay) { + // Everything should happen within the first 1/4 of the drain time + EXPECT_LT(delay.count(), 1001); + + // Validate that our wait times are spread out (within some small error) + EXPECT_THAT(delay.count(), AllOf(Ge(i * step - 1), Le(i * step + 1))); + })); + } else { + EXPECT_CALL(cb, Call(std::chrono::milliseconds{0})); + } + + cb_handles[i] = drain_manager.addOnDrainCloseCb(cb.AsStdFunction()); + } + drain_manager.startDrainSequence([] {}); + } + + EXPECT_TRUE(drain_manager.draining()); +} + INSTANTIATE_TEST_SUITE_P(DrainStrategies, DrainManagerImplTest, testing::Bool()); +// Test gradual draining when there are more callbacks than milliseconds in the drain time, +// which should cause some drains to happen within roughly the same window. 
+TEST_F(DrainManagerImplTest, OnDrainCallbacksManyGradualSteps) { + constexpr int num_cbs = 3000; + ON_CALL(server_.options_, drainStrategy()).WillByDefault(Return(Server::DrainStrategy::Gradual)); + ON_CALL(server_.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(4))); + + DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::DEFAULT, + server_.dispatcher()); + + { + // Register callbacks (store in array to keep in scope for test) + std::array, num_cbs> cbs; + std::array cb_handles; + for (auto i = 0; i < num_cbs; i++) { + auto& cb = cbs[i]; + auto step = 1000.0 / num_cbs; + EXPECT_CALL(cb, Call(_)).WillRepeatedly(Invoke([i, step](std::chrono::milliseconds delay) { + // Everything should happen within the first 1/4 of the drain time + EXPECT_LT(delay.count(), 1001); + + // Validate that our wait times are spread out (within some small error) + EXPECT_THAT(delay.count(), AllOf(Ge(i * step - 1), Le(i * step + 1))); + })); + + cb_handles[i] = drain_manager.addOnDrainCloseCb(cb.AsStdFunction()); + } + drain_manager.startDrainSequence([] {}); + } + + EXPECT_TRUE(drain_manager.draining()); +} + +// Test gradual draining when the number of callbacks does not evenly divide into +// the drain time. 
+TEST_F(DrainManagerImplTest, OnDrainCallbacksNonEvenlyDividedSteps) { + constexpr int num_cbs = 30; + ON_CALL(server_.options_, drainStrategy()).WillByDefault(Return(Server::DrainStrategy::Gradual)); + ON_CALL(server_.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(1))); + + DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::DEFAULT, + server_.dispatcher()); + + { + // Register callbacks (store in array to keep in scope for test) + std::array, num_cbs> cbs; + std::array cb_handles; + for (auto i = 0; i < num_cbs; i++) { + auto& cb = cbs[i]; + auto step = 250.0 / num_cbs; + EXPECT_CALL(cb, Call(_)).WillRepeatedly(Invoke([i, step](std::chrono::milliseconds delay) { + // Everything should happen within the first 1/4 of the drain time + EXPECT_LT(delay.count(), 251); + + // Validate that our wait times are spread out (within some small error) + EXPECT_THAT(delay.count(), AllOf(Ge(i * step - 1), Le(i * step + 1))); + })); + + cb_handles[i] = drain_manager.addOnDrainCloseCb(cb.AsStdFunction()); + } + + drain_manager.startDrainSequence([] {}); + } + + EXPECT_TRUE(drain_manager.draining()); +} + +// Validate the expected behavior when a drain-close callback is registered after draining has begun +// with a Gradual drain strategy (should be called with delay between 0 and maximum) +TEST_F(DrainManagerImplTest, RegisterCallbackAfterDrainBeginGradualStrategy) { + ON_CALL(server_.options_, drainStrategy()).WillByDefault(Return(Server::DrainStrategy::Gradual)); + ON_CALL(server_.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(1))); + + DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::DEFAULT, + server_.dispatcher()); + + testing::MockFunction cb_before_drain; + testing::MockFunction cb_after_drain1; + testing::MockFunction cb_after_drain2; + + EXPECT_CALL(cb_before_drain, Call(_)); + // Validate that callbacks after the drain sequence has started (or after the drain deadline + // has 
been reached) are called with a random value between 0 (immediate) and the max + // drain window (minus time that has passed). + EXPECT_CALL(cb_after_drain1, Call(_)).WillOnce(Invoke([](std::chrono::milliseconds delay) { + EXPECT_THAT(delay.count(), Ge(0)); + EXPECT_THAT(delay.count(), Le(990)); + })); + EXPECT_CALL(cb_after_drain2, Call(_)).WillOnce(Invoke([](std::chrono::milliseconds delay) { + EXPECT_EQ(delay.count(), 0); + })); + + auto before_handle = drain_manager.addOnDrainCloseCb(cb_before_drain.AsStdFunction()); + drain_manager.startDrainSequence([] {}); + + server_.api_.time_system_.advanceTimeWait(std::chrono::milliseconds(10)); + auto after_handle1 = drain_manager.addOnDrainCloseCb(cb_after_drain1.AsStdFunction()); + + server_.api_.time_system_.advanceTimeWait(std::chrono::milliseconds(1000)); + auto after_handle2 = drain_manager.addOnDrainCloseCb(cb_after_drain2.AsStdFunction()); + + EXPECT_EQ(after_handle1, nullptr); + EXPECT_EQ(after_handle2, nullptr); +} + +// Validate the expected behavior when a drain-close callback is registered after draining has begun +// with an Immediate drain strategy (should be called with 0 delay) +TEST_F(DrainManagerImplTest, RegisterCallbackAfterDrainBeginImmediateStrategy) { + ON_CALL(server_.options_, drainStrategy()).WillByDefault(Return(Server::DrainStrategy::Gradual)); + ON_CALL(server_.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(1))); + + DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::DEFAULT, + server_.dispatcher()); + + testing::MockFunction cb_before_drain; + testing::MockFunction cb_after_drain; + + EXPECT_CALL(cb_before_drain, Call(_)); + EXPECT_CALL(cb_after_drain, Call(_)).WillOnce(Invoke([](std::chrono::milliseconds delay) { + EXPECT_EQ(delay.count(), 0); + })); + + auto before_handle = drain_manager.addOnDrainCloseCb(cb_before_drain.AsStdFunction()); + drain_manager.startDrainSequence([] {}); + auto after_handle = 
drain_manager.addOnDrainCloseCb(cb_after_drain.AsStdFunction());
+ EXPECT_EQ(after_handle, nullptr);
+}
+
+// Destruction doesn't trigger draining, so it should be safe for the parent to be cleaned up
+// before the child.
+TEST_F(DrainManagerImplTest, ParentDestructedBeforeChildren) {
+ ON_CALL(server_.options_, drainStrategy()).WillByDefault(Return(Server::DrainStrategy::Gradual));
+ ON_CALL(server_.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(1)));
+
+ auto parent = std::make_unique(
+ server_, envoy::config::listener::v3::Listener::DEFAULT, server_.dispatcher());
+ auto child_a = parent->createChildManager(server_.dispatcher());
+ auto child_b = parent->createChildManager(server_.dispatcher());
+
+ EXPECT_FALSE(parent->draining());
+ EXPECT_FALSE(child_a->draining());
+ EXPECT_FALSE(child_b->draining());
+
+ parent.reset();
+
+ // parent destruction should not affect drain state
+ EXPECT_FALSE(child_a->draining());
+ EXPECT_FALSE(child_b->draining());
+
+ // Further children creation (from existing children) is still possible
+ auto child_a1 = child_a->createChildManager(server_.dispatcher());
+ auto child_b1 = child_b->createChildManager(server_.dispatcher());
+ EXPECT_TRUE(child_a1 != nullptr);
+ EXPECT_TRUE(child_b1 != nullptr);
+
+ // draining cascades as expected
+ int called = 0;
+ testing::MockFunction cb_a1;
+ testing::MockFunction cb_b1;
+ EXPECT_CALL(cb_a1, Call(_)).WillRepeatedly(Invoke([&called](std::chrono::milliseconds) {
+ called += 1;
+ }));
+ EXPECT_CALL(cb_b1, Call(_)).WillRepeatedly(Invoke([&called](std::chrono::milliseconds) {
+ called += 1;
+ }));
+ auto handle_a1 = child_a1->addOnDrainCloseCb(cb_a1.AsStdFunction());
+ auto handle_b1 = child_b1->addOnDrainCloseCb(cb_b1.AsStdFunction());
+ child_a->startDrainSequence([] {});
+ child_b->startDrainSequence([] {});
+ EXPECT_EQ(called, 2);
+
+ // It is safe to clean up children
+ child_a.reset();
+ child_b.reset();
+}
+
+// Validate that draining will cascade through all nodes in
the tree. This test uses the following +// tree structure: +// a +// │ +// ┌──────┴────────┐ +// ▼ ▼ +// b c +// │ │ +// ┌───┴────┐ ┌────┴───┐ +// ▼ ▼ ▼ ▼ +// d e f g +TEST_F(DrainManagerImplTest, DrainingCascadesThroughAllNodesInTree) { + ON_CALL(server_.options_, drainStrategy()).WillByDefault(Return(Server::DrainStrategy::Gradual)); + ON_CALL(server_.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(1))); + + auto a = DrainManagerImpl(server_, envoy::config::listener::v3::Listener::DEFAULT, + server_.dispatcher()); + + auto b = a.createChildManager(server_.dispatcher()); + auto d = b->createChildManager(server_.dispatcher()); + auto e = b->createChildManager(server_.dispatcher()); + + auto c = a.createChildManager(server_.dispatcher()); + auto f = c->createChildManager(server_.dispatcher()); + auto g = c->createChildManager(server_.dispatcher()); + + // wire up callbacks at all levels + int call_count = 0; + std::array, 7> cbs; + + for (auto& cb : cbs) { + EXPECT_CALL(cb, Call(_)).WillOnce(Invoke([&call_count](std::chrono::milliseconds) { + call_count++; + })); + } + auto handle_a = a.addOnDrainCloseCb(cbs[0].AsStdFunction()); + auto handle_b = b->addOnDrainCloseCb(cbs[1].AsStdFunction()); + auto handle_c = c->addOnDrainCloseCb(cbs[2].AsStdFunction()); + auto handle_d = d->addOnDrainCloseCb(cbs[3].AsStdFunction()); + auto handle_e = e->addOnDrainCloseCb(cbs[4].AsStdFunction()); + auto handle_f = f->addOnDrainCloseCb(cbs[5].AsStdFunction()); + auto handle_g = g->addOnDrainCloseCb(cbs[6].AsStdFunction()); + + a.startDrainSequence([] {}); + EXPECT_EQ(call_count, 7); +} + +// Validate that sub-trees are independent of each other (a tree's drain-state is not affected by +// its neighbors). This test uses the following tree structure: +// a +// │ +// ┌──────┴────────┐ +// ▼ ▼ +// b c +// │ │ +// ┌───┴────┐ ┌────┴───┐ +// ▼ ▼ ▼ ▼ +// d e f g +// +// Draining will happen on B and validate that no impact is seen on C. 
+TEST_F(DrainManagerImplTest, DrainingIsIndependentToNeighbors) {
+ ON_CALL(server_.options_, drainStrategy()).WillByDefault(Return(Server::DrainStrategy::Gradual));
+ ON_CALL(server_.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(1)));
+
+ auto a = DrainManagerImpl(server_, envoy::config::listener::v3::Listener::DEFAULT,
+ server_.dispatcher());
+
+ auto b = a.createChildManager(server_.dispatcher());
+ auto d = b->createChildManager(server_.dispatcher());
+ auto e = b->createChildManager(server_.dispatcher());
+
+ auto c = a.createChildManager(server_.dispatcher());
+ auto f = c->createChildManager(server_.dispatcher());
+ auto g = c->createChildManager(server_.dispatcher());
+
+ int call_count = 0;
+ testing::MockFunction cb_d;
+ testing::MockFunction cb_e;
+ testing::MockFunction cb_f;
+ testing::MockFunction cb_g;
+
+ EXPECT_CALL(cb_d, Call(_)).WillOnce(Invoke([&call_count](std::chrono::milliseconds) {
+ call_count++;
+ }));
+ EXPECT_CALL(cb_e, Call(_)).WillOnce(Invoke([&call_count](std::chrono::milliseconds) {
+ call_count++;
+ }));
+ // validate neighbor remains unaffected
+ EXPECT_CALL(cb_f, Call(_)).Times(0);
+ EXPECT_CALL(cb_g, Call(_)).Times(0);
+
+ auto handle_d = d->addOnDrainCloseCb(cb_d.AsStdFunction());
+ auto handle_e = e->addOnDrainCloseCb(cb_e.AsStdFunction());
+ auto handle_f = f->addOnDrainCloseCb(cb_f.AsStdFunction());
+ auto handle_g = g->addOnDrainCloseCb(cb_g.AsStdFunction());
+
+ b->startDrainSequence([] {});
+ EXPECT_EQ(call_count, 2);
+}
+
+// Validate that draining of a child does not impact the drain-state of the parent
+TEST_F(DrainManagerImplTest, DrainOnlyCascadesDownwards) {
+ ON_CALL(server_.options_, drainStrategy()).WillByDefault(Return(Server::DrainStrategy::Gradual));
+ ON_CALL(server_.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(1)));
+
+ auto a = DrainManagerImpl(server_, envoy::config::listener::v3::Listener::DEFAULT,
+ server_.dispatcher());
+ auto b = 
a.createChildManager(server_.dispatcher()); + auto c = b->createChildManager(server_.dispatcher()); + + int call_count = 0; + testing::MockFunction cb_a; + testing::MockFunction cb_b; + testing::MockFunction cb_c; + + // validate top-level callback is never fired + EXPECT_CALL(cb_a, Call(_)).Times(0); + EXPECT_CALL(cb_b, Call(_)).WillOnce(Invoke([&call_count](std::chrono::milliseconds) { + call_count++; + })); + EXPECT_CALL(cb_c, Call(_)).WillOnce(Invoke([&call_count](std::chrono::milliseconds) { + call_count++; + })); + auto handle_a = a.addOnDrainCloseCb(cb_a.AsStdFunction()); + auto handle_b = b->addOnDrainCloseCb(cb_b.AsStdFunction()); + auto handle_c = c->addOnDrainCloseCb(cb_c.AsStdFunction()); + + // drain the middle of the tree + b->startDrainSequence([] {}); + EXPECT_EQ(call_count, 2); +} + +// Validate that we can initiate draining on a child (to no effect) after the parent +// has already started draining +TEST_F(DrainManagerImplTest, DrainChildExplicitlyAfterParent) { + ON_CALL(server_.options_, drainStrategy()).WillByDefault(Return(Server::DrainStrategy::Gradual)); + ON_CALL(server_.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(1))); + + auto a = DrainManagerImpl(server_, envoy::config::listener::v3::Listener::DEFAULT, + server_.dispatcher()); + auto b = a.createChildManager(server_.dispatcher()); + auto c = b->createChildManager(server_.dispatcher()); + + int call_count = 0; + testing::MockFunction cb; + + // validate top-level callback is never fired + EXPECT_CALL(cb, Call(_)).WillRepeatedly(Invoke([&call_count](std::chrono::milliseconds) { + call_count++; + })); + auto handle_a = a.addOnDrainCloseCb(cb.AsStdFunction()); + auto handle_b = b->addOnDrainCloseCb(cb.AsStdFunction()); + auto handle_c = c->addOnDrainCloseCb(cb.AsStdFunction()); + + // Drain the parent, then the child + a.startDrainSequence([&] {}); + b->startDrainSequence([&] {}); + EXPECT_EQ(call_count, 3); +} + +// Validate that we can initiate draining on a parent 
safely after a child has
+// already started draining
+TEST_F(DrainManagerImplTest, DrainParentAfterChild) {
+ ON_CALL(server_.options_, drainStrategy()).WillByDefault(Return(Server::DrainStrategy::Gradual));
+ ON_CALL(server_.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(1)));
+
+ auto a = DrainManagerImpl(server_, envoy::config::listener::v3::Listener::DEFAULT,
+ server_.dispatcher());
+ auto b = a.createChildManager(server_.dispatcher());
+ auto c = b->createChildManager(server_.dispatcher());
+
+ int call_count = 0;
+ testing::MockFunction cb;
+
+ // the shared callback should fire once per registration (all three levels)
+ EXPECT_CALL(cb, Call(_)).WillRepeatedly(Invoke([&call_count](std::chrono::milliseconds) {
+ call_count++;
+ }));
+ auto handle_a = a.addOnDrainCloseCb(cb.AsStdFunction());
+ auto handle_b = b->addOnDrainCloseCb(cb.AsStdFunction());
+ auto handle_c = c->addOnDrainCloseCb(cb.AsStdFunction());
+
+ // Drain the child, then the parent
+ b->startDrainSequence([] {});
+ a.startDrainSequence([] {});
+ EXPECT_EQ(call_count, 3);
+}
+
 } // namespace
 } // namespace Server
 } // namespace Envoy
diff --git a/test/server/filter_chain_benchmark_test.cc b/test/server/filter_chain_benchmark_test.cc
index f14ab189beb71..89569264633b0 100644
--- a/test/server/filter_chain_benchmark_test.cc
+++ b/test/server/filter_chain_benchmark_test.cc
@@ -61,6 +61,8 @@ class MockConnectionSocket : public Network::ConnectionSocket {
 } else {
 res->address_provider_->setRemoteAddress(
 Network::Utility::parseInternetAddress(source_address, source_port));
+ res->address_provider_->setDirectRemoteAddressForTest(
+ Network::Utility::parseInternetAddress(source_address, source_port));
 }
 res->server_name_ = server_name;
 res->transport_protocol_ = transport_protocol;
@@ -124,7 +126,7 @@ class MockConnectionSocket : public Network::ConnectionSocket {
 private:
 Network::IoHandlePtr io_handle_;
 OptionsSharedPtr options_;
- Network::SocketAddressSetterSharedPtr address_provider_;
+ std::shared_ptr 
address_provider_; std::string server_name_; std::string transport_protocol_; std::vector application_protocols_; @@ -134,7 +136,6 @@ const char YamlHeader[] = R"EOF( socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - filter_chain_match: # empty diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index e8fc4bb106168..fa67c82401dc0 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -26,6 +26,7 @@ #include "source/extensions/transport_sockets/tls/ssl_socket.h" #include "test/mocks/init/mocks.h" +#include "test/mocks/matcher/mocks.h" #include "test/server/utility.h" #include "test/test_common/network_utility.h" #include "test/test_common/registry.h" @@ -441,7 +442,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, BadFilterConfig) { - filters: - foo: type name: name - typed_config: {} )EOF"; EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), @@ -477,7 +477,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TerminalNotLast) { filter_chains: - filters: - name: non_terminal - typed_config: {} )EOF"; EXPECT_THROW_WITH_REGEX( @@ -500,7 +499,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, NotTerminalLast) { stat_prefix: tcp cluster: cluster - name: unknown_but_will_not_be_processed - typed_config: {} )EOF"; EXPECT_THROW_WITH_REGEX( @@ -518,7 +516,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, BadFilterName) { filter_chains: - filters: - name: invalid - typed_config: {} )EOF"; EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), @@ -568,7 +565,6 @@ bind_to_port: false filter_chains: - filters: - name: stats_test - typed_config: {} )EOF"; EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, ListenSocketCreationParams(false))); @@ -946,7 +942,6 @@ name: foo 
filter_chains: - filters: - name: fake - typed_config: {} )EOF"; EXPECT_FALSE( @@ -1159,7 +1154,8 @@ name: foo TEST_F(ListenerManagerImplTest, AddOrUpdateListener) { time_system_.setSystemTime(std::chrono::milliseconds(1001001001001)); - + NiceMock mock_matcher; + ON_CALL(mock_matcher, match(_)).WillByDefault(Return(false)); InSequence s; auto* lds_api = new MockLdsApi(); @@ -1207,6 +1203,13 @@ version_info: version1 seconds: 1001001001 nanos: 1000000 )EOF"); + EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version1")); + checkConfigDump(R"EOF( +version_info: version1 +static_listeners: +dynamic_listeners: +)EOF", + mock_matcher); // Update duplicate should be a NOP. EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); @@ -1251,6 +1254,13 @@ version_info: version2 seconds: 2002002002 nanos: 2000000 )EOF"); + EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version2")); + checkConfigDump(R"EOF( +version_info: version2 +static_listeners: +dynamic_listeners: +)EOF", + mock_matcher); // Validate that workers_started stat is zero before calling startWorkers. EXPECT_EQ(0, server_.stats_store_ @@ -1326,6 +1336,14 @@ version_info: version3 nanos: 2000000 )EOF"); + EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version3")); + checkConfigDump(R"EOF( +version_info: version3 +static_listeners: +dynamic_listeners: +)EOF", + mock_matcher); + EXPECT_CALL(*worker_, removeListener(_, _)); listener_foo_update1->drain_manager_->drain_sequence_completion_(); checkStats(__LINE__, 1, 2, 0, 0, 1, 1, 0); @@ -1421,6 +1439,14 @@ version_info: version5 nanos: 5000000 )EOF"); + EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version5")); + checkConfigDump(R"EOF( +version_info: version5 +static_listeners: +dynamic_listeners: +)EOF", + mock_matcher); + // Update a duplicate baz that is currently warming. 
EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_baz_yaml), "", true)); checkStats(__LINE__, 3, 2, 0, 1, 2, 0, 0); @@ -1435,7 +1461,6 @@ name: baz filter_chains: - filters: - name: fake - typed_config: {} )EOF"; ListenerHandle* listener_baz_update1 = expectListenerCreate(true, true); @@ -2375,7 +2400,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationP socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - filter_chain_match: destination_port: 8080 @@ -2415,7 +2439,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationP EXPECT_EQ(filter_chain, nullptr); } -TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationIPMatch) { +TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDirectSourceIPMatch) { const std::string yaml = TestEnvironment::substitute(R"EOF( address: socket_address: { address: 127.0.0.1, port_value: 1234 } @@ -2423,6 +2447,51 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationI - name: "envoy.filters.listener.tls_inspector" typed_config: {} filter_chains: + - filter_chain_match: + direct_source_prefix_ranges: { address_prefix: 127.0.0.0, prefix_len: 8 } + transport_socket: + name: tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem" } + private_key: { filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem" } + )EOF", + Network::Address::IpVersion::v4); + + EXPECT_CALL(server_.api_.random_, uuid()); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", 
true); + EXPECT_EQ(1U, manager_->listeners().size()); + + // IPv4 client connects to unknown IP - no match. + auto filter_chain = findFilterChain(1234, "1.2.3.4", "", "tls", {}, "8.8.8.8", 111, "1.2.3.4"); + EXPECT_EQ(filter_chain, nullptr); + + // IPv4 client connects to valid IP - using 1st filter chain. + filter_chain = findFilterChain(1234, "1.2.3.4", "", "tls", {}, "8.8.8.8", 111, "127.0.0.1"); + ASSERT_NE(filter_chain, nullptr); + EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); + auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); + auto ssl_socket = + dynamic_cast(transport_socket.get()); + auto server_names = ssl_socket->ssl()->dnsSansLocalCertificate(); + EXPECT_EQ(server_names.size(), 1); + EXPECT_EQ(server_names.front(), "server1.example.com"); + + // UDS client - no match. + filter_chain = findFilterChain(0, "/tmp/test.sock", "", "tls", {}, "/tmp/test.sock", 111); + EXPECT_EQ(filter_chain, nullptr); +} + +TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationIPMatch) { + const std::string yaml = TestEnvironment::substitute(R"EOF( + address: + socket_address: { address: 127.0.0.1, port_value: 1234 } + listener_filters: + - name: "envoy.filters.listener.tls_inspector" + filter_chains: - filter_chain_match: prefix_ranges: { address_prefix: 127.0.0.0, prefix_len: 8 } transport_socket: @@ -2467,7 +2536,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithServerNamesM socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - filter_chain_match: server_names: "server1.example.com" @@ -2514,7 +2582,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithTransportPro socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - 
filter_chain_match: transport_protocol: "tls" @@ -2556,7 +2623,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithApplicationP socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - filter_chain_match: application_protocols: "http/1.1" @@ -2603,7 +2669,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceTypeMa socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - filter_chain_match: source_type: SAME_IP_OR_LOOPBACK @@ -2662,7 +2727,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpMatc socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - filter_chain_match: source_prefix_ranges: @@ -2722,7 +2786,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpv6Ma socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - filter_chain_match: source_prefix_ranges: @@ -2762,7 +2825,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourcePortMa socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - filter_chain_match: source_ports: @@ -2809,7 +2871,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainWithSourceType socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - filter_chain_match: source_type: SAME_IP_OR_LOOPBACK @@ -2897,7 +2958,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinati 
socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - filter_chain_match: # empty @@ -2983,7 +3043,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinati socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - filter_chain_match: # empty @@ -3023,7 +3082,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinati manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); - // IPv4 client connects to default IP - using 1st filter chain. + // UDS client connects - using 1st filter chain with no IP match auto filter_chain = findFilterChain(1234, "127.0.0.1", "", "tls", {}, "127.0.0.1", 111); ASSERT_NE(filter_chain, nullptr); EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); @@ -3033,6 +3092,15 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinati auto uri = ssl_socket->ssl()->uriSanLocalCertificate(); EXPECT_EQ(uri[0], "spiffe://lyft.com/test-team"); + // IPv4 client connects to default IP - using 1st filter chain. + filter_chain = findFilterChain(1234, "127.0.0.1", "", "tls", {}, "127.0.0.1", 111); + ASSERT_NE(filter_chain, nullptr); + EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); + transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); + ssl_socket = dynamic_cast(transport_socket.get()); + uri = ssl_socket->ssl()->uriSanLocalCertificate(); + EXPECT_EQ(uri[0], "spiffe://lyft.com/test-team"); + // IPv4 client connects to exact IP match - using 2nd filter chain. 
filter_chain = findFilterChain(1234, "192.168.0.1", "", "tls", {}, "127.0.0.1", 111); ASSERT_NE(filter_chain, nullptr); @@ -3063,7 +3131,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinati EXPECT_EQ(uri[0], "spiffe://lyft.com/test-team"); } -TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithServerNamesMatch) { +TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDirectSourceIPMatch) { const std::string yaml = TestEnvironment::substitute(R"EOF( address: socket_address: { address: 127.0.0.1, port_value: 1234 } @@ -3071,6 +3139,91 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithServerNam - name: "envoy.filters.listener.tls_inspector" typed_config: {} filter_chains: + - filter_chain_match: + # empty + transport_socket: + name: tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem" } + private_key: { filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem" } + - filter_chain_match: + direct_source_prefix_ranges: { address_prefix: 192.168.0.1, prefix_len: 32 } + transport_socket: + name: tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem" } + private_key: { filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem" } + - filter_chain_match: + direct_source_prefix_ranges: { address_prefix: 192.168.0.0, prefix_len: 16 } + transport_socket: + name: tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + 
common_tls_context: + tls_certificates: + - certificate_chain: { filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem" } + private_key: { filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_key.pem" } + )EOF", + Network::Address::IpVersion::v4); + + EXPECT_CALL(server_.api_.random_, uuid()); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); + EXPECT_EQ(1U, manager_->listeners().size()); + + // UDS client connects - using 1st filter chain with no IP match + auto filter_chain = findFilterChain(1234, "/uds_1", "", "tls", {}, "/uds_2", 111, "/uds_3"); + ASSERT_NE(filter_chain, nullptr); + EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); + auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); + auto ssl_socket = + dynamic_cast(transport_socket.get()); + auto uri = ssl_socket->ssl()->uriSanLocalCertificate(); + EXPECT_EQ(uri[0], "spiffe://lyft.com/test-team"); + + // IPv4 client connects to default IP - using 1st filter chain. + filter_chain = findFilterChain(1234, "127.0.0.1", "", "tls", {}, "127.0.0.1", 111, "127.0.0.1"); + ASSERT_NE(filter_chain, nullptr); + EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); + transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); + ssl_socket = dynamic_cast(transport_socket.get()); + uri = ssl_socket->ssl()->uriSanLocalCertificate(); + EXPECT_EQ(uri[0], "spiffe://lyft.com/test-team"); + + // IPv4 client connects to exact IP match - using 2nd filter chain. 
+ filter_chain = findFilterChain(1234, "127.0.0.1", "", "tls", {}, "127.0.0.1", 111, "192.168.0.1"); + ASSERT_NE(filter_chain, nullptr); + EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); + transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); + ssl_socket = dynamic_cast(transport_socket.get()); + auto server_names = ssl_socket->ssl()->dnsSansLocalCertificate(); + EXPECT_EQ(server_names.size(), 1); + EXPECT_EQ(server_names.front(), "server1.example.com"); + + // IPv4 client connects to wildcard IP match - using 3rd filter chain. + filter_chain = findFilterChain(1234, "127.0.0.1", "", "tls", {}, "127.0.0.1", 111, "192.168.1.1"); + ASSERT_NE(filter_chain, nullptr); + EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); + transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); + ssl_socket = dynamic_cast(transport_socket.get()); + server_names = ssl_socket->ssl()->dnsSansLocalCertificate(); + EXPECT_EQ(server_names.size(), 2); + EXPECT_EQ(server_names.front(), "*.example.com"); +} + +TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithServerNamesMatch) { + const std::string yaml = TestEnvironment::substitute(R"EOF( + address: + socket_address: { address: 127.0.0.1, port_value: 1234 } + listener_filters: + - name: "envoy.filters.listener.tls_inspector" + filter_chains: - filter_chain_match: # empty transport_socket: @@ -3168,7 +3321,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithTransport socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - filter_chain_match: # empty @@ -3213,7 +3365,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithApplicati socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - 
typed_config: {} filter_chains: - filter_chain_match: # empty @@ -3261,7 +3412,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithMultipleR socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - filter_chain_match: # empty @@ -3324,7 +3474,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDifferent socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - filter_chain_match: server_names: "example.com" @@ -3368,7 +3517,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - filter_chain_match: server_names: "example.com" @@ -3408,7 +3556,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithInvalidDesti socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - filter_chain_match: prefix_ranges: { address_prefix: a.b.c.d, prefix_len: 32 } @@ -3425,7 +3572,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithInvalidServe socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - filter_chain_match: server_names: "*w.example.com" @@ -3444,7 +3590,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithSameMatch socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - name : foo filter_chain_match: @@ -3468,7 +3613,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, socket_address: { address: 127.0.0.1, port_value: 
1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - name: foo filter_chain_match: @@ -3491,7 +3635,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithOverlappi socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - typed_config: {} filter_chains: - filter_chain_match: server_names: "example.com" @@ -3569,7 +3712,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.listener.tls_inspector" - typed_config: {} filter_chains: - filter_chain_match: transport_protocol: "tls" @@ -3894,7 +4036,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, Metadata) { route: { cluster: service_foo } listener_filters: - name: "envoy.filters.listener.original_dst" - typed_config: {} )EOF", Network::Address::IpVersion::v4); Configuration::ListenerFactoryContext* listener_factory_context = nullptr; @@ -3927,7 +4068,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstFilterWin32NoTrafficDi filter_chains: {} listener_filters: - name: "envoy.filters.listener.original_dst" - typed_config: {} )EOF", Network::Address::IpVersion::v4); @@ -3948,7 +4088,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstFilterWin32NoFeatureSu traffic_direction: INBOUND listener_filters: - name: "envoy.filters.listener.original_dst" - typed_config: {} )EOF", Network::Address::IpVersion::v4); EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); @@ -3967,7 +4106,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstFilter) { traffic_direction: INBOUND listener_filters: - name: "envoy.filters.listener.original_dst" - typed_config: {} )EOF", Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); @@ -4051,7 +4189,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, 
OriginalDstTestFilterOutbound) { traffic_direction: OUTBOUND listener_filters: - name: "test.listener.original_dst" - typed_config: {} )EOF", Network::Address::IpVersion::v4); @@ -4107,7 +4244,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstFilterStopsIteration) traffic_direction: OUTBOUND listener_filters: - name: "test.listener.original_dst" - typed_config: {} )EOF", Network::Address::IpVersion::v4); @@ -4158,7 +4294,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilterInbound) { traffic_direction: INBOUND listener_filters: - name: "test.listener.original_dst" - typed_config: {} )EOF", Network::Address::IpVersion::v4); @@ -4240,7 +4375,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilterIPv6) { filter_chains: {} listener_filters: - name: "test.listener.original_dstipv6" - typed_config: {} )EOF", Network::Address::IpVersion::v6); diff --git a/test/server/listener_manager_impl_test.h b/test/server/listener_manager_impl_test.h index 26b19f70e672d..459c84e9aeb78 100644 --- a/test/server/listener_manager_impl_test.h +++ b/test/server/listener_manager_impl_test.h @@ -175,7 +175,8 @@ class ListenerManagerImplTest : public testing::Test { findFilterChain(uint16_t destination_port, const std::string& destination_address, const std::string& server_name, const std::string& transport_protocol, const std::vector& application_protocols, - const std::string& source_address, uint16_t source_port) { + const std::string& source_address, uint16_t source_port, + std::string direct_source_address = "") { if (absl::StartsWith(destination_address, "/")) { local_address_ = std::make_shared(destination_address); } else { @@ -197,6 +198,18 @@ class ListenerManagerImplTest : public testing::Test { } socket_->address_provider_->setRemoteAddress(remote_address_); + if (direct_source_address.empty()) { + direct_source_address = source_address; + } + if (absl::StartsWith(direct_source_address, "/")) { + direct_remote_address_ = + 
std::make_shared(direct_source_address); + } else { + direct_remote_address_ = + Network::Utility::parseInternetAddress(direct_source_address, source_port); + } + socket_->address_provider_->setDirectRemoteAddressForTest(direct_remote_address_); + return manager_->listeners().back().get().filterChainManager().findFilterChain(*socket_); } @@ -262,8 +275,11 @@ class ListenerManagerImplTest : public testing::Test { .value()); } - void checkConfigDump(const std::string& expected_dump_yaml) { - auto message_ptr = server_.admin_.config_tracker_.config_tracker_callbacks_["listeners"](); + void checkConfigDump( + const std::string& expected_dump_yaml, + const Matchers::StringMatcher& name_matcher = Matchers::UniversalStringMatcher()) { + auto message_ptr = + server_.admin_.config_tracker_.config_tracker_callbacks_["listeners"](name_matcher); const auto& listeners_config_dump = dynamic_cast(*message_ptr); @@ -294,6 +310,7 @@ class ListenerManagerImplTest : public testing::Test { Api::ApiPtr api_; Network::Address::InstanceConstSharedPtr local_address_; Network::Address::InstanceConstSharedPtr remote_address_; + Network::Address::InstanceConstSharedPtr direct_remote_address_; std::unique_ptr socket_; uint64_t listener_tag_{1}; bool enable_dispatcher_stats_{false}; diff --git a/test/server/server_corpus/big_maglev_table b/test/server/server_corpus/big_maglev_table new file mode 100644 index 0000000000000..8d8667c06b388 --- /dev/null +++ b/test/server/server_corpus/big_maglev_table @@ -0,0 +1,106 @@ +static_resources { + clusters { + name: "ser" + type: STATIC + connect_timeout { + nanos: 813 + } + lb_policy: MAGLEV + circuit_breakers { + } + http_protocol_options { + default_host_for_http_10: "\005" + } + dns_lookup_family: V6_ONLY + metadata { + filter_metadata { + key: "\177" + value { + } + } + } + alt_stat_name: "search" + load_assignment { + cluster_name: "." 
+ endpoints { + locality { + sub_zone: "\002\000\000\000\000\000\000\000" + } + priority: 50 + } + endpoints { + lb_endpoints { + endpoint { + address { + envoy_internal_address { + } + } + } + health_status: DRAINING + } + } + endpoints { + lb_endpoints { + endpoint { + address { + envoy_internal_address { + } + } + } + health_status: DRAINING + } + lb_endpoints { + endpoint { + address { + pipe { + path: ")" + } + } + } + health_status: DRAINING + } + load_balancing_weight { + value: 262144 + } + priority: 16 + } + endpoints { + } + endpoints { + locality { + sub_zone: "\002\000\000\000\000\000\000\000" + } + priority: 16 + } + } + upstream_http_protocol_options { + auto_sni: true + auto_san_validation: true + } + maglev_lb_config { + table_size { + value: 18374967954648334337 + } + } + } +} +watchdog { + megamiss_timeout { + nanos: 95 + } + multikill_timeout { + nanos: 620756992 + } + max_kill_timeout_jitter { + nanos: 262239 + } +} +stats_config { +} +hds_config { + refresh_delay { + seconds: -648518346341351424 + } +} +stats_server_version_override { +} diff --git a/test/test_common/utility.h b/test/test_common/utility.h index e0470f27a59e1..2da5dee226feb 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -866,6 +866,33 @@ class ConditionalInitializer { bool ready_ ABSL_GUARDED_BY(mutex_){false}; }; +namespace Tracing { + +class TestTraceContextImpl : public Tracing::TraceContext { +public: + TestTraceContextImpl(const std::initializer_list>& values) { + for (const auto& value : values) { + context_map_[value.first] = value.second; + } + } + + absl::optional getTraceContext(absl::string_view key) const override { + auto iter = context_map_.find(key); + if (iter == context_map_.end()) { + return absl::nullopt; + } + return iter->second; + } + + void setTraceContext(absl::string_view key, absl::string_view val) override { + context_map_.insert({std::string(key), std::string(val)}); + } + + absl::flat_hash_map context_map_; +}; + +} // 
namespace Tracing + namespace Http { /** @@ -1079,6 +1106,25 @@ class TestRequestHeaderMapImpl INLINE_REQ_NUMERIC_HEADERS(DEFINE_TEST_INLINE_NUMERIC_HEADER_FUNCS) INLINE_REQ_RESP_STRING_HEADERS(DEFINE_TEST_INLINE_STRING_HEADER_FUNCS) INLINE_REQ_RESP_NUMERIC_HEADERS(DEFINE_TEST_INLINE_NUMERIC_HEADER_FUNCS) + + absl::optional getTraceContext(absl::string_view key) const override { + ASSERT(header_map_); + return header_map_->getTraceContext(key); + } + void setTraceContext(absl::string_view key, absl::string_view value) override { + ASSERT(header_map_); + header_map_->setTraceContext(key, value); + } + + void setTraceContextReferenceKey(absl::string_view key, absl::string_view val) override { + ASSERT(header_map_); + header_map_->setTraceContextReferenceKey(key, val); + } + + void setTraceContextReference(absl::string_view key, absl::string_view val) override { + ASSERT(header_map_); + header_map_->setTraceContextReference(key, val); + } }; using TestRequestTrailerMapImpl = TestHeaderMapImplBase; diff --git a/test/test_common/wasm_base.h b/test/test_common/wasm_base.h index 8231788e24ec3..5e3394bf5559c 100644 --- a/test/test_common/wasm_base.h +++ b/test/test_common/wasm_base.h @@ -84,15 +84,13 @@ template class WasmTestBase : public Base { Extensions::Common::Wasm::createWasm( plugin_, scope_, cluster_manager_, init_manager_, dispatcher_, *api, lifecycle_notifier_, remote_data_provider_, [this](WasmHandleSharedPtr wasm) { wasm_ = wasm; }, create_root); - if (wasm_) { - plugin_handle_ = getOrCreateThreadLocalPlugin( - wasm_, plugin_, dispatcher_, - [this, create_root](Wasm* wasm, const std::shared_ptr& plugin) { - root_context_ = static_cast(create_root(wasm, plugin)); - return root_context_; - }); - wasm_ = plugin_handle_->wasmHandleForTest(); - } + plugin_handle_ = getOrCreateThreadLocalPlugin( + wasm_, plugin_, dispatcher_, + [this, create_root](Wasm* wasm, const std::shared_ptr& plugin) { + root_context_ = static_cast(create_root(wasm, plugin)); + return 
root_context_; + }); + wasm_ = plugin_handle_->wasmHandle(); } WasmHandleSharedPtr& wasm() { return wasm_; } @@ -143,7 +141,8 @@ template class WasmHttpFilterTestBase : public W template void setupFilterBase() { auto wasm = WasmTestBase::wasm_ ? WasmTestBase::wasm_->wasm().get() : nullptr; int root_context_id = wasm ? wasm->getRootContext(WasmTestBase::plugin_, false)->id() : 0; - context_ = std::make_unique(wasm, root_context_id, WasmTestBase::plugin_); + context_ = + std::make_unique(wasm, root_context_id, WasmTestBase::plugin_handle_); context_->setDecoderFilterCallbacks(decoder_callbacks_); context_->setEncoderFilterCallbacks(encoder_callbacks_); } @@ -160,7 +159,8 @@ class WasmNetworkFilterTestBase : public WasmTestBase { template void setupFilterBase() { auto wasm = WasmTestBase::wasm_ ? WasmTestBase::wasm_->wasm().get() : nullptr; int root_context_id = wasm ? wasm->getRootContext(WasmTestBase::plugin_, false)->id() : 0; - context_ = std::make_unique(wasm, root_context_id, WasmTestBase::plugin_); + context_ = + std::make_unique(wasm, root_context_id, WasmTestBase::plugin_handle_); context_->initializeReadFilterCallbacks(read_filter_callbacks_); context_->initializeWriteFilterCallbacks(write_filter_callbacks_); } diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index 95d0940ce8dd6..311efac16a3d8 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -36,9 +36,7 @@ "./source/extensions/common/wasm/ext", "./examples/wasm-cc", ) -SUFFIXES = ( - "BUILD", "WORKSPACE", ".bzl", ".cc", ".h", ".java", ".m", ".md", ".mm", ".proto", ".rst") -DOCS_SUFFIX = (".md", ".rst") +SUFFIXES = ("BUILD", "WORKSPACE", ".bzl", ".cc", ".h", ".java", ".m", ".mm", ".proto") PROTO_SUFFIX = (".proto") # Files in these paths can make reference to protobuf stuff directly @@ -117,11 +115,10 @@ EXCEPTION_DENYLIST = ( "./source/common/http/http2/codec_impl.h", "./source/common/http/http2/codec_impl.cc") +# Files that are 
allowed to use try without main thread assertion. RAW_TRY_ALLOWLIST = ( - "./source/common/common/regex.cc", - "./source/common/common/thread.h", - "./source/common/network/utility.cc", -) + "./source/common/common/regex.cc", "./source/common/common/thread.h", + "./source/common/network/utility.cc") # These are entire files that are allowed to use std::string_view vs. individual exclusions. Right # now this is just WASM which makes use of std::string_view heavily so we need to convert to @@ -181,16 +178,11 @@ COMMENT_REGEX = re.compile(r"//|\*") DURATION_VALUE_REGEX = re.compile(r'\b[Dd]uration\(([0-9.]+)') PROTO_VALIDATION_STRING = re.compile(r'\bmin_bytes\b') -VERSION_HISTORY_NEW_LINE_REGEX = re.compile("\* ([a-z \-_]+): ([a-z:`]+)") -VERSION_HISTORY_SECTION_NAME = re.compile("^[A-Z][A-Za-z ]*$") -RELOADABLE_FLAG_REGEX = re.compile(".*(...)(envoy.reloadable_features.[^ ]*)\s.*") -INVALID_REFLINK = re.compile(".* ref:.*") OLD_MOCK_METHOD_REGEX = re.compile("MOCK_METHOD\d") # C++17 feature, lacks sufficient support across various libraries / compilers. FOR_EACH_N_REGEX = re.compile("for_each_n\(") # Check for punctuation in a terminal ref clause, e.g. # :ref:`panic mode. ` -REF_WITH_PUNCTUATION_REGEX = re.compile(".*\. <[^<]*>`\s*") DOT_MULTI_SPACE_REGEX = re.compile("\\. 
+") FLAG_REGEX = re.compile(" \"(.*)\",") @@ -454,7 +446,7 @@ def allow_listed_for_register_factory(self, file_path): return any(file_path.startswith(prefix) for prefix in REGISTER_FACTORY_TEST_ALLOWLIST) def allow_listed_for_serialize_as_string(self, file_path): - return file_path in SERIALIZE_AS_STRING_ALLOWLIST or file_path.endswith(DOCS_SUFFIX) + return file_path in SERIALIZE_AS_STRING_ALLOWLIST def allow_listed_for_std_string_view(self, file_path): return file_path in STD_STRING_VIEW_ALLOWLIST @@ -466,8 +458,7 @@ def allow_listed_for_histogram_si_suffix(self, name): return name in HISTOGRAM_WITH_SI_SUFFIX_ALLOWLIST def allow_listed_for_std_regex(self, file_path): - return file_path.startswith( - "./test") or file_path in STD_REGEX_ALLOWLIST or file_path.endswith(DOCS_SUFFIX) + return file_path.startswith("./test") or file_path in STD_REGEX_ALLOWLIST def allow_listed_for_grpc_init(self, file_path): return file_path in GRPC_INIT_ALLOWLIST @@ -484,8 +475,6 @@ def allow_listed_for_raw_try(self, file_path): def deny_listed_for_exceptions(self, file_path): # Returns true when it is a non test header file or the file_path is in DENYLIST or # it is under tools/testdata subdirectory. - if file_path.endswith(DOCS_SUFFIX): - return False return (file_path.endswith('.h') and not file_path.startswith("./test/") and not file_path in EXCEPTION_ALLOWLIST) or file_path in EXCEPTION_DENYLIST \ or self.is_in_subdir(file_path, 'tools/testdata') @@ -553,92 +542,8 @@ def check_runtime_flags(self, file_path, error_messages): error_messages.append("%s and %s are out of order\n" % (line, previous_flag)) previous_flag = line - def check_current_release_notes(self, file_path, error_messages): - first_word_of_prior_line = '' - next_word_to_check = '' # first word after : - prior_line = '' - - def ends_with_period(prior_line): - if not prior_line: - return True # Don't punctuation-check empty lines. - if prior_line.endswith('.'): - return True # Actually ends with . 
- if prior_line.endswith('`') and REF_WITH_PUNCTUATION_REGEX.match(prior_line): - return True # The text in the :ref ends with a . - return False - - for line_number, line in enumerate(self.read_lines(file_path)): - - def report_error(message): - error_messages.append("%s:%d: %s" % (file_path, line_number + 1, message)) - - if VERSION_HISTORY_SECTION_NAME.match(line): - if line == "Deprecated": - # The deprecations section is last, and does not have enforced formatting. - break - - # Reset all parsing at the start of a section. - first_word_of_prior_line = '' - next_word_to_check = '' # first word after : - prior_line = '' - - invalid_reflink_match = INVALID_REFLINK.match(line) - if invalid_reflink_match: - report_error("Found text \" ref:\". This should probably be \" :ref:\"\n%s" % line) - - # make sure flags are surrounded by ``s (ie "inline literal") - flag_match = RELOADABLE_FLAG_REGEX.match(line) - if flag_match: - if not flag_match.groups()[0].startswith(' ``'): - report_error( - "Flag %s should be enclosed in double back ticks" % flag_match.groups()[1]) - - if line.startswith("* "): - if not ends_with_period(prior_line): - report_error( - "The following release note does not end with a '.'\n %s" % prior_line) - - match = VERSION_HISTORY_NEW_LINE_REGEX.match(line) - if not match: - report_error( - "Version history line malformed. " - "Does not match VERSION_HISTORY_NEW_LINE_REGEX in check_format.py\n %s\n" - "Please use messages in the form 'category: feature explanation.', " - "starting with a lower-cased letter and ending with a period." % line) - else: - first_word = match.groups()[0] - next_word = match.groups()[1] - # Do basic alphabetization checks of the first word on the line and the - # first word after the : - if first_word_of_prior_line and first_word_of_prior_line > first_word: - report_error( - "Version history not in alphabetical order (%s vs %s): please check placement of line\n %s. 
" - % (first_word_of_prior_line, first_word, line)) - if first_word_of_prior_line == first_word and next_word_to_check and next_word_to_check > next_word: - report_error( - "Version history not in alphabetical order (%s vs %s): please check placement of line\n %s. " - % (next_word_to_check, next_word, line)) - first_word_of_prior_line = first_word - next_word_to_check = next_word - - prior_line = line - elif not line: - # If we hit the end of this release note block block, check the prior line. - if not ends_with_period(prior_line): - report_error( - "The following release note does not end with a '.'\n %s" % prior_line) - prior_line = '' - elif prior_line: - prior_line += line - def check_file_contents(self, file_path, checker): error_messages = [] - - if file_path.endswith("version_history/current.rst"): - # Version file checking has enough special cased logic to merit its own checks. - # This only validates entries for the current release as very old release - # notes have a different format. - self.check_current_release_notes(file_path, error_messages) if file_path.endswith("source/common/runtime/runtime_features.cc"): # Do runtime alphabetical order checks. 
self.check_runtime_flags(file_path, error_messages) @@ -1038,10 +943,9 @@ def fix_source_path(self, file_path): error_messages = [] - if not file_path.endswith(DOCS_SUFFIX): - if not file_path.endswith(PROTO_SUFFIX): - error_messages += self.fix_header_order(file_path) - error_messages += self.clang_format(file_path) + if not file_path.endswith(PROTO_SUFFIX): + error_messages += self.fix_header_order(file_path) + error_messages += self.clang_format(file_path) if file_path.endswith(PROTO_SUFFIX) and self.is_api_file(file_path): package_name, error_message = self.package_name_for_proto(file_path) if package_name is None: @@ -1051,16 +955,15 @@ def fix_source_path(self, file_path): def check_source_path(self, file_path): error_messages = self.check_file_contents(file_path, self.check_source_line) - if not file_path.endswith(DOCS_SUFFIX): - if not file_path.endswith(PROTO_SUFFIX): - error_messages += self.check_namespace(file_path) - command = ( - "%s --include_dir_order %s --path %s | diff %s -" % - (HEADER_ORDER_PATH, self.include_dir_order, file_path, file_path)) - error_messages += self.execute_command( - command, "header_order.py check failed", file_path) - command = ("%s %s | diff %s -" % (CLANG_FORMAT_PATH, file_path, file_path)) - error_messages += self.execute_command(command, "clang-format check failed", file_path) + if not file_path.endswith(PROTO_SUFFIX): + error_messages += self.check_namespace(file_path) + command = ( + "%s --include_dir_order %s --path %s | diff %s -" % + (HEADER_ORDER_PATH, self.include_dir_order, file_path, file_path)) + error_messages += self.execute_command( + command, "header_order.py check failed", file_path) + command = ("%s %s | diff %s -" % (CLANG_FORMAT_PATH, file_path, file_path)) + error_messages += self.execute_command(command, "clang-format check failed", file_path) if file_path.endswith(PROTO_SUFFIX) and self.is_api_file(file_path): package_name, error_message = self.package_name_for_proto(file_path) @@ -1107,12 +1010,6 @@ 
def clang_format(self, file_path): return [] def check_format(self, file_path): - if file_path.startswith(EXCLUDED_PREFIXES): - return [] - - if not file_path.endswith(SUFFIXES): - return [] - error_messages = [] # Apply fixes first, if asked, and then run checks. If we wind up attempting to fix # an issue, but there's still an error, that's a problem. @@ -1323,9 +1220,10 @@ def owned_directories(error_messages): # Calculate the list of owned directories once per run. error_messages = [] owned_directories = owned_directories(error_messages) - if os.path.isfile(args.target_path): - error_messages += format_checker.check_format("./" + args.target_path) + if not args.target_path.startswith(EXCLUDED_PREFIXES) and args.target_path.endswith( + SUFFIXES): + error_messages += format_checker.check_format("./" + args.target_path) else: results = [] @@ -1334,9 +1232,18 @@ def pooled_check_format(path_predicate): # For each file in target_path, start a new task in the pool and collect the # results (results is passed by reference, and is used as an output). for root, _, files in os.walk(args.target_path): + _files = [] + for filename in files: + file_path = os.path.join(root, filename) + check_file = ( + path_predicate(filename) and not file_path.startswith(EXCLUDED_PREFIXES) + and file_path.endswith(SUFFIXES)) + if check_file: + _files.append(filename) + if not _files: + continue format_checker.check_format_visitor( - (pool, results, owned_directories, error_messages), root, - [f for f in files if path_predicate(f)]) + (pool, results, owned_directories, error_messages), root, _files) # Close the pool to new tasks, wait for all of the running tasks to finish, # then collect the error messages. 
diff --git a/tools/code_format/check_format_test_helper.py b/tools/code_format/check_format_test_helper.py index 057362709f760..9353ec65256e8 100755 --- a/tools/code_format/check_format_test_helper.py +++ b/tools/code_format/check_format_test_helper.py @@ -121,7 +121,7 @@ def check_tool_not_found_error(): # Temporarily change PATH to test the error about lack of external tools. oldPath = os.environ["PATH"] os.environ["PATH"] = "/sbin:/usr/sbin" - clang_format = os.getenv("CLANG_FORMAT", "clang-format-9") + clang_format = os.getenv("CLANG_FORMAT", "clang-format-11") # If CLANG_FORMAT points directly to the binary, skip this test. if os.path.isfile(clang_format) and os.access(clang_format, os.X_OK): os.environ["PATH"] = oldPath @@ -194,17 +194,6 @@ def run_checks(): errors += check_unfixable_error( "serialize_as_string.cc", "Don't use MessageLite::SerializeAsString for generating deterministic serialization") - errors += check_unfixable_error( - "version_history/current.rst", - "Version history not in alphabetical order (zzzzz vs aaaaa): please check placement of line" - ) - errors += check_unfixable_error( - "version_history/current.rst", - "Version history not in alphabetical order (this vs aaaa): please check placement of line") - errors += check_unfixable_error( - "version_history/current.rst", - "Version history line malformed. Does not match VERSION_HISTORY_NEW_LINE_REGEX in " - "check_format.py") errors += check_unfixable_error( "counter_from_string.cc", "Don't lookup stats by name at runtime; use StatName saved during construction") diff --git a/tools/code_format/python_check.py b/tools/code_format/python_check.py index 9f4a556b62e8e..512576bfec7cb 100755 --- a/tools/code_format/python_check.py +++ b/tools/code_format/python_check.py @@ -8,7 +8,7 @@ # # alternatively, if you have the necessary python deps available # -# ./tools/code_format/python_check.py -h +# PYTHONPATH=. 
./tools/code_format/python_check.py -h # # python requires: flake8, yapf # diff --git a/tools/deprecate_version/requirements.txt b/tools/deprecate_version/requirements.txt index 7789602e2d470..95482317a8451 100644 --- a/tools/deprecate_version/requirements.txt +++ b/tools/deprecate_version/requirements.txt @@ -67,9 +67,9 @@ gitdb==4.0.7 \ # via # -r tools/deprecate_version/requirements.txt # gitpython -gitpython==3.1.17 \ - --hash=sha256:29fe82050709760081f588dd50ce83504feddbebdc4da6956d02351552b1c135 \ - --hash=sha256:ee24bdc93dce357630764db659edaf6b8d664d4ff5447ccfeedd2dc5c253f41e +gitpython==3.1.18 \ + --hash=sha256:fce760879cd2aebd2991b3542876dc5c4a909b30c9d69dfc488e504a8db37ee8 \ + --hash=sha256:b838a895977b45ab6f0cc926a9045c8d1c44e2b653c1fcc39fe91f42c6e8f05b # via -r tools/deprecate_version/requirements.txt idna==2.10 \ --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \ diff --git a/tools/docs/BUILD b/tools/docs/BUILD index 922587ffb976e..db47c9f37d362 100644 --- a/tools/docs/BUILD +++ b/tools/docs/BUILD @@ -1,6 +1,7 @@ load("@rules_python//python:defs.bzl", "py_binary") load("//bazel:envoy_build_system.bzl", "envoy_package") load("@docs_pip3//:requirements.bzl", "requirement") +load("//tools/base:envoy_python.bzl", "envoy_py_binary") licenses(["notice"]) # Apache 2 @@ -32,3 +33,51 @@ py_binary( "generate_api_rst.py", ], ) + +envoy_py_binary( + name = "tools.docs.sphinx_runner", + deps = [ + "//tools/base:runner", + requirement("alabaster"), + requirement("Babel"), + requirement("certifi"), + requirement("chardet"), + requirement("colorama"), + requirement("docutils"), + requirement("gitdb"), + requirement("GitPython"), + requirement("idna"), + requirement("imagesize"), + requirement("Jinja2"), + requirement("MarkupSafe"), + requirement("packaging"), + requirement("Pygments"), + requirement("pyparsing"), + requirement("pytz"), + requirement("pyyaml"), + requirement("requests"), + requirement("setuptools"), + requirement("six"), + 
requirement("smmap"), + requirement("snowballstemmer"), + requirement("Sphinx"), + requirement("sphinx-copybutton"), + requirement("sphinx-rtd-theme"), + requirement("sphinx-tabs"), + requirement("sphinxcontrib-applehelp"), + requirement("sphinxcontrib-devhelp"), + requirement("sphinxcontrib-htmlhelp"), + requirement("sphinxcontrib-httpdomain"), + requirement("sphinxcontrib-jsmath"), + requirement("sphinxcontrib-qthelp"), + requirement("sphinxext-rediraffe"), + requirement("sphinxcontrib-serializinghtml"), + requirement("urllib3"), + ], +) + +envoy_py_binary( + name = "tools.docs.rst_check", + data = ["//docs:root/version_history/current.rst"], + deps = ["//tools/base:checker"], +) diff --git a/tools/docs/requirements.txt b/tools/docs/requirements.txt index 22110da4d2ad5..fd1e8e523334a 100644 --- a/tools/docs/requirements.txt +++ b/tools/docs/requirements.txt @@ -2,8 +2,135 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile --generate-hashes tools/docs/requirements.txt +# pip-compile --allow-unsafe --generate-hashes tools/docs/requirements.txt # +alabaster==0.7.12 \ + --hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \ + --hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02 + # via + # -r tools/docs/requirements.txt + # sphinx +babel==2.9.1 \ + --hash=sha256:ab49e12b91d937cd11f0b67cb259a57ab4ad2b59ac7a3b41d6c06c0ac5b0def9 \ + --hash=sha256:bc0c176f9f6a994582230df350aa6e05ba2ebe4b3ac317eab29d9be5d2768da0 + # via + # -r tools/docs/requirements.txt + # sphinx +certifi==2021.5.30 \ + --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \ + --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 + # via + # -r tools/docs/requirements.txt + # requests +chardet==4.0.0 \ + --hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa \ + --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 + # via + 
# -r tools/docs/requirements.txt + # requests +colorama==0.4.4 \ + --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \ + --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 + # via -r tools/docs/requirements.txt +docutils==0.16 \ + --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \ + --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc + # via + # -r tools/docs/requirements.txt + # sphinx + # sphinx-rtd-theme + # sphinx-tabs +gitdb==4.0.7 \ + --hash=sha256:6c4cc71933456991da20917998acbe6cf4fb41eeaab7d6d67fbc05ecd4c865b0 \ + --hash=sha256:96bf5c08b157a666fec41129e6d327235284cca4c81e92109260f353ba138005 + # via + # -r tools/docs/requirements.txt + # gitpython +gitpython==3.1.18 \ + --hash=sha256:fce760879cd2aebd2991b3542876dc5c4a909b30c9d69dfc488e504a8db37ee8 \ + --hash=sha256:b838a895977b45ab6f0cc926a9045c8d1c44e2b653c1fcc39fe91f42c6e8f05b + # via -r tools/docs/requirements.txt +idna==2.10 \ + --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \ + --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0 + # via + # -r tools/docs/requirements.txt + # requests +imagesize==1.2.0 \ + --hash=sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1 \ + --hash=sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1 + # via + # -r tools/docs/requirements.txt + # sphinx +jinja2==3.0.1 \ + --hash=sha256:1f06f2da51e7b56b8f238affdd6b4e2c61e39598a378cc49345bc1bd42a978a4 \ + --hash=sha256:703f484b47a6af502e743c9122595cc812b0271f661722403114f71a79d0f5a4 + # via + # -r tools/docs/requirements.txt + # sphinx +markupsafe==2.0.1 \ + --hash=sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298 \ + --hash=sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64 \ + --hash=sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b \ + 
--hash=sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567 \ + --hash=sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff \ + --hash=sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74 \ + --hash=sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35 \ + --hash=sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26 \ + --hash=sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7 \ + --hash=sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75 \ + --hash=sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f \ + --hash=sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135 \ + --hash=sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8 \ + --hash=sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a \ + --hash=sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914 \ + --hash=sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18 \ + --hash=sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8 \ + --hash=sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2 \ + --hash=sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d \ + --hash=sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b \ + --hash=sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f \ + --hash=sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb \ + --hash=sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833 \ + --hash=sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415 \ + --hash=sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902 \ + --hash=sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9 \ + --hash=sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d \ + 
--hash=sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066 \ + --hash=sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f \ + --hash=sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5 \ + --hash=sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94 \ + --hash=sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509 \ + --hash=sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51 \ + --hash=sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872 + # via + # -r tools/docs/requirements.txt + # jinja2 +packaging==20.9 \ + --hash=sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5 \ + --hash=sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a + # via + # -r tools/docs/requirements.txt + # sphinx +pygments==2.9.0 \ + --hash=sha256:a18f47b506a429f6f4b9df81bb02beab9ca21d0a5fee38ed15aef65f0545519f \ + --hash=sha256:d66e804411278594d764fc69ec36ec13d9ae9147193a1740cd34d272ca383b8e + # via + # -r tools/docs/requirements.txt + # sphinx + # sphinx-tabs +pyparsing==2.4.7 \ + --hash=sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1 \ + --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b + # via + # -r tools/docs/requirements.txt + # packaging +pytz==2021.1 \ + --hash=sha256:83a4a90894bf38e243cf052c8b58f381bfe9a7a483f6a9cab140bc7f702ac4da \ + --hash=sha256:eb10ce3e7736052ed3623d49975ce333bcd712c7bb19a58b9e2089d4057d0798 + # via + # -r tools/docs/requirements.txt + # babel pyyaml==5.4.1 \ --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \ --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \ @@ -35,3 +162,105 @@ pyyaml==5.4.1 \ --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \ --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 # via -r 
tools/docs/requirements.txt +requests==2.25.1 \ + --hash=sha256:27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804 \ + --hash=sha256:c210084e36a42ae6b9219e00e48287def368a26d03a048ddad7bfee44f75871e + # via + # -r tools/docs/requirements.txt + # sphinx +six==1.16.0 \ + --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ + --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 + # via + # -r tools/docs/requirements.txt + # sphinxcontrib-httpdomain +smmap==4.0.0 \ + --hash=sha256:7e65386bd122d45405ddf795637b7f7d2b532e7e401d46bbe3fb49b9986d5182 \ + --hash=sha256:a9a7479e4c572e2e775c404dcd3080c8dc49f39918c2cf74913d30c4c478e3c2 + # via + # -r tools/docs/requirements.txt + # gitdb +snowballstemmer==2.1.0 \ + --hash=sha256:b51b447bea85f9968c13b650126a888aabd4cb4463fca868ec596826325dedc2 \ + --hash=sha256:e997baa4f2e9139951b6f4c631bad912dfd3c792467e2f03d7239464af90e914 + # via + # -r tools/docs/requirements.txt + # sphinx +sphinx-copybutton==0.3.3 \ + --hash=sha256:19850e9c1ed09c899f136ce3cfa304cb3b6d2f508528c19d8f512ccc8c66b0d4 \ + --hash=sha256:3695987d5e98e3b223471aaed8aa7491e03e9bfc48ed655a91446fd5e30b6c25 + # via -r tools/docs/requirements.txt +sphinx-rtd-theme==0.5.2 \ + --hash=sha256:32bd3b5d13dc8186d7a42fc816a23d32e83a4827d7d9882948e7b837c232da5a \ + --hash=sha256:4a05bdbe8b1446d77a01e20a23ebc6777c74f43237035e76be89699308987d6f + # via -r tools/docs/requirements.txt +sphinx-tabs==3.1.0 \ + --hash=sha256:63df94e84bc05eb8598419a313ffc24455a14d1a580d174bb748404063958a67 \ + --hash=sha256:5eee2a869b1226e1f618f0c7ed267e5e3c24425565e6313cad80d00a7119694f + # via -r tools/docs/requirements.txt +sphinx==4.0.2 \ + --hash=sha256:b5c2ae4120bf00c799ba9b3699bc895816d272d120080fbc967292f29b52b48c \ + --hash=sha256:d1cb10bee9c4231f1700ec2e24a91be3f3a3aba066ea4ca9f3bbe47e59d5a1d4 + # via + # -r tools/docs/requirements.txt + # sphinx-copybutton + # sphinx-rtd-theme + # sphinx-tabs + # sphinxcontrib-httpdomain + # 
sphinxext-rediraffe +sphinxcontrib-applehelp==1.0.2 \ + --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \ + --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58 + # via + # -r tools/docs/requirements.txt + # sphinx +sphinxcontrib-devhelp==1.0.2 \ + --hash=sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e \ + --hash=sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4 + # via + # -r tools/docs/requirements.txt + # sphinx +sphinxcontrib-htmlhelp==2.0.0 \ + --hash=sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07 \ + --hash=sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2 + # via + # -r tools/docs/requirements.txt + # sphinx +sphinxcontrib-httpdomain==1.7.0 \ + --hash=sha256:1fb5375007d70bf180cdd1c79e741082be7aa2d37ba99efe561e1c2e3f38191e \ + --hash=sha256:ac40b4fba58c76b073b03931c7b8ead611066a6aebccafb34dc19694f4eb6335 + # via -r tools/docs/requirements.txt +sphinxcontrib-jsmath==1.0.1 \ + --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \ + --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8 + # via + # -r tools/docs/requirements.txt + # sphinx +sphinxcontrib-qthelp==1.0.3 \ + --hash=sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72 \ + --hash=sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6 + # via + # -r tools/docs/requirements.txt + # sphinx +sphinxcontrib-serializinghtml==1.1.5 \ + --hash=sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd \ + --hash=sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952 + # via + # -r tools/docs/requirements.txt + # sphinx +sphinxext-rediraffe==0.2.7 \ + --hash=sha256:651dcbfae5ffda9ffd534dfb8025f36120e5efb6ea1a33f5420023862b9f725d \ + --hash=sha256:9e430a52d4403847f4ffb3a8dd6dfc34a9fe43525305131f52ed899743a5fd8c + # via -r tools/docs/requirements.txt 
+urllib3==1.26.5 \ + --hash=sha256:753a0374df26658f99d826cfe40394a686d05985786d946fbe4165b5148f5a7c \ + --hash=sha256:a7acd0977125325f516bda9735fa7142b909a8d01e8b2e4c8108d0984e6e0098 + # via + # -r tools/docs/requirements.txt + # requests + +# The following packages are considered to be unsafe in a requirements file: +setuptools==57.0.0 \ + --hash=sha256:401cbf33a7bf817d08014d51560fc003b895c4cdc1a5b521ad2969e928a07535 \ + --hash=sha256:c8b9f1a457949002e358fea7d3f2a1e1b94ddc0354b2e40afc066bf95d21bf7b + # via sphinx diff --git a/tools/docs/rst_check.py b/tools/docs/rst_check.py new file mode 100644 index 0000000000000..47bad1aa123a6 --- /dev/null +++ b/tools/docs/rst_check.py @@ -0,0 +1,128 @@ +import re +import sys +from typing import Iterator + +from tools.base import checker + +INVALID_REFLINK = re.compile(r".* ref:.*") +REF_WITH_PUNCTUATION_REGEX = re.compile(r".*\. <[^<]*>`\s*") +RELOADABLE_FLAG_REGEX = re.compile(r".*(...)(envoy.reloadable_features.[^ ]*)\s.*") +VERSION_HISTORY_NEW_LINE_REGEX = re.compile(r"\* ([a-z \-_]+): ([a-z:`]+)") +VERSION_HISTORY_SECTION_NAME = re.compile(r"^[A-Z][A-Za-z ]*$") + + +class CurrentVersionFile(object): + + def __init__(self, path): + self._path = path + + @property + def lines(self) -> Iterator[str]: + with open(self.path) as f: + for line in f.readlines(): + yield line.strip() + + @property + def path(self) -> str: + return self._path + + @property + def prior_endswith_period(self) -> bool: + return bool( + self.prior_line.endswith(".") + # Don't punctuation-check empty lines. + or not self.prior_line + # The text in the :ref ends with a . 
+ or + (self.prior_line.endswith('`') and REF_WITH_PUNCTUATION_REGEX.match(self.prior_line))) + + def check_flags(self, line: str) -> list: + # TODO(phlax): improve checking of inline literals + # make sure flags are surrounded by ``s (ie "inline literal") + flag_match = RELOADABLE_FLAG_REGEX.match(line) + return ([f"Flag {flag_match.groups()[1]} should be enclosed in double back ticks"] + if flag_match and not flag_match.groups()[0].startswith(' ``') else []) + + def check_line(self, line: str) -> list: + errors = self.check_reflink(line) + self.check_flags(line) + if line.startswith("* "): + errors += self.check_list_item(line) + elif not line: + # If we hit the end of this release note block block, check the prior line. + errors += self.check_previous_period() + self.prior_line = '' + elif self.prior_line: + self.prior_line += line + return errors + + def check_list_item(self, line: str) -> list: + errors = [] + if not self.prior_endswith_period: + errors.append(f"The following release note does not end with a '.'\n {self.prior_line}") + + match = VERSION_HISTORY_NEW_LINE_REGEX.match(line) + if not match: + return errors + [ + "Version history line malformed. " + f"Does not match VERSION_HISTORY_NEW_LINE_REGEX in docs_check.py\n {line}\n" + "Please use messages in the form 'category: feature explanation.', " + "starting with a lower-cased letter and ending with a period." + ] + first_word = match.groups()[0] + next_word = match.groups()[1] + + # Do basic alphabetization checks of the first word on the line and the + # first word after the : + if self.first_word_of_prior_line and self.first_word_of_prior_line > first_word: + errors.append( + f"Version history not in alphabetical order " + f"({self.first_word_of_prior_line} vs {first_word}): " + f"please check placement of line\n {line}. 
") + if self.first_word_of_prior_line == first_word and self.next_word_to_check and self.next_word_to_check > next_word: + errors.append( + f"Version history not in alphabetical order " + f"({self.next_word_to_check} vs {next_word}): " + f"please check placement of line\n {line}. ") + self.set_tokens(line, first_word, next_word) + return errors + + def check_previous_period(self) -> list: + return ([f"The following release note does not end with a '.'\n {self.prior_line}"] + if not self.prior_endswith_period else []) + + def check_reflink(self, line: str) -> list: + # TODO(phlax): Check reflinks for all rst files + return ([f"Found text \" ref:\". This should probably be \" :ref:\"\n{line}"] + if INVALID_REFLINK.match(line) else []) + + def run_checks(self) -> Iterator[str]: + self.set_tokens() + for line_number, line in enumerate(self.lines): + if VERSION_HISTORY_SECTION_NAME.match(line): + if line == "Deprecated": + break + self.set_tokens() + for error in self.check_line(line): + yield f"({self.path}:{line_number + 1}) {error}" + + def set_tokens(self, line: str = "", first_word: str = "", next_word: str = "") -> None: + self.prior_line = line + self.first_word_of_prior_line = first_word + self.next_word_to_check = next_word + + +class RSTChecker(checker.Checker): + checks = ("current_version",) + + def check_current_version(self): + errors = list(CurrentVersionFile("docs/root/version_history/current.rst").run_checks()) + if errors: + self.error("current_version", errors) + + +def main(*args) -> int: + return RSTChecker(*args).run() + + +if __name__ == "__main__": + sys.exit(main(*sys.argv[1:])) diff --git a/tools/docs/sphinx_runner.py b/tools/docs/sphinx_runner.py new file mode 100644 index 0000000000000..4476b6ae3a5d0 --- /dev/null +++ b/tools/docs/sphinx_runner.py @@ -0,0 +1,228 @@ +import argparse +import os +import platform +import re +import sys +import tarfile +import tempfile +from functools import cached_property + +import yaml + +from colorama import 
Fore, Style + +from sphinx.cmd.build import main as sphinx_build + +from tools.base import runner + + +class SphinxBuildError(Exception): + pass + + +class SphinxEnvError(Exception): + pass + + +class SphinxRunner(runner.Runner): + _build_dir = "." + _build_sha = "UNKNOWN" + + @property + def blob_sha(self) -> str: + """Returns either the version tag or the current build sha""" + return self.docs_tag or self.build_sha + + @property + def build_dir(self) -> str: + """Returns current build_dir - most likely a temp directory""" + return self._build_dir + + @property + def build_sha(self) -> str: + """Returns either a provided build_sha or a default""" + return self.args.build_sha or self._build_sha + + @cached_property + def colors(self) -> dict: + """Color scheme for build summary""" + return dict(chrome=Fore.LIGHTYELLOW_EX, key=Fore.LIGHTCYAN_EX, value=Fore.LIGHTMAGENTA_EX) + + @cached_property + def config_file(self) -> str: + """Populates a config file with self.configs and returns the file path""" + with open(self.config_file_path, "w") as f: + f.write(yaml.dump(self.configs)) + return self.config_file_path + + @property + def config_file_path(self) -> str: + """Path to a (temporary) build config""" + return os.path.join(self.build_dir, "build.yaml") + + @cached_property + def configs(self) -> str: + """Build configs derived from provided args""" + _configs = dict( + version_string=self.version_string, + release_level=self.release_level, + blob_sha=self.blob_sha, + version_number=self.version_number, + docker_image_tag_name=self.docker_image_tag_name) + if self.validator_path: + _configs["validator_path"] = self.validator_path + if self.descriptor_path: + _configs["descriptor_path"] = self.descriptor_path + return _configs + + @property + def descriptor_path(self) -> str: + """Path to a descriptor file for config validation""" + return os.path.abspath(self.args.descriptor_path) + + @property + def docker_image_tag_name(self) -> str: + """Tag name of current 
docker image""" + return re.sub(r"([0-9]+\.[0-9]+)\.[0-9]+.*", r"v\1-latest", self.version_number) + + @property + def docs_tag(self) -> str: + """Tag name - ie named version for this docs build""" + return self.args.docs_tag + + @cached_property + def html_dir(self) -> str: + """Path to (temporary) directory for outputting html""" + return os.path.join(self.build_dir, "generated/html") + + @property + def output_filename(self) -> str: + """Path to tar file for saving generated html docs""" + return self.args.output_filename + + @property + def py_compatible(self) -> bool: + """Current python version is compatible""" + return bool(sys.version_info.major == 3 and sys.version_info.minor >= 8) + + @property + def release_level(self) -> str: + """Current python version is compatible""" + return "tagged" if self.docs_tag else "pre-release" + + @cached_property + def rst_dir(self) -> str: + """Populates an rst directory with contents of given rst tar, + and returns the path to the directory + """ + rst_dir = os.path.join(self.build_dir, "generated/rst") + if self.rst_tar: + with tarfile.open(self.rst_tar) as tarfiles: + tarfiles.extractall(path=rst_dir) + return rst_dir + + @property + def rst_tar(self) -> str: + """Path to the rst tarball""" + return self.args.rst_tar + + @property + def sphinx_args(self) -> list: + """Command args for sphinx""" + return ["-W", "--keep-going", "--color", "-b", "html", self.rst_dir, self.html_dir] + + @property + def validator_path(self) -> str: + """Path to validator utility for validating snippets""" + return os.path.abspath(self.args.validator_path) + + @property + def version_file(self) -> str: + """Path to version files for deriving docs version""" + return self.args.version_file + + @cached_property + def version_number(self) -> str: + """Semantic version""" + with open(self.version_file) as f: + return f.read().strip() + + @property + def version_string(self) -> str: + """Version string derived from either docs_tag or build_sha""" 
+ return ( + f"tag-{self.docs_tag}" + if self.docs_tag else f"{self.version_number}-{self.build_sha[:6]}") + + def add_arguments(self, parser: argparse.ArgumentParser) -> None: + parser.add_argument("--build_sha") + parser.add_argument("--docs_tag") + parser.add_argument("--version_file") + parser.add_argument("--validator_path") + parser.add_argument("--descriptor_path") + parser.add_argument("rst_tar") + parser.add_argument("output_filename") + + def build_html(self) -> None: + if sphinx_build(self.sphinx_args): + raise SphinxBuildError("BUILD FAILED") + + def build_summary(self) -> None: + print() + print(self._color("#### Sphinx build configs #####################")) + print(self._color("###")) + for k, v in self.configs.items(): + print(f"{self._color('###')} {self._color(k, 'key')}: {self._color(v, 'value')}") + print(self._color("###")) + print(self._color("###############################################")) + print() + + def check_env(self) -> None: + if not self.py_compatible: + raise SphinxEnvError( + f"ERROR: python version must be >= 3.8, you have {platform.python_version()}") + if not self.configs["release_level"] == "tagged": + return + if f"v{self.version_number}" != self.docs_tag: + raise SphinxEnvError( + "Given git tag does not match the VERSION file content:" + f"{self.docs_tag} vs v{self.version_number}") + with open(os.path.join(self.rst_dir, "version_history/current.rst")) as f: + if not self.version_number in f.read(): + raise SphinxEnvError( + f"Git tag ({self.version_number}) not found in version_history/current.rst") + + def create_tarball(self) -> None: + with tarfile.open(self.output_filename, "w") as tar: + tar.add(self.html_dir, arcname=".") + + def run(self) -> int: + with tempfile.TemporaryDirectory() as build_dir: + return self._run(build_dir) + + def _color(self, msg, name=None): + return f"{self.colors[name or 'chrome']}{msg}{Style.RESET_ALL}" + + def _run(self, build_dir): + self._build_dir = build_dir + 
os.environ["ENVOY_DOCS_BUILD_CONFIG"] = self.config_file + try: + self.check_env() + except SphinxEnvError as e: + print(e) + return 1 + self.build_summary() + try: + self.build_html() + except SphinxBuildError as e: + print(e) + return 1 + self.create_tarball() + + +def main(*args) -> int: + return SphinxRunner(*args).run() + + +if __name__ == "__main__": + sys.exit(main(*sys.argv[1:])) diff --git a/tools/docs/tests/test_rst_check.py b/tools/docs/tests/test_rst_check.py new file mode 100644 index 0000000000000..19678db367817 --- /dev/null +++ b/tools/docs/tests/test_rst_check.py @@ -0,0 +1,373 @@ + +import types +from unittest.mock import MagicMock, PropertyMock + +import pytest + +from tools.docs import rst_check + + +def test_rst_check_current_version_constructor(): + version_file = rst_check.CurrentVersionFile("PATH") + assert version_file._path == "PATH" + assert version_file.path == "PATH" + + +def test_rst_check_current_version_lines(patches): + version_file = rst_check.CurrentVersionFile("PATH") + patched = patches( + "open", + ("CurrentVersionFile.path", dict(new_callable=PropertyMock)), + prefix="tools.docs.rst_check") + + expected = [MagicMock(), MagicMock()] + with patched as (m_open, m_path): + m_open.return_value.__enter__.return_value.readlines.return_value = expected + _lines = version_file.lines + assert isinstance(_lines, types.GeneratorType) + lines = list(_lines) + + assert ( + list(m_open.call_args) + == [(m_path.return_value,), {}]) + assert lines == [expected[0].strip.return_value, expected[1].strip.return_value] + + +@pytest.mark.parametrize( + "prior", [ + [".", True], + ["asdf .", True], + ["asdf.", True], + ["asdf", False], + ["asdf,", False], + ["", True], + ["foo. `", True], + ["foo. 
` xxx", False], + ["foo `", False]]) +def test_rst_check_current_version_prior_ends_with_period(prior): + version_file = rst_check.CurrentVersionFile("PATH") + version_file.prior_line, expected = prior + assert version_file.prior_endswith_period == expected + + +@pytest.mark.parametrize("matches", [True, False, "partial"]) +def test_rst_check_current_version_check_flags(patches, matches): + version_file = rst_check.CurrentVersionFile("PATH") + patched = patches( + "RELOADABLE_FLAG_REGEX", + prefix="tools.docs.rst_check") + + with patched as (m_flag, ): + if matches == "partial": + m_flag.match.return_value.groups.return_value.__getitem__.return_value.startswith.return_value = False + elif not matches: + m_flag.match.return_value = False + result = version_file.check_flags("LINE") + + assert ( + list(m_flag.match.call_args) + == [('LINE',), {}]) + + if matches: + assert ( + list(m_flag.match.return_value.groups.call_args) + == [(), {}]) + assert ( + list(m_flag.match.return_value.groups.return_value.__getitem__.return_value.startswith.call_args) + == [(' ``',), {}]) + if matches == "partial": + assert ( + result + == [f"Flag {m_flag.match.return_value.groups.return_value.__getitem__.return_value} should be enclosed in double back ticks"]) + assert ( + list(list(c) for c in m_flag.match.return_value.groups.return_value.__getitem__.call_args_list) + == [[(0,), {}], [(1,), {}]]) + else: + assert ( + list(list(c) for c in m_flag.match.return_value.groups.return_value.__getitem__.call_args_list) + == [[(0,), {}]]) + assert result == [] + else: + assert result == [] + + +@pytest.mark.parametrize("line", ["", " ", "* ", "*asdf"]) +@pytest.mark.parametrize("prior_period", [True, False]) +@pytest.mark.parametrize("prior_line", ["", "line_content"]) +def test_rst_check_current_version_check_line(patches, line, prior_period, prior_line): + version_file = rst_check.CurrentVersionFile("PATH") + patched = patches( + "CurrentVersionFile.check_reflink", + 
"CurrentVersionFile.check_flags", + "CurrentVersionFile.check_list_item", + "CurrentVersionFile.check_previous_period", + prefix="tools.docs.rst_check") + version_file.prior_line = prior_line + + with patched as (m_ref, m_flags, m_item, m_period): + result = version_file.check_line(line) + + expected = m_ref.return_value.__add__.return_value + assert ( + list(m_ref.call_args) + == [(line,), {}]) + assert ( + list(m_flags.call_args) + == [(line,), {}]) + assert ( + list(m_ref.return_value.__add__.call_args) + == [(m_flags.return_value,), {}]) + + if line.startswith("* "): + assert ( + list(expected.__iadd__.call_args) + == [(m_item.return_value,), {}]) + assert ( + list(m_item.call_args) + == [(line,), {}]) + assert not m_period.called + assert result == expected.__iadd__.return_value + assert version_file.prior_line == prior_line + elif not line: + assert ( + list(expected.__iadd__.call_args) + == [(m_period.return_value,), {}]) + assert ( + list(m_period.call_args) + == [(), {}]) + assert result == expected.__iadd__.return_value + assert not m_item.called + assert version_file.prior_line == "" + elif prior_line: + assert not m_period.called + assert not m_item.called + assert version_file.prior_line == prior_line + line + assert result == expected + else: + assert not m_period.called + assert not m_item.called + assert version_file.prior_line == prior_line + assert result == expected + + +@pytest.mark.parametrize("prior", [True, False]) +@pytest.mark.parametrize("matches", [True, False]) +@pytest.mark.parametrize("prior_first", ["", "AAA", "BBB", "CCC"]) +@pytest.mark.parametrize("prior_next", ["", "AAA", "BBB", "CCC"]) +@pytest.mark.parametrize("first_word", ["AAA", "BBB", "CCC"]) +@pytest.mark.parametrize("next_word", ["AAA", "BBB", "CCC"]) +def test_rst_check_current_version_check_list_item(patches, matches, prior, prior_first, prior_next, first_word, next_word): + version_file = rst_check.CurrentVersionFile("PATH") + patched = patches( + 
"VERSION_HISTORY_NEW_LINE_REGEX", + "CurrentVersionFile.set_tokens", + ("CurrentVersionFile.prior_endswith_period", dict(new_callable=PropertyMock)), + prefix="tools.docs.rst_check") + version_file.prior_line = "PRIOR LINE" + version_file.first_word_of_prior_line = prior_first + version_file.next_word_to_check = prior_next + + def _get_item(item): + if item == 0: + return first_word + return next_word + + with patched as (m_regex, m_tokens, m_prior): + if not matches: + m_regex.match.return_value = False + else: + m_regex.match.return_value.groups.return_value.__getitem__.side_effect = _get_item + m_prior.return_value = prior + result = version_file.check_list_item("LINE") + + expected = [] + if not prior: + expected += ["The following release note does not end with a '.'\n PRIOR LINE"] + + assert ( + list(m_regex.match.call_args) + == [('LINE',), {}]) + + if not matches: + expected += [ + f"Version history line malformed. " + f"Does not match VERSION_HISTORY_NEW_LINE_REGEX in docs_check.py\n LINE\n" + "Please use messages in the form 'category: feature explanation.', " + "starting with a lower-cased letter and ending with a period."] + assert result == expected + assert not m_tokens.called + return + + assert ( + list(list(c) for c in m_regex.match.return_value.groups.call_args_list) + == [[(), {}], [(), {}]]) + + if prior_first and prior_first > first_word: + expected += [f'Version history not in alphabetical order ({prior_first} vs {first_word}): please check placement of line\n LINE. '] + + if prior_first == first_word and prior_next > next_word: + expected += [f'Version history not in alphabetical order ({prior_next} vs {next_word}): please check placement of line\n LINE. 
'] + + assert result == expected + assert ( + list(m_tokens.call_args) + == [('LINE', first_word, next_word), {}]) + + +@pytest.mark.parametrize("prior", [True, False]) +def test_rst_check_current_version_check_previous_period(patches, prior): + version_file = rst_check.CurrentVersionFile("PATH") + patched = patches( + ("CurrentVersionFile.prior_endswith_period", dict(new_callable=PropertyMock)), + prefix="tools.docs.rst_check") + + version_file.prior_line = "PRIOR" + + with patched as (m_period, ): + m_period.return_value = prior + result = version_file.check_previous_period() + if prior: + assert result == [] + else: + assert result == ["The following release note does not end with a '.'\n PRIOR"] + + +@pytest.mark.parametrize("matches", [True, False]) +def test_rst_check_current_version_check_reflink(patches, matches): + version_file = rst_check.CurrentVersionFile("PATH") + patched = patches( + "INVALID_REFLINK", + prefix="tools.docs.rst_check") + + with patched as (m_reflink, ): + m_reflink.match.return_value = matches + result = version_file.check_reflink("LINE") + + assert ( + list(m_reflink.match.call_args) + == [('LINE',), {}]) + + if matches: + assert ( + result + == ['Found text " ref:". 
This should probably be " :ref:"\nLINE']) + else: + assert result == [] + + +@pytest.mark.parametrize( + "lines", + [[], + [[0, "AAA"], [1, "BBB"]], + [[0, "AAA"], [1, "BBB"], [2, "CCC"]], + [[0, "AAA"], [1, "Deprecated"], [2, "BBB"], [3, "CCC"]]]) +@pytest.mark.parametrize("errors", [[], ["err1", "err2"]]) +@pytest.mark.parametrize("matches", [True, False]) +def test_rst_check_current_version_run_checks(patches, lines, errors, matches): + version_file = rst_check.CurrentVersionFile("PATH") + patched = patches( + "enumerate", + "VERSION_HISTORY_SECTION_NAME", + "CurrentVersionFile.set_tokens", + "CurrentVersionFile.check_line", + ("CurrentVersionFile.lines", dict(new_callable=PropertyMock)), + prefix="tools.docs.rst_check") + + with patched as (m_enum, m_section, m_tokens, m_check, m_lines): + m_enum.return_value = lines + m_check.return_value = errors + m_section.match.return_value = matches + _result = version_file.run_checks() + assert isinstance(_result, types.GeneratorType) + result = list(_result) + + assert ( + list(m_enum.call_args) + == [(m_lines.return_value,), {}]) + + if not lines: + assert result == [] + assert not m_section.match.called + assert not m_check.called + assert ( + list(list(c) for c in m_tokens.call_args_list) + == [[(), {}]]) + return + + _match = [] + _tokens = 1 + _checks = [] + _errors = [] + + for line_number, line in lines: + _match.append(line) + if matches: + if line == "Deprecated": + break + _tokens += 1 + _checks.append(line) + for error in errors: + _errors.append((line_number, error)) + assert ( + list(list(c) for c in m_section.match.call_args_list) + == [[(line,), {}] for line in _match]) + assert ( + list(list(c) for c in m_tokens.call_args_list) + == [[(), {}]] * _tokens) + assert ( + list(list(c) for c in m_check.call_args_list) + == [[(line,), {}] for line in _checks]) + assert ( + result + == [f"(PATH:{line_number + 1}) {error}" + for line_number, error in _errors]) + + +@pytest.mark.parametrize("line", [None, "", 
"foo"]) +@pytest.mark.parametrize("first_word", [None, "", "foo"]) +@pytest.mark.parametrize("next_word", [None, "", "foo"]) +def test_rst_check_current_version_set_tokens(patches, line, first_word, next_word): + version_file = rst_check.CurrentVersionFile("PATH") + version_file.set_tokens(line, first_word, next_word) + assert version_file.first_word_of_prior_line == first_word + assert version_file.next_word_to_check == next_word + + +def test_rst_checker_constructor(): + checker = rst_check.RSTChecker("path1", "path2", "path3") + assert checker.checks == ("current_version", ) + assert checker.args.paths == ['path1', 'path2', 'path3'] + + +@pytest.mark.parametrize("errors", [[], ["err1", "err2"]]) +def test_rst_checker_check_current_version(patches, errors): + checker = rst_check.RSTChecker("path1", "path2", "path3") + + patched = patches( + "CurrentVersionFile", + "RSTChecker.error", + prefix="tools.docs.rst_check") + + with patched as (m_version, m_error): + m_version.return_value.run_checks.return_value = errors + checker.check_current_version() + + assert ( + list(m_version.call_args) + == [('docs/root/version_history/current.rst',), {}]) + assert ( + list(m_version.return_value.run_checks.call_args) + == [(), {}]) + + if not errors: + assert not m_error.called + else: + assert ( + list(m_error.call_args) + == [('current_version', ['err1', 'err2']), {}]) + + +def test_rst_checker_main(command_main): + command_main( + rst_check.main, + "tools.docs.rst_check.RSTChecker") diff --git a/tools/docs/tests/test_sphinx_runner.py b/tools/docs/tests/test_sphinx_runner.py new file mode 100644 index 0000000000000..527b7ef44c12b --- /dev/null +++ b/tools/docs/tests/test_sphinx_runner.py @@ -0,0 +1,659 @@ +from unittest.mock import MagicMock, PropertyMock + +import pytest + +from tools.docs import sphinx_runner + + +def test_sphinx_runner_constructor(): + runner = sphinx_runner.SphinxRunner() + assert runner.build_dir == "." 
+ runner._build_dir = "foo" + assert runner.build_dir == "foo" + assert runner._build_sha == "UNKNOWN" + assert "blob_dir" not in runner.__dict__ + + +@pytest.mark.parametrize("docs_tag", [None, "", "SOME_DOCS_TAG"]) +def test_sphinx_runner_blob_sha(patches, docs_tag): + runner = sphinx_runner.SphinxRunner() + patched = patches( + ("SphinxRunner.build_sha", dict(new_callable=PropertyMock)), + ("SphinxRunner.docs_tag", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_sha, m_tag): + m_tag.return_value = docs_tag + if docs_tag: + assert runner.blob_sha == docs_tag + else: + assert runner.blob_sha == m_sha.return_value + assert "blob_sha" not in runner.__dict__ + + +@pytest.mark.parametrize("build_sha", [None, "", "SOME_BUILD_SHA"]) +def test_sphinx_runner_build_sha(patches, build_sha): + runner = sphinx_runner.SphinxRunner() + patched = patches( + ("SphinxRunner.args", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_args, ): + m_args.return_value.build_sha = build_sha + if build_sha: + assert runner.build_sha == build_sha + else: + assert runner.build_sha == "UNKNOWN" + + assert "build_sha" not in runner.__dict__ + + +def test_sphinx_runner_colors(patches): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "Fore", + prefix="tools.docs.sphinx_runner") + + with patched as (m_colors, ): + assert ( + runner.colors + == dict( + chrome=m_colors.LIGHTYELLOW_EX, + key=m_colors.LIGHTCYAN_EX, + value=m_colors.LIGHTMAGENTA_EX)) + + assert "colors" in runner.__dict__ + + +def test_sphinx_runner_config_file(patches): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "open", + "yaml", + ("SphinxRunner.config_file_path", dict(new_callable=PropertyMock)), + ("SphinxRunner.configs", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_open, m_yaml, m_fpath, m_configs): + assert ( + runner.config_file + == m_fpath.return_value) + 
+ assert ( + list(m_open.call_args) + == [(m_fpath.return_value, 'w'), {}]) + assert ( + list(m_yaml.dump.call_args) + == [(m_configs.return_value,), {}]) + assert ( + m_open.return_value.__enter__.return_value.write.call_args + == [(m_yaml.dump.return_value,), {}]) + + assert "config_file" in runner.__dict__ + + +def test_sphinx_runner_config_file_path(patches): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "os.path", + ("SphinxRunner.build_dir", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_path, m_build): + assert runner.config_file_path == m_path.join.return_value + + assert ( + list(m_path.join.call_args) + == [(m_build.return_value, 'build.yaml',), {}]) + assert "config_file_path" not in runner.__dict__ + + +def test_sphinx_runner_configs(patches): + runner = sphinx_runner.SphinxRunner() + mapping = dict( + version_string="version_string", + release_level="release_level", + blob_sha="blob_sha", + version_number="version_number", + docker_image_tag_name="docker_image_tag_name", + validator_path="validator_path", + descriptor_path="descriptor_path") + + patched = patches( + *[f"SphinxRunner.{v}" for v in mapping.values()], + prefix="tools.docs.sphinx_runner") + + with patched as _mocks: + result = runner.configs + + _configs = {} + for k, v in mapping.items(): + _configs[k] = _mocks[list(mapping.values()).index(v)] + assert result == _configs + assert "configs" in runner.__dict__ + + +def test_sphinx_runner_descriptor_path(patches): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "os.path", + ("SphinxRunner.args", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_path, m_args): + assert ( + runner.descriptor_path + == m_path.abspath.return_value) + + assert ( + list(m_path.abspath.call_args) + == [(m_args.return_value.descriptor_path,), {}]) + assert "descriptor_path" not in runner.__dict__ + + +def 
test_sphinx_runner_docker_image_tag_name(patches): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "re", + ("SphinxRunner.version_number", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_re, m_version): + assert ( + runner.docker_image_tag_name + == m_re.sub.return_value) + + assert ( + list(m_re.sub.call_args) + == [('([0-9]+\\.[0-9]+)\\.[0-9]+.*', 'v\\1-latest', + m_version.return_value), {}]) + assert "docker_image_tag_name" not in runner.__dict__ + + +def test_sphinx_runner_docs_tag(patches): + runner = sphinx_runner.SphinxRunner() + patched = patches( + ("SphinxRunner.args", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_args, ): + assert runner.docs_tag == m_args.return_value.docs_tag + + assert "docs_tag" not in runner.__dict__ + + +def test_sphinx_runner_html_dir(patches): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "os.path", + ("SphinxRunner.build_dir", dict(new_callable=PropertyMock)), + ("SphinxRunner.args", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_path, m_build, m_args): + assert runner.html_dir == m_path.join.return_value + + assert ( + list(m_path.join.call_args) + == [(m_build.return_value, 'generated/html'), {}]) + + assert "html_dir" in runner.__dict__ + + +def test_sphinx_runner_output_filename(patches): + runner = sphinx_runner.SphinxRunner() + patched = patches( + ("SphinxRunner.args", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_args, ): + assert runner.output_filename == m_args.return_value.output_filename + assert "output_filename" not in runner.__dict__ + + +@pytest.mark.parametrize("major", [2, 3, 4]) +@pytest.mark.parametrize("minor", [5, 6, 7, 8, 9]) +def test_sphinx_runner_py_compatible(patches, major, minor): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "bool", + "sys", + 
prefix="tools.docs.sphinx_runner") + + with patched as (m_bool, m_sys): + m_sys.version_info.major = major + m_sys.version_info.minor = minor + assert runner.py_compatible == m_bool.return_value + expected = ( + True + if major == 3 and minor >= 8 + else False) + assert ( + list(m_bool.call_args) + == [(expected,), {}]) + assert "py_compatible" not in runner.__dict__ + + +@pytest.mark.parametrize("docs_tag", [None, "", "SOME_DOCS_TAG"]) +def test_sphinx_runner_release_level(patches, docs_tag): + runner = sphinx_runner.SphinxRunner() + patched = patches( + ("SphinxRunner.docs_tag", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_tag, ): + m_tag.return_value = docs_tag + if docs_tag: + assert runner.release_level == "tagged" + else: + assert runner.release_level == "pre-release" + assert "release_level" not in runner.__dict__ + + +@pytest.mark.parametrize("rst_tar", [None, "", "SOME_DOCS_TAG"]) +def test_sphinx_runner_rst_dir(patches, rst_tar): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "os.path", + "tarfile", + ("SphinxRunner.build_dir", dict(new_callable=PropertyMock)), + ("SphinxRunner.rst_tar", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_path, m_tar, m_dir, m_rst): + m_rst.return_value = rst_tar + assert runner.rst_dir == m_path.join.return_value + + assert ( + list(m_path.join.call_args) + == [(m_dir.return_value, 'generated/rst'), {}]) + + if rst_tar: + assert ( + list(m_tar.open.call_args) + == [(rst_tar,), {}]) + assert ( + list(m_tar.open.return_value.__enter__.return_value.extractall.call_args) + == [(), {'path': m_path.join.return_value}]) + else: + assert not m_tar.open.called + assert "rst_dir" in runner.__dict__ + + +def test_sphinx_runner_rst_tar(patches): + runner = sphinx_runner.SphinxRunner() + patched = patches( + ("SphinxRunner.args", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_args, 
): + assert runner.rst_tar == m_args.return_value.rst_tar + + assert "rst_tar" not in runner.__dict__ + + +def test_sphinx_runner_sphinx_args(patches): + runner = sphinx_runner.SphinxRunner() + patched = patches( + ("SphinxRunner.html_dir", dict(new_callable=PropertyMock)), + ("SphinxRunner.rst_dir", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_html, m_rst): + assert ( + runner.sphinx_args + == ['-W', '--keep-going', '--color', '-b', 'html', + m_rst.return_value, + m_html.return_value]) + + assert "sphinx_args" not in runner.__dict__ + + +def test_sphinx_runner_validator_path(patches): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "os.path", + ("SphinxRunner.args", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_path, m_args): + assert ( + runner.validator_path + == m_path.abspath.return_value) + + assert ( + list(m_path.abspath.call_args) + == [(m_args.return_value.validator_path,), {}]) + assert "validator_path" not in runner.__dict__ + + +def test_sphinx_runner_version_file(patches): + runner = sphinx_runner.SphinxRunner() + patched = patches( + ("SphinxRunner.args", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_args, ): + assert runner.version_file == m_args.return_value.version_file + + assert "version_file" not in runner.__dict__ + + +def test_sphinx_runner_version_number(patches): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "open", + ("SphinxRunner.version_file", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_open, m_file): + assert ( + runner.version_number + == m_open.return_value.__enter__.return_value.read.return_value.strip.return_value) + + assert ( + list(m_open.call_args) + == [(m_file.return_value,), {}]) + assert ( + list(m_open.return_value.__enter__.return_value.read.call_args) + == [(), {}]) + assert ( + 
list(m_open.return_value.__enter__.return_value.read.return_value.strip.call_args) + == [(), {}]) + + assert "version_number" in runner.__dict__ + + +@pytest.mark.parametrize("docs_tag", [None, "", "SOME_DOCS_TAG"]) +def test_sphinx_runner_version_string(patches, docs_tag): + runner = sphinx_runner.SphinxRunner() + patched = patches( + ("SphinxRunner.docs_tag", dict(new_callable=PropertyMock)), + ("SphinxRunner.build_sha", dict(new_callable=PropertyMock)), + ("SphinxRunner.version_number", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_tag, m_sha, m_version): + m_tag.return_value = docs_tag + if docs_tag: + assert runner.version_string == f"tag-{docs_tag}" + else: + assert runner.version_string == f"{m_version.return_value}-{m_sha.return_value.__getitem__.return_value}" + assert ( + list(m_sha.return_value.__getitem__.call_args) + == [(slice(None, 6, None),), {}]) + + assert "version_string" not in runner.__dict__ + + +def test_sphinx_runner_add_arguments(): + runner = sphinx_runner.SphinxRunner() + parser = MagicMock() + runner.add_arguments(parser) + assert ( + list(list(c) for c in parser.add_argument.call_args_list) + == [[('--build_sha',), {}], + [('--docs_tag',), {}], + [('--version_file',), {}], + [('--validator_path',), {}], + [('--descriptor_path',), {}], + [('rst_tar',), {}], + [('output_filename',), {}]]) + + +@pytest.mark.parametrize("fails", [True, False]) +def test_sphinx_runner_build_html(patches, fails): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "sphinx_build", + ("SphinxRunner.sphinx_args", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_sphinx, m_args): + m_sphinx.side_effect = lambda s: fails + e = None + if fails: + with pytest.raises(sphinx_runner.SphinxBuildError) as e: + runner.build_html() + else: + runner.build_html() + + assert ( + list(m_sphinx.call_args) + == [(m_args.return_value,), {}]) + + if fails: + assert e.value.args 
== ('BUILD FAILED',) + else: + assert not e + + +def test_sphinx_runner_build_summary(patches): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "print", + "SphinxRunner._color", + ("SphinxRunner.configs", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_print, m_color, m_configs): + m_configs.return_value.items.return_value = (("a", "A"), ("b", "B")) + runner.build_summary() + + assert ( + list(list(c) for c in m_print.call_args_list) + == [[(), {}], + [(m_color.return_value,), {}], + [(m_color.return_value,), {}], + [(f"{m_color.return_value} {m_color.return_value}: {m_color.return_value}",), {}], + [(f"{m_color.return_value} {m_color.return_value}: {m_color.return_value}",), {}], + [(m_color.return_value,), {}], + [(m_color.return_value,), {}], + [(), {}]]) + assert ( + list(list(c) for c in m_color.call_args_list) + == [[('#### Sphinx build configs #####################',), {}], + [('###',), {}], + [('###',), {}], + [('a', 'key'), {}], + [('A', 'value'), {}], + [('###',), {}], + [('b', 'key'), {}], + [('B', 'value'), {}], + [('###',), {}], + [('###############################################',), {}]]) + + +@pytest.mark.parametrize("py_compat", [True, False]) +@pytest.mark.parametrize("release_level", ["pre-release", "tagged"]) +@pytest.mark.parametrize("version_number", ["1.17", "1.23", "1.43"]) +@pytest.mark.parametrize("docs_tag", ["v1.17", "v1.23", "v1.73"]) +@pytest.mark.parametrize("current", ["XXX v1.17 ZZZ", "AAA v1.23 VVV", "BBB v1.73 EEE"]) +def test_sphinx_runner_check_env(patches, py_compat, release_level, version_number, docs_tag, current): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "open", + "os.path", + "platform", + ("SphinxRunner.configs", dict(new_callable=PropertyMock)), + ("SphinxRunner.version_number", dict(new_callable=PropertyMock)), + ("SphinxRunner.docs_tag", dict(new_callable=PropertyMock)), + ("SphinxRunner.py_compatible", dict(new_callable=PropertyMock)), + 
("SphinxRunner.rst_dir", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + fails = ( + not py_compat + or (release_level == "tagged" + and (f"v{version_number}" != docs_tag + or version_number not in current))) + + with patched as (m_open, m_path, m_platform, m_configs, m_version, m_tag, m_py, m_rst): + m_py.return_value = py_compat + m_configs.return_value.__getitem__.return_value = release_level + m_version.return_value = version_number + m_tag.return_value = docs_tag + m_open.return_value.__enter__.return_value.read.return_value = current + + if fails: + with pytest.raises(sphinx_runner.SphinxEnvError) as e: + runner.check_env() + else: + runner.check_env() + + if not py_compat: + assert ( + e.value.args + == ("ERROR: python version must be >= 3.8, " + f"you have {m_platform.python_version.return_value}", )) + assert not m_open.called + return + + if release_level != "tagged": + assert not m_open.called + return + + if f"v{version_number}" != docs_tag: + assert not m_open.called + assert ( + e.value.args + == ("Given git tag does not match the VERSION file content:" + f"{docs_tag} vs v{version_number}", )) + return + + assert ( + list(m_open.call_args) + == [(m_path.join.return_value,), {}]) + assert ( + list(m_path.join.call_args) + == [(m_rst.return_value, "version_history/current.rst"), {}]) + assert ( + list(m_open.return_value.__enter__.return_value.read.call_args) + == [(), {}]) + + if version_number not in current: + assert ( + e.value.args + == (f"Git tag ({version_number}) not found in version_history/current.rst", )) + + +def test_sphinx_runner_create_tarball(patches): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "tarfile", + ("SphinxRunner.output_filename", dict(new_callable=PropertyMock)), + ("SphinxRunner.html_dir", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_tar, m_out, m_html): + runner.create_tarball() + + assert ( + list(m_tar.open.call_args) + == 
[(m_out.return_value, 'w'), {}]) + assert ( + list(m_tar.open.return_value.__enter__.return_value.add.call_args) + == [(m_html.return_value,), {'arcname': '.'}]) + + +def test_sphinx_runner_run(patches): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "tempfile", + "SphinxRunner._run", + prefix="tools.docs.sphinx_runner") + + with patched as (m_tmp, m_run): + assert runner.run() == m_run.return_value + + assert ( + list(m_run.call_args) + == [(m_tmp.TemporaryDirectory.return_value.__enter__.return_value,), {}]) + + +@pytest.mark.parametrize("color", [None, "COLOR"]) +def test_sphinx_runner__color(patches, color): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "Style", + ("SphinxRunner.colors", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_style, m_colors): + assert ( + runner._color("MSG", color) + == f"{m_colors.return_value.__getitem__.return_value}MSG{m_style.RESET_ALL}") + assert ( + list(m_colors.return_value.__getitem__.call_args) + == [(color or "chrome",), {}]) + + +@pytest.mark.parametrize("check_fails", [True, False]) +@pytest.mark.parametrize("build_fails", [True, False]) +def test_sphinx_runner__run(patches, check_fails, build_fails): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "print", + "os", + "SphinxRunner.build_summary", + "SphinxRunner.check_env", + "SphinxRunner.build_html", + "SphinxRunner.create_tarball", + ("SphinxRunner.config_file", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + def _raise(error): + raise error + + with patched as (m_print, m_os, m_summary, m_check, m_build, m_create, m_config): + if check_fails: + _check_error = sphinx_runner.SphinxEnvError("CHECK FAILED") + m_check.side_effect = lambda: _raise(_check_error) + if build_fails: + _build_error = sphinx_runner.SphinxBuildError("BUILD FAILED") + m_build.side_effect = lambda: _raise(_build_error) + assert runner._run("BUILD_DIR") == (1 if (check_fails or 
build_fails) else None) + + assert ( + runner._build_dir + == "BUILD_DIR") + assert ( + list(m_check.call_args) + == [(), {}]) + assert ( + list(m_os.environ.__setitem__.call_args) + == [('ENVOY_DOCS_BUILD_CONFIG', m_config.return_value), {}]) + + if check_fails: + assert ( + list(m_print.call_args) + == [(_check_error,), {}]) + assert not m_summary.called + assert not m_build.called + assert not m_create.called + return + + assert ( + list(m_summary.call_args) + == [(), {}]) + assert ( + list(m_build.call_args) + == [(), {}]) + + if build_fails: + assert ( + list(m_print.call_args) + == [(_build_error,), {}]) + assert not m_create.called + return + + assert not m_print.called + assert ( + list(m_create.call_args) + == [(), {}]) + + +def test_sphinx_runner_main(command_main): + command_main( + sphinx_runner.main, + "tools.docs.sphinx_runner.SphinxRunner") diff --git a/tools/proto_format/proto_format.sh b/tools/proto_format/proto_format.sh index 2f64f9eb1cac8..e80dab257f55b 100755 --- a/tools/proto_format/proto_format.sh +++ b/tools/proto_format/proto_format.sh @@ -20,12 +20,6 @@ read -ra BAZEL_BUILD_OPTIONS <<< "${BAZEL_BUILD_OPTIONS:-}" exit 0 } -if [[ "$2" == "--test" ]]; then - echo "protoxform_test..." - ./tools/protoxform/protoxform_test.sh - bazel test "${BAZEL_BUILD_OPTIONS[@]}" //tools/protoxform:merge_active_shadow_test -fi - # Generate //versioning:active_protos. 
./tools/proto_format/active_protos_gen.py ./api > ./api/versioning/BUILD diff --git a/tools/protoxform/protoxform_test.sh b/tools/protoxform/protoxform_test.sh index 66e6fc4efbac1..69cbc859aa520 100755 --- a/tools/protoxform/protoxform_test.sh +++ b/tools/protoxform/protoxform_test.sh @@ -10,7 +10,6 @@ TOOLS="$(dirname "$(dirname "$(realpath "$0")")")" # to satisfy dependency on run_command export PYTHONPATH="$TOOLS" - # protoxform fix test cases PROTO_TARGETS=() protos=$(bazel query "labels(srcs, labels(deps, //tools/testdata/protoxform:fix_protos))") diff --git a/tools/spelling/check_spelling.sh b/tools/spelling/check_spelling.sh deleted file mode 100755 index b5a0a9847d0b3..0000000000000 --- a/tools/spelling/check_spelling.sh +++ /dev/null @@ -1,81 +0,0 @@ -#!/bin/bash - -# Applies requisite code formatters to the source tree -# check_spelling.sh - -# Why choose misspell? -# https://github.com/client9/misspell#what-are-other-misspelling-correctors-and-whats-wrong-with-them - -set -u -set -e - -VERSION="0.3.4" -LINUX_MISSPELL_SHA="34d489dbc5ddb4dfd6d3cfac9fde8660e6c37e6c" -MAC_MISSPELL_SHA="f2607e2297b9e8af562e384c38045033375c7433" -TMP_DIR="/tmp" -OS="" - -MISSPELL_ARGS="-error -o stderr" - -if [[ "$#" -lt 1 ]]; then - echo "Usage: $0 check|fix" - exit 1 -fi - -if [[ "$1" == "fix" ]]; then - MISSPELL_ARGS="-w" -fi - -if [[ "$(uname)" == "Darwin" ]]; then - OS="mac" -elif [[ "$(uname)" == "Linux" ]]; then - OS="linux" -else - echo "Current only support mac/Linux" - exit 1 -fi - -SCRIPTPATH=$( cd "$(dirname "$0")" ; pwd -P ) -ROOTDIR="${SCRIPTPATH}/../.." -cd "$ROOTDIR" - -BIN_FILENAME="misspell_${VERSION}_${OS}_64bit.tar.gz" -# Install tools we need -if [[ ! -e "${TMP_DIR}/misspell" ]]; then - if ! 
wget https://github.com/client9/misspell/releases/download/v"${VERSION}"/"${BIN_FILENAME}" \ - -O "${TMP_DIR}/${BIN_FILENAME}" --no-verbose --tries=3 -o "${TMP_DIR}/wget.log"; then - cat "${TMP_DIR}/wget.log" - exit 1 - fi - tar -xvf "${TMP_DIR}/${BIN_FILENAME}" -C "${TMP_DIR}" &> /dev/null -fi - -ACTUAL_SHA="" -EXPECT_SHA="" - -if [[ "${OS}" == "linux" ]]; then - ACTUAL_SHA=$(sha1sum "${TMP_DIR}"/misspell|cut -d' ' -f1) - EXPECT_SHA="${LINUX_MISSPELL_SHA}" -else - ACTUAL_SHA=$(shasum -a 1 "${TMP_DIR}"/misspell|cut -d' ' -f1) - EXPECT_SHA="${MAC_MISSPELL_SHA}" -fi - -if [[ ! ${ACTUAL_SHA} == "${EXPECT_SHA}" ]]; then - echo "Expect shasum is ${ACTUAL_SHA}, but actual is shasum ${EXPECT_SHA}" - exit 1 -fi - -chmod +x "${TMP_DIR}/misspell" - -# Spell checking -# All the skipping files are defined in tools/spelling/spelling_skip_files.txt -read -ra SKIP_FILES < "${ROOTDIR}/tools/spelling/spelling_skip_files.txt" -read -ra SKIP_FILES <<< "${SKIP_FILES[@]/#/-e }" - -# All the ignore words are defined in tools/spelling/spelling_allowlist_words.txt -SPELLING_ALLOWLIST_WORDS_FILE="${ROOTDIR}/tools/spelling/spelling_allowlist_words.txt" -ALLOWLIST_WORDS=$(grep -vE '^#|^$' "${SPELLING_ALLOWLIST_WORDS_FILE}" | xargs | tr ' ' ',') - -git ls-files | grep -v "${SKIP_FILES[@]}" | xargs "${TMP_DIR}/misspell" -i \ - "${ALLOWLIST_WORDS}" ${MISSPELL_ARGS} diff --git a/tools/spelling/spelling_allowlist_words.txt b/tools/spelling/spelling_allowlist_words.txt deleted file mode 100644 index 3f7b1b8050877..0000000000000 --- a/tools/spelling/spelling_allowlist_words.txt +++ /dev/null @@ -1,2 +0,0 @@ -# One word per line, these words are not spell checked. -# you can add a comment to each word to explain why you don't need to do a spell check. 
diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index 6291ed0b42e91..f5dad4b68e77f 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -937,6 +937,8 @@ preorder prepend prepended prepends +preresolve +preresolved prev probabilistically proc diff --git a/tools/spelling/spelling_skip_files.txt b/tools/spelling/spelling_skip_files.txt deleted file mode 100644 index 8351c72345a44..0000000000000 --- a/tools/spelling/spelling_skip_files.txt +++ /dev/null @@ -1 +0,0 @@ -OWNERS.md corpus diff --git a/tools/vscode/generate_debug_config.py b/tools/vscode/generate_debug_config.py index 8fe44872cf4c5..92964376b051c 100755 --- a/tools/vscode/generate_debug_config.py +++ b/tools/vscode/generate_debug_config.py @@ -89,19 +89,31 @@ def lldb_config(target, binary, workspace, execroot, arguments): } -def add_to_launch_json(target, binary, workspace, execroot, arguments, debugger_type): +def add_to_launch_json(target, binary, workspace, execroot, arguments, debugger_type, overwrite): launch = get_launch_json(workspace) new_config = {} + always_overwritten_fields = [] if debugger_type == "lldb": + always_overwritten_fields = ["program", "sourceMap", "cwd", "type", "request"] new_config = lldb_config(target, binary, workspace, execroot, arguments) else: + always_overwritten_fields = [ + "request", "type", "target", "debugger_args", "cwd", "valuesFormatting" + ] new_config = gdb_config(target, binary, workspace, execroot, arguments) configurations = launch.get("configurations", []) for config in configurations: if config.get("name", None) == new_config["name"]: - config.clear() - config.update(new_config) + if overwrite: + config.clear() + config.update(new_config) + else: + for k in always_overwritten_fields: + config[k] = new_config[k] + print( + f"old config exists, only {always_overwritten_fields} will be updated, use --overwrite to recreate config" + ) break else: 
configurations.append(new_config) @@ -112,13 +124,18 @@ def add_to_launch_json(target, binary, workspace, execroot, arguments, debugger_ if __name__ == "__main__": parser = argparse.ArgumentParser(description='Build and generate launch config for VSCode') - parser.add_argument('--debugger', default="gdb") - parser.add_argument('--args', default='') - parser.add_argument('target') + parser.add_argument('--debugger', default="gdb", help="debugger type, one of [gdb, lldb]") + parser.add_argument('--args', default='', help="command line arguments if target binary") + parser.add_argument( + '--overwrite', + action="store_true", + help="recreate config without preserving any existing config") + parser.add_argument('target', help="target binary which you want to build") args = parser.parse_args() workspace = get_workspace() execution_root = get_execution_root(workspace) debug_binary = build_binary_with_debug_info(args.target) add_to_launch_json( - args.target, debug_binary, workspace, execution_root, args.args, args.debugger) + args.target, debug_binary, workspace, execution_root, args.args, args.debugger, + args.overwrite)