diff --git a/.azure-pipelines/linux.yml b/.azure-pipelines/linux.yml new file mode 100644 index 0000000000000..462332c758285 --- /dev/null +++ b/.azure-pipelines/linux.yml @@ -0,0 +1,14 @@ +resources: + containers: + - container: envoy-build + image: envoyproxy/envoy-build:07f65995ece0b8a21bb2888d07a8c1d060e7daaa + +jobs: +- job: BuildEnvoy + timeoutInMinutes: 360 + pool: + vmImage: 'Ubuntu 16.04' + container: envoy-build + steps: + - script: bazel build //source/exe:envoy-static + diff --git a/.azure-pipelines/macos.yml b/.azure-pipelines/macos.yml new file mode 100644 index 0000000000000..d05887f372544 --- /dev/null +++ b/.azure-pipelines/macos.yml @@ -0,0 +1,24 @@ +# Azure Pipelines +trigger: +- master + +jobs: +- job: macOS + timeoutInMinutes: 360 + pool: + vmImage: 'macos-latest' + + steps: + - script: ./ci/mac_ci_setup.sh + displayName: 'Install dependencies' + + - script: ./ci/mac_ci_steps.sh + displayName: 'Run Mac CI' + env: + BAZEL_REMOTE_CACHE: https://storage.googleapis.com/envoy-circleci-bazel-cache/ + + - task: PublishTestResults@2 + inputs: + testResultsFiles: '**/bazel-testlogs/**/test.xml' + testRunTitle: 'macOS' + condition: always() diff --git a/.bazelignore b/.bazelignore new file mode 100644 index 0000000000000..24185aa19e58d --- /dev/null +++ b/.bazelignore @@ -0,0 +1 @@ +examples/grpc-bridge/script diff --git a/.bazelrc b/.bazelrc index 9ab266df6c3bc..eeafe30b73294 100644 --- a/.bazelrc +++ b/.bazelrc @@ -6,6 +6,7 @@ # leave room for compiler/linker. 
startup --host_jvm_args=-Xmx512m build --workspace_status_command=bazel/get_workspace_status +build --experimental_remap_main_repo # Basic ASAN/UBSAN that works for gcc build:asan --define ENVOY_CONFIG_ASAN=1 @@ -13,6 +14,7 @@ build:asan --copt -fsanitize=address,undefined build:asan --linkopt -fsanitize=address,undefined build:asan --copt -fno-sanitize=vptr build:asan --linkopt -fno-sanitize=vptr +build:asan --linkopt -fuse-ld=lld build:asan --linkopt -ldl build:asan --define tcmalloc=disabled build:asan --build_tag_filters=-no_asan @@ -20,12 +22,12 @@ build:asan --test_tag_filters=-no_asan build:asan --define signal_trace=disabled build:asan --copt -DADDRESS_SANITIZER=1 build:asan --copt -D__SANITIZE_ADDRESS__ -build:asan --test_env=ASAN_OPTIONS=handle_abort=1:allow_addr2line=true:check_initialization_order=true:strict_init_order=true +build:asan --test_env=ASAN_OPTIONS=handle_abort=1:allow_addr2line=true:check_initialization_order=true:strict_init_order=true:detect_odr_violation=1 +build:asan --test_env=UBSAN_OPTIONS=halt_on_error=true:print_stacktrace=1 build:asan --test_env=ASAN_SYMBOLIZER_PATH # Clang ASAN/UBSAN build:clang-asan --config=asan -build:clang-asan --linkopt -fuse-ld=lld # macOS ASAN/UBSAN build:macos-asan --config=asan @@ -37,8 +39,10 @@ build:macos-asan --copt -D_FORTIFY_SOURCE=0 build:clang-tsan --define ENVOY_CONFIG_TSAN=1 build:clang-tsan --copt -fsanitize=thread build:clang-tsan --linkopt -fsanitize=thread -build:clang-tsan --define tcmalloc=disabled build:clang-tsan --linkopt -fuse-ld=lld +build:clang-tsan --define tcmalloc=disabled +# Needed due to https://github.com/libevent/libevent/issues/777 +build:clang-tsan --copt -DEVENT__DISABLE_DEBUG_MODE # Clang MSAN - broken today since we need to rebuild lib[std]c++ and external deps with MSAN # support (see https://github.com/envoyproxy/envoy/issues/443). 
@@ -47,15 +51,15 @@ build:clang-msan --copt -fsanitize=memory build:clang-msan --linkopt -fsanitize=memory build:clang-msan --define tcmalloc=disabled build:clang-msan --copt -fsanitize-memory-track-origins=2 -build:clang-msan --linkopt -fuse-ld=lld # Clang with libc++ +# TODO(cmluciano) fix and re-enable _LIBCPP_VERSION testing for TCMALLOC in Envoy::Stats::TestUtil::hasDeterministicMallocStats +# and update stats_integration_test with appropriate m_per_cluster value build:libc++ --action_env=CC build:libc++ --action_env=CXX build:libc++ --action_env=CXXFLAGS=-stdlib=libc++ build:libc++ --action_env=PATH build:libc++ --define force_libcpp=enabled -build:libc++ --linkopt -fuse-ld=lld # Test options test --test_env=HEAPCHECK=normal --test_env=PPROF_PATH diff --git a/.circleci/config.yml b/.circleci/config.yml index 93d74d757170c..ccfdd86ecd283 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -4,13 +4,15 @@ executors: ubuntu-build: description: "A regular build executor based on ubuntu image" docker: - - image: envoyproxy/envoy-build:698009170e362f9ca0594f2b1927fbbee199bf98 + - image: envoyproxy/envoy-build:cfc514546bc0284536893cca5fa43d7128edcd35 resource_class: xlarge working_directory: /source jobs: release: executor: ubuntu-build + environment: + BAZEL_REMOTE_CACHE: https://storage.googleapis.com/envoy-circleci-bazel-cache/ steps: - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken - checkout @@ -18,12 +20,26 @@ jobs: command: ci/do_circle_ci.sh bazel.release no_output_timeout: 60m - - setup_remote_docker - - run: ci/docker_push.sh - - run: ci/docker_tag.sh + - persist_to_workspace: + root: . + paths: + - build_release + - build_release_stripped - store_artifacts: path: /build/envoy/generated destination: / + docker: + docker: + - image: docker + steps: + - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken + - checkout + - attach_workspace: + at: . 
+ - setup_remote_docker + - run: ci/docker_build.sh + - run: ci/docker_push.sh + - run: ci/docker_tag.sh asan: executor: ubuntu-build steps: @@ -159,15 +175,6 @@ jobs: - run: docs/publish.sh - store_artifacts: path: generated/docs - mac: - macos: - xcode: "9.3.0" - steps: - - run: sudo ntpdate -vu time.apple.com - - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken - - checkout - - run: ci/mac_ci_setup.sh - - run: ci/mac_ci_steps.sh workflows: version: 2 @@ -177,6 +184,11 @@ workflows: filters: tags: only: /^v.*/ + - docker: + requires: [release] + filters: + tags: + only: /^v.*/ - asan - tsan - compile_time_options @@ -191,4 +203,3 @@ workflows: filters: tags: only: /^v.*/ - - mac diff --git a/.clang-tidy b/.clang-tidy index 0794aa66661f2..a62ee3c944146 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -1,7 +1,7 @@ -Checks: 'clang-diagnostic-*,clang-analyzer-*,abseil-*,bugprone-*,modernize-*,performance-*,readability-redundant-*,readability-braces-around-statements' +Checks: 'clang-diagnostic-*,clang-analyzer-*,abseil-*,bugprone-*,modernize-*,performance-*,readability-redundant-*,readability-braces-around-statements,readability-container-size-empty' #TODO(lizan): grow this list, fix possible warnings and make more checks as error -WarningsAsErrors: 'bugprone-assert-side-effect,modernize-make-shared,modernize-make-unique,readability-redundant-smartptr-get,readability-braces-around-statements,readability-redundant-string-cstr,bugprone-use-after-move' +WarningsAsErrors: 'bugprone-assert-side-effect,modernize-make-shared,modernize-make-unique,readability-redundant-smartptr-get,readability-braces-around-statements,readability-redundant-string-cstr,bugprone-use-after-move,readability-container-size-empty' CheckOptions: - key: bugprone-assert-side-effect.AssertMacros diff --git a/.gitignore b/.gitignore index 524b2764489e8..14327c0f31b28 100644 --- a/.gitignore +++ b/.gitignore @@ -2,18 +2,22 @@ BROWSE /build /build_* +*.bzlc .cache +.classpath 
+.clwb/ /ci/bazel-* -/ci/prebuilt/thirdparty -/ci/prebuilt/thirdparty_build compile_commands.json cscope.* .deps /docs/landing_source/.bundle /generated +.idea/ +.project *.pyc **/pyformat SOURCE_VERSION +.settings/ *.sw* tags TAGS diff --git a/CODEOWNERS b/CODEOWNERS index abdf25c16f860..4916bead4c8d0 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -2,8 +2,10 @@ # By default, @envoyproxy/maintainers own everything. #* @envoyproxy/maintainers +# csrf extension +/*/extensions/filters/http/csrf @dschaller @mattklein123 # dubbo_proxy extension -/*/extensions/filters/network/dubbo_proxy @zyfjeff @lizan +/*/extensions/filters/network/dubbo_proxy @zyfjeff @lizan # thrift_proxy extension /*/extensions/filters/network/thrift_proxy @zuercher @brian-pane # jwt_authn http filter extension @@ -22,3 +24,5 @@ /*/extensions/filters/network/mysql_proxy @rshriram @venilnoronha @mattklein123 # quic extension /*/extensions/quic_listeners/ @alyssawilk @danzh2010 @mattklein123 @mpwarres @wu-bin +# zookeeper_proxy extension +/*/extensions/filters/network/zookeeper_proxy @rgs1 @snowp diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index de6cfbc6080bc..345192d66ddfe 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -33,6 +33,17 @@ maximize the chances of your PR being merged. deprecations between 1.3.0 and 1.4.0 will be deleted soon AFTER 1.5.0 is tagged and released (at the beginning of the 1.6.0 release cycle). This results in a three to six month window for migrating from deprecated code paths to new code paths. +* Unless the community and Envoy maintainer team agrees on an exception, during the + first release cycle after a feature has been deprecated, use of that feature + will cause a logged warning, and incrementing the + [runtime](https://www.envoyproxy.io/docs/envoy/latest/configuration/runtime#config-runtime) + runtime.deprecated_feature_use stat. 
+ During the second release cycle, use of the deprecated configuration will + cause a configuration load failure, unless the feature in question is + explicitly overridden in + [runtime](https://www.envoyproxy.io/docs/envoy/latest/configuration/runtime#config-runtime) + config. Finally during the third release cycle the code and configuration will be removed + entirely. * This policy means that organizations deploying master should have some time to get ready for breaking changes, but we make no guarantees about the length of time. * The breaking change policy also applies to source level extensions (e.g., filters). Code that @@ -40,9 +51,11 @@ maximize the chances of your PR being merged. deprecation window. Within this window, a warning of deprecation should be carefully logged (some features might need rate limiting for logging this). We make no guarantees about code or deployments that rely on undocumented behavior. -* All deprecations/breaking changes will be clearly listed in [DEPRECATED.md](DEPRECATED.md). -* All deprecations/breaking changes will be announced to the - [envoy-announce](https://groups.google.com/forum/#!forum/envoy-announce) email list. +* All deprecations/breaking changes will be clearly listed in the [deprecated log](docs/root/intro/deprecated.rst). +* High risk deprecations/breaking changes may be announced to the + [envoy-announce](https://groups.google.com/forum/#!forum/envoy-announce) email list but by default + it is expected the multi-phase warn-by-default/fail-by-default is sufficient to warn users to move + away from deprecated features. * Protobuf configuration in an alpha namespace, e.g. `v2alpha`, do not have any restrictions on breaking changes. They may be freely modified, together with their respective features. @@ -119,13 +132,69 @@ maximize the chances of your PR being merged. changes for 7 days. Obviously PRs that are closed due to lack of activity can be reopened later. 
Closing stale PRs helps us to keep on top of all of the work currently in flight. * If a commit deprecates a feature, the commit message must mention what has been deprecated. - Additionally, [DEPRECATED.md](DEPRECATED.md) must be updated as part of the commit. + Additionally, the [deprecated log](docs/root/intro/deprecated.rst) must be updated with relevant + RST links for fields and messages as part of the commit. * Please consider joining the [envoy-dev](https://groups.google.com/forum/#!forum/envoy-dev) mailing list. * If your PR involves any changes to [envoy-filter-example](https://github.com/envoyproxy/envoy-filter-example) (for example making a new branch so that CI can pass) it is your responsibility to follow through with merging those changes back to master once the CI dance is done. +* If your PR is a high risk change, the reviewer may ask that you runtime guard + it. See the section on runtime guarding below. + + +# Runtime guarding + +Some high risk changes in Envoy are deemed worthy of runtime guarding. Instead of just replacing +old code with new code, both code paths are supported for between one Envoy release (if it is +guarded due to performance concerns) and a full deprecation cycle (if it is a high risk behavioral +change). + +The canonical way to runtime guard a feature is +``` +if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.my_feature_name")) { + [new code path] +} else { + [old_code_path] +} +``` +Runtime guarded features named with the "envoy.reloadable_features." prefix must be safe to flip +true or false on running Envoy instances. In some situations it may make more sense to +latch the value in a member variable on class creation, for example: + +``` +bool use_new_code_path_ = + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.my_feature_name") +``` + +This should only be done if the lifetime of the object in question is relatively short compared to +the lifetime of most Envoy instances, i.e. 
latching state on creation of the +Http::ConnectionManagerImpl or all Network::ConnectionImpl classes, to ensure that the new behavior +will be exercised as the runtime value is flipped, and that the old behavior will trail off over +time. + +Runtime guarded features may either set true (running the new code by default) in the initial PR, +after a testing interval, or during the next release cycle, at the PR author's and reviewing +maintainer's discretion. Generally all runtime guarded features will be set true when a +release is cut, and the old code path will be deprecated at that time. Runtime features +are set true by default by inclusion in +[source/common/runtime/runtime_features.h](https://github.com/envoyproxy/envoy/blob/master/source/common/runtime/runtime_features.h) + +There are four suggested options for testing new runtime features: + +1. Create a per-test Runtime::LoaderSingleton as done in [DeprecatedFieldsTest.IndividualFieldDisallowedWithRuntimeOverride](https://github.com/envoyproxy/envoy/blob/master/test/common/protobuf/utility_test.cc) +2. Create a [parameterized test](https://github.com/google/googletest/blob/master/googletest/docs/advanced.md#how-to-write-value-parameterized-tests) + where the set up of the test sets the new runtime value explicitly to + GetParam() as outlined in (1). +3. Set up integration tests with custom runtime defaults as documented in the + [integration test README](https://github.com/envoyproxy/envoy/blob/master/test/integration/README.md) +4. Run a given unit test with the new runtime value explicitly set true as done + for [runtime_flag_override_test](https://github.com/envoyproxy/envoy/blob/master/test/common/runtime/BUILD) + +Runtime code is held to the same standard as regular Envoy code, so both the old +path and the new should have 100% coverage both with the feature defaulting true +and false. 
# PR review policy for maintainers @@ -169,7 +238,7 @@ The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch. The rules are pretty simple: if you can certify the below (from -[developercertificate.org](http://developercertificate.org/)): +[developercertificate.org](https://developercertificate.org/)): ``` Developer Certificate of Origin @@ -228,7 +297,7 @@ git config --add alias.c "commit -s" ## Fixing DCO If your PR fails the DCO check, it's necessary to fix the entire commit history in the PR. Best -practice is to [squash](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html) +practice is to [squash](https://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html) the commit history to a single commit, append the DCO sign-off as described above, and [force push](https://git-scm.com/docs/git-push#git-push---force). For example, if you have 2 commits in your history: @@ -244,9 +313,17 @@ should only be done to correct a DCO mistake. ## Triggering CI re-run without making changes -Sometimes CI test runs fail due to obvious resource problems or other issues -which are not related to your PR. It may be desirable to re-trigger CI without -making any code changes. Consider adding an alias into your `.gitconfig` file: +To rerun failed tasks in CI, add a comment with the line + +``` +/retest +``` + +in it. This should rebuild only the failed tasks. + +Sometimes tasks will be stuck in CI and won't be marked as failed, which means +the above command won't work. Should this happen, pushing an empty commit should +re-run all the CI tasks. 
Consider adding an alias into your `.gitconfig` file: ``` [alias] diff --git a/DEPRECATED.md b/DEPRECATED.md index d7b6997c034a4..1b2962adcb975 100644 --- a/DEPRECATED.md +++ b/DEPRECATED.md @@ -1,113 +1,3 @@ # DEPRECATED -As of release 1.3.0, Envoy will follow a -[Breaking Change Policy](https://github.com/envoyproxy/envoy/blob/master//CONTRIBUTING.md#breaking-change-policy). - -The following features have been DEPRECATED and will be removed in the specified release cycle. -A logged warning is expected for each deprecated item that is in deprecation window. - -## Version 1.10.0 (pending) -* Use of `enabled` in `CorsPolicy`, found in - [route.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/api/v2/route/route.proto). - Set the `filter_enabled` field instead. -* Use of google.protobuf.Struct for extension opaque configs is deprecated. Use google.protobuf.Any instead or pack -google.protobuf.Struct in google.protobuf.Any. - -## Version 1.9.0 (Dec 20, 2018) - -* Order of execution of the network write filter chain has been reversed. Prior to this release cycle it was incorrect, see [#4599](https://github.com/envoyproxy/envoy/issues/4599). In the 1.9.0 release cycle we introduced `bugfix_reverse_write_filter_order` in [lds.proto] (https://github.com/envoyproxy/envoy/blob/master/api/envoy/api/v2/lds.proto) to temporarily support both old and new behaviors. Note this boolean field is deprecated. -* Order of execution of the HTTP encoder filter chain has been reversed. Prior to this release cycle it was incorrect, see [#4599](https://github.com/envoyproxy/envoy/issues/4599). In the 1.9.0 release cycle we introduced `bugfix_reverse_encode_order` in [http_connection_manager.proto] (https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto) to temporarily support both old and new behaviors. Note this boolean field is deprecated. 
-* Use of the v1 REST_LEGACY ApiConfigSource is deprecated. -* Use of std::hash in the ring hash load balancer is deprecated. -* Use of `rate_limit_service` configuration in the [bootstrap configuration](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/bootstrap/v2/bootstrap.proto) is deprecated. -* Use of `runtime_key` in `RequestMirrorPolicy`, found in - [route.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/api/v2/route/route.proto) - is deprecated. Set the `runtime_fraction` field instead. -* Use of buffer filter `max_request_time` is deprecated in favor of the request timeout found in [HttpConnectionManager](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto) - -## Version 1.8.0 (Oct 4, 2018) - -* Use of the v1 API (including `*.deprecated_v1` fields in the v2 API) is deprecated. - See envoy-announce [email](https://groups.google.com/forum/#!topic/envoy-announce/oPnYMZw8H4U). -* Use of the legacy - [ratelimit.proto](https://github.com/envoyproxy/envoy/blob/b0a518d064c8255e0e20557a8f909b6ff457558f/source/common/ratelimit/ratelimit.proto) - is deprecated, in favor of the proto defined in - [date-plane-api](https://github.com/envoyproxy/envoy/blob/master/api/envoy/service/ratelimit/v2/rls.proto) - Prior to 1.8.0, Envoy can use either proto to send client requests to a ratelimit server with the use of the - `use_data_plane_proto` boolean flag in the [ratelimit configuration](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/ratelimit/v2/rls.proto). - However, when using the deprecated client a warning is logged. -* Use of the --v2-config-only flag. -* Use of both `use_websocket` and `websocket_config` in - [route.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/api/v2/route/route.proto) - is deprecated. 
Please use the new `upgrade_configs` in the - [HttpConnectionManager](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto) - instead. -* Use of the integer `percent` field in [FaultDelay](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/fault/v2/fault.proto) - and in [FaultAbort](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/http/fault/v2/fault.proto) is deprecated in favor - of the new `FractionalPercent` based `percentage` field. -* Setting hosts via `hosts` field in `Cluster` is deprecated. Use `load_assignment` instead. -* Use of `response_headers_to_*` and `request_headers_to_add` are deprecated at the `RouteAction` - level. Please use the configuration options at the `Route` level. -* Use of `runtime` in `RouteMatch`, found in - [route.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/api/v2/route/route.proto). - Set the `runtime_fraction` field instead. -* Use of the string `user` field in `Authenticated` in [rbac.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/rbac/v2alpha/rbac.proto) - is deprecated in favor of the new `StringMatcher` based `principal_name` field. - -## Version 1.7.0 (Jun 21, 2018) - -* Admin mutations should be sent as POSTs rather than GETs. HTTP GETs will result in an error - status code and will not have their intended effect. Prior to 1.7, GETs can be used for - admin mutations, but a warning is logged. -* Rate limit service configuration via the `cluster_name` field is deprecated. Use `grpc_service` - instead. -* gRPC service configuration via the `cluster_names` field in `ApiConfigSource` is deprecated. Use - `grpc_services` instead. Prior to 1.7, a warning is logged. -* Redis health checker configuration via the `redis_health_check` field in `HealthCheck` is - deprecated. Use `custom_health_check` with name `envoy.health_checkers.redis` instead. 
Prior - to 1.7, `redis_health_check` can be used, but warning is logged. -* `SAN` is replaced by `URI` in the `x-forwarded-client-cert` header. -* The `endpoint` field in the http health check filter is deprecated in favor of the `headers` - field where one can specify HeaderMatch objects to match on. -* The `sni_domains` field in the filter chain match was deprecated/renamed to `server_names`. - -## Version 1.6.0 (March 20, 2018) - -* DOWNSTREAM_ADDRESS log formatter is deprecated. Use DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT - instead. -* CLIENT_IP header formatter is deprecated. Use DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT instead. -* 'use_original_dst' field in the v2 LDS API is deprecated. Use listener filters and filter chain - matching instead. -* `value` and `regex` fields in the `HeaderMatcher` message is deprecated. Use the `exact_match` - or `regex_match` oneof instead. - -## Version 1.5.0 (Dec 4, 2017) - -* The outlier detection `ejections_total` stats counter has been deprecated and not replaced. Monitor - the individual `ejections_detected_*` counters for the detectors of interest, or - `ejections_enforced_total` for the total number of ejections that actually occurred. -* The outlier detection `ejections_consecutive_5xx` stats counter has been deprecated in favour of - `ejections_detected_consecutive_5xx` and `ejections_enforced_consecutive_5xx`. -* The outlier detection `ejections_success_rate` stats counter has been deprecated in favour of - `ejections_detected_success_rate` and `ejections_enforced_success_rate`. - -## Version 1.4.0 (Aug 24, 2017) - -* Config option `statsd_local_udp_port` has been deprecated and has been replaced with - `statsd_udp_ip_address`. -* `HttpFilterConfigFactory` filter API has been deprecated in favor of `NamedHttpFilterConfigFactory`. -* Config option `http_codec_options` has been deprecated and has been replaced with `http2_settings`. 
-* The following log macros have been deprecated: `log_trace`, `log_debug`, `conn_log`, - `conn_log_info`, `conn_log_debug`, `conn_log_trace`, `stream_log`, `stream_log_info`, - `stream_log_debug`, `stream_log_trace`. For replacements, please see - [logger.h](https://github.com/envoyproxy/envoy/blob/master/source/common/common/logger.h). -* The connectionId() and ssl() callbacks of StreamFilterCallbacks have been deprecated and - replaced with a more general connection() callback, which, when not returning a nullptr, can be - used to get the connection id and SSL connection from the returned Connection object pointer. -* The protobuf stub gRPC support via `Grpc::RpcChannelImpl` is now replaced with `Grpc::AsyncClientImpl`. - This no longer uses `protoc` generated stubs but instead utilizes C++ template generation of the - RPC stubs. `Grpc::AsyncClientImpl` supports streaming, in addition to the previous unary, RPCs. -* The direction of network and HTTP filters in the configuration will be ignored from 1.4.0 and - later removed from the configuration in the v2 APIs. Filter direction is now implied at the C++ type - level. The `type()` methods on the `NamedNetworkFilterConfigFactory` and - `NamedHttpFilterConfigFactory` interfaces have been removed to reflect this. +The [deprecated log](https://www.envoyproxy.io/docs/envoy/latest/intro/deprecated) can be found in the official Envoy developer documentation. diff --git a/EXTENSION_POLICY.md b/EXTENSION_POLICY.md new file mode 100644 index 0000000000000..d7f1df8334c5d --- /dev/null +++ b/EXTENSION_POLICY.md @@ -0,0 +1,58 @@ +# Envoy Extension Policy + +## Quality requirements + +All extensions contained in the main Envoy repository will be held to the same quality bar as the +core Envoy code. This includes coding style, code reviews, test coverage, etc. 
In the future we +may consider creating a sandbox repository for extensions that are not compiled/tested by default +and held to a lower quality standard, but that is out of scope currently. + +## Adding new extensions + +The following procedure will be used when proposing new extensions for inclusion in the repository: + 1. A GitHub issue should be opened describing the proposed extension as with any major feature + proposal. + 2. All extensions must be sponsored by an existing maintainer. Sponsorship means that the + maintainer will shepherd the extension through design/code reviews. Maintainers can self-sponsor + extensions if they are going to write them, shepherd them, and maintain them. + + Sponsorship serves two purposes: + * It ensures that the extension will ultimately meet the Envoy quality bar. + * It makes sure that incentives are aligned and that extensions are not added to the repo without + sufficient thought put into future maintenance. + + *If sponsorship cannot be found from an existing maintainer, an organization can consider + [doing the work to become a maintainer](./GOVERNANCE.md#process-for-becoming-a-maintainer) in + order to be able to self-sponsor extensions.* + + 3. Each extension must have two reviewers proposed for reviewing PRs to the extension. Neither of + the reviewers must be a senior maintainer. Existing maintainers (including the sponsor) and other + contributors can count towards this number. The initial reviewers will be codified in the + [CODEOWNERS](./CODEOWNERS) file for long term maintenance. These reviewers can be swapped out as + needed. + 4. Any extension added via this process becomes a full part of the repository. This means that any + API breaking changes in the core code will be automatically fixed as part of the normal PR process + by other contributors. 
+ +## Removing existing extensions + +As stated in the previous section, once an extension becomes part of the repository it will be +maintained by the collective set of Envoy contributors as needed. + +However, if an extension has known issues that are not being rectified by the original sponsor and +reviewers or new contributors that are willing to step into the role of extension owner, a +[vote of the maintainers](./GOVERNANCE.md#conflict-resolution-and-voting) can be called to remove the +extension from the repository. + +## Extension pull request reviews + +Extension PRs must not modify core Envoy code. In the event that an extension requires changes to core +Envoy code, those changes should be submitted as a separate PR and will undergo the normal code review +process, as documented in the [contributor's guide](./CONTRIBUTING.md). + +Extension PRs must be approved by at least one sponsoring maintainer and an extension reviewer. These +may be a single individual, but it is always preferred to have multiple reviewers when feasible. + +In the event that the Extension PR author is a sponsoring maintainer and no other sponsoring maintainer +is available, another maintainer may be enlisted to perform a minimal review for style and common C++ +anti-patterns. The Extension PR must still be approved by a non-maintainer reviewer. diff --git a/GOVERNANCE.md b/GOVERNANCE.md index 1693d58158800..6639dbffca172 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -37,6 +37,8 @@ * Triage GitHub issues and perform pull request reviews for other maintainers and the community. The areas of specialization listed in [OWNERS.md](OWNERS.md) can be used to help with routing an issue/question to the right person. +* Triage build issues - file issues for known flaky builds or bugs, and either fix or find someone + to fix any master build breakages. * During GitHub issue triage, apply all applicable [labels](https://github.com/envoyproxy/envoy/labels) to each new issue. 
Labels are extremely useful for future issue follow up. Which labels to apply is somewhat subjective so just use your best judgment. A few of the most important labels that are @@ -62,7 +64,9 @@ forward. To reiterate, it is *not* the responsibility of the on-call maintainer to answer all questions and do all reviews, but it is their responsibility to make sure that everything is being actively covered by someone. -* The on-call rotation is tracked at PagerDuty. The calendar is visible [here](https://pagerduty.github.io/addons/PDcal/index.html?iCalURL=https://cncf.pagerduty.com/private/e44caf2604ce6c5ccc616b7b84f99b94dc801dba4cceb8d71fb128338f75b9af/feed/PXU9KPH) or you can subscribe to the iCal feed [here](https://cncf.pagerduty.com/private/e44caf2604ce6c5ccc616b7b84f99b94dc801dba4cceb8d71fb128338f75b9af/feed/PXU9KPH). +* The on-call rotation is tracked at Opsgenie. The calendar is visible +[here](https://calendar.google.com/calendar/embed?src=ms6efr2erlvum9aolnvg1688cd3mu85e%40import.calendar.google.com&ctz=America%2FNew_York) +or you can subscribe to the iCal feed [here](https://app.opsgenie.com/webcal/getRecentSchedule?webcalToken=75f2990470ca21de1033ecf4586bea1e40bae32bf3c39e2289f6186da1904ee0&scheduleId=a3505963-c064-4c97-8865-947dfcb06060) ## Cutting a release @@ -80,7 +84,7 @@ corrections. * Switch the [VERSION](VERSION) from a "dev" variant to a final variant. E.g., "1.6.0-dev" to "1.6.0". Also remove the "Pending" tag from the top of the [release notes](docs/root/intro/version_history.rst) - and [DEPRECATED.md](DEPRECATED.md). Get a review and merge. + and [deprecated log](docs/root/intro/deprecated.rst). Get a review and merge. * **Wait for tests to pass on [master](https://circleci.com/gh/envoyproxy/envoy/tree/master).** * Create a [tagged release](https://github.com/envoyproxy/envoy/releases). The release should @@ -95,10 +99,13 @@ Envoy account post). * Do a new PR to update [VERSION](VERSION) to the next development release. E.g., "1.7.0-dev". 
At the same time, also add a new empty "pending" section to the [release - notes](docs/root/intro/version_history.rst) and to [DEPRECATED.md](DEPRECATED.md) for the + notes](docs/root/intro/version_history.rst) and to [deprecated log](docs/root/intro/deprecated.rst) for the following version. E.g., "1.7.0 (pending)". * Run the deprecate_versions.py script (e.g. `sh tools/deprecate_version/deprecate_version.sh 1.8.0 1.10.0`) to file tracking issues for code which can be removed. +* Run the deprecate_features.py script (e.g. `sh tools/deprecate_version/deprecate_features.sh`) + to make the last release's deprecated features fatal-by-default. Submit the resultant PR and send + an email to envoy-announce. ## When does a maintainer lose maintainer status @@ -109,8 +116,7 @@ the maintainers per the voting process below. # Extension addition policy Adding new [extensions](REPO_LAYOUT.md#sourceextensions-layout) has a dedicated policy. Please -see [this](https://docs.google.com/document/d/1eDQQSxqx2khTXfa2vVm4vqkyRwXYkPzZCcbjxJ2_AvA) document -for more information. +see [this](./EXTENSION_POLICY.md) document for more information. # Conflict resolution and voting diff --git a/OWNERS.md b/OWNERS.md index 66b1a39776feb..816c7eb78c118 100644 --- a/OWNERS.md +++ b/OWNERS.md @@ -17,8 +17,9 @@ routing PRs, questions, etc. to the right place. * Stephan Zuercher ([zuercher](https://github.com/zuercher)) (zuercher@gmail.com) * Load balancing, upstream clusters and cluster manager, logging, complex HTTP routing (metadata, etc.), and macOS build. -* Greg Greenway ([ggreenway](https://github.com/ggreenway)) (ggreenway@apple.com) - * TCP proxy, TLS, logging, and core networking (listeners, connections, etc.). +* Lizan Zhou ([lizan](https://github.com/lizan)) (lizan@tetrate.io) + * gRPC, gRPC/JSON transcoding, and core networking (transport socket abstractions), Bazel, build + issues, and CI in general. # Maintainers @@ -26,8 +27,6 @@ routing PRs, questions, etc. to the right place. 
* Outlier detection, HTTP routing, xDS, configuration/operational questions. * Dan NoĆ© ([dnoe](https://github.com/dnoe)) (dpn@google.com) * Base server (watchdog, workers, startup, stack trace handling, etc.). -* Lizan Zhou ([lizan](https://github.com/lizan)) (lizan@tetrate.io) - * gRPC, gRPC/JSON transcoding, and core networking (transport socket abstractions). * Dhi Aurrahman ([dio](https://github.com/dio)) (dio@tetrate.io) * Lua, access logging, and general miscellany. * Joshua Marantz ([jmarantz](https://github.com/jmarantz)) (jmarantz@google.com) @@ -35,11 +34,17 @@ routing PRs, questions, etc. to the right place. * Snow Pettersen ([snowp](https://github.com/snowp)) (snowp@squareup.com) * Upstream, host/priority sets, load balancing, and retry plugins. +# Envoy security team + +* All maintainers +* Piotr Sikora ([PiotrSikora](https://github.com/PiotrSikora)) (piotrsikora@google.com) + # Emeritus maintainers * Constance Caramanolis ([ccaraman](https://github.com/ccaraman)) (ccaramanolis@lyft.com) * Roman Dzhabarov ([RomanDzhabarov](https://github.com/RomanDzhabarov)) (rdzhabarov@lyft.com) * Bill Gallagher ([wgallagher](https://github.com/wgallagher)) (bgallagher@lyft.com) +* Greg Greenway ([ggreenway](https://github.com/ggreenway)) (greg.greenway@gmail.com, formerly ggreenway@apple.com) # Friends of Envoy diff --git a/PULL_REQUESTS.md b/PULL_REQUESTS.md index ad9cdafb99466..0bc71ab31e6bb 100644 --- a/PULL_REQUESTS.md +++ b/PULL_REQUESTS.md @@ -74,7 +74,7 @@ you may instead just tag the PR with the issue: ### Deprecated If this PR deprecates existing Envoy APIs or code, it should include -an update to the [deprecated file](DEPRECATED.md) and a one line note in the PR +an update to the [deprecated file](docs/root/intro/deprecated.rst) and a one line note in the PR description. 
If you mark existing APIs or code as deprecated, when the next release is cut, the diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md index 4f001a5a6821d..f8bb15ff43e47 100644 --- a/PULL_REQUEST_TEMPLATE.md +++ b/PULL_REQUEST_TEMPLATE.md @@ -1,10 +1,10 @@ -For an explanation of how to fill out the fields, please see the relevant section -in [PULL_REQUESTS.md](./PULL_REQUESTS.md) +For an explanation of how to fill out the fields, please see the relevant section +in [PULL_REQUESTS.md](https://github.com/envoyproxy/envoy/blob/master/PULL_REQUESTS.md) -*Description*: -*Risk Level*: -*Testing*: -*Docs Changes*: -*Release Notes*: +Description: +Risk Level: +Testing: +Docs Changes: +Release Notes: [Optional Fixes #Issue] -[Optional *Deprecated*:] +[Optional Deprecated:] diff --git a/README.md b/README.md index f01a5e5ce56b8..71b468dca47b8 100644 --- a/README.md +++ b/README.md @@ -9,6 +9,9 @@ involved and how Envoy plays a role, read the CNCF [announcement](https://www.cncf.io/blog/2017/09/13/cncf-hosts-envoy/). [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1266/badge)](https://bestpractices.coreinfrastructure.org/projects/1266) +[![Azure Pipelines](https://dev.azure.com/cncf/envoy/_apis/build/status/envoyproxy.envoy.mac?branchName=master)](https://dev.azure.com/cncf/envoy/_build/latest?definitionId=2&branchName=master) +[![CircleCI](https://circleci.com/gh/envoyproxy/envoy/tree/master.svg?style=shield)](https://circleci.com/gh/envoyproxy/envoy/tree/master) +[![Jenkins](https://img.shields.io/jenkins/s/https/powerci.osuosl.org/job/build-envoy-master/badge/icon/.svg?label=ppc64le%20build)](http://powerci.osuosl.org/job/build-envoy-master/) ## Documentation @@ -42,7 +45,7 @@ to find out more about the origin story and design philosophy of Envoy * [envoy-maintainers](https://groups.google.com/forum/#!forum/envoy-maintainers): Use this list to reach all core Envoy maintainers. 
* [Twitter](https://twitter.com/EnvoyProxy/): Follow along on Twitter! -* [Slack](https://envoyproxy.slack.com/): Slack, to get invited go [here](http://envoyslack.cncf.io). +* [Slack](https://envoyproxy.slack.com/): Slack, to get invited go [here](https://envoyslack.cncf.io). We have the IRC/XMPP gateways enabled if you prefer either of those. Once an account is created, connection instructions for IRC/XMPP can be found [here](https://envoyproxy.slack.com/account/gateways). * NOTE: Response to user questions is best effort on Slack. For a "guaranteed" response please email diff --git a/SECURITY_RELEASE_PROCESS.md b/SECURITY_RELEASE_PROCESS.md index ac49c34c04745..5d07cb1b37e6f 100644 --- a/SECURITY_RELEASE_PROCESS.md +++ b/SECURITY_RELEASE_PROCESS.md @@ -82,6 +82,12 @@ score](https://www.first.org/cvss/specification-document#i5)) the Fix Team can d release process down in the face of holidays, developer bandwidth, etc. These decisions must be discussed on the envoy-security mailing list. +A two week window will be provided to members of the private distributor list from candidate patch +availability until the security release date. It is expected that distributors will normally be able +to perform a release within this time window. If there are exceptional circumstances, the Envoy +security team will raise this window to four weeks. The release window will be reduced if the +security issue is public or embargo is broken. + ### Fix Disclosure Process With the fix development underway, the Fix Lead needs to come up with an overall communication plan @@ -104,7 +110,7 @@ patches, understand exact mitigation steps, etc. to require early disclosure to distributors. Generally this Private Distributor Disclosure process should be reserved for remotely exploitable or privilege escalation issues. Otherwise, this process can be skipped. 
-- The Fix Lead will email the patches to envoy-distributors-announce@googlegroups.com so +- The Fix Lead will email the patches to cncf-envoy-distributors-announce@lists.cncf.io so distributors can prepare builds to be available to users on the day of the issue's announcement. Distributors should read about the [Private Distributors List](#private-distributors-list) to find out the requirements for being added to this list. @@ -149,7 +155,7 @@ individuals to find out about security issues. ### Embargo Policy -The information members receive on envoy-distributors-announce must not be made public, shared, nor +The information members receive on cncf-envoy-distributors-announce must not be made public, shared, nor even hinted at anywhere beyond the need-to-know within your specific team except with the list's explicit approval. This holds true until the public disclosure date/time that was agreed upon by the list. Members of the list and others may not use the information for anything other than getting the @@ -158,6 +164,11 @@ issue fixed for your respective distribution's users. Before any information from the list is shared with respective members of your team required to fix said issue, they must agree to the same terms and only find out information on a need-to-know basis. +The embargo applies to information shared, source code and binary images. **It is a violation of the +embargo policy to share binary distributions of the security fixes before the public release date.** +This includes, but is not limited to, Envoy binaries and Docker images. It is expected that +distributors have a method to stage and validate new binaries without exposing them publicly. + In the unfortunate event you share the information beyond what is allowed by this policy, you _must_ urgently inform the envoy-security@googlegroups.com mailing list of exactly what information leaked and to whom. 
A retrospective will take place after the leak so we can assess how to prevent making the @@ -185,23 +196,50 @@ could be in the form of the following: ### Membership Criteria -To be eligible for the envoy-distributors-announce mailing list, your +To be eligible for the cncf-envoy-distributors-announce mailing list, your distribution should: -1. Be an actively maintained distribution of Envoy components OR offer Envoy as a publicly - available service in which the product clearly states that it is built on top of Envoy. E.g., - "SuperAwesomeLinuxDistro" which offers Envoy pre-built packages OR - "SuperAwesomeCloudProvider's Envoy as a Service (EaaS)". A cloud service that uses Envoy for a - product but does not publicly say they are using Envoy does not qualify. -2. Have a user base not limited to your own organization. +1. Be either: + 1. An actively maintained distribution of Envoy components. An example is + "SuperAwesomeLinuxDistro" which offers Envoy pre-built packages. Another + example is "SuperAwesomeServiceMesh" which offers a service mesh product + that includes Envoy as a component. + + OR + + 2. Offer Envoy as a publicly available infrastructure or platform service, in + which the product clearly states (e.g. public documentation, blog posts, + marketing copy, etc.) that it is built on top of Envoy. E.g., + "SuperAwesomeCloudProvider's Envoy as a Service (EaaS)". An infrastructure + service that uses Envoy for a product but does not publicly say they are + using Envoy does not qualify. This is essentially IaaS or PaaS, if you use + Envoy to support a SaaS, e.g. "SuperAwesomeCatVideoService", this does not + qualify. +2. Have a user or customer base not limited to your own organization. We will use the size + of the user or customer base as part of the criteria to determine + eligibility. 3. Have a publicly verifiable track record up to present day of fixing security issues. 4. Not be a downstream or rebuild of another distribution. 5. 
Be a participant and active contributor in the community.
-6. Accept the [Embargo Policy](#embargo-policy) that is outlined above.
+6. Accept the [Embargo Policy](#embargo-policy) that is outlined above. You must
+   have a way to privately stage and validate your updates that does not violate
+   the embargo.
 7. Be willing to [contribute back](#contributing-back) as outlined above.
-8. Have someone already on the list vouch for the person requesting membership
+8. Be able to perform a security release of your product within a two week window from candidate fix
+   patch availability.
+9. Have someone already on the list vouch for the person requesting membership
    on behalf of your distribution.
+10. Nominate an e-mail alias or list for your organization to receive updates. This should not be
+    an individual user address, but instead a list that can be maintained by your organization as
+    individuals come and go. A good example is envoy-security@seven.com, a bad example is
+    acidburn@seven.com. You must accept the invite sent to this address or you will not receive any
+    e-mail updates. This e-mail address will be [shared with the Envoy community](#Members).
+
+Note that Envoy maintainers are members of the Envoy security team. [Members of the Envoy security
+team](OWNERS.md#envoy-security-team) and the organizations that they represent are implicitly
+included in the private distributor list. These organizations do not need to meet the above list of
+criteria with the exception of the acceptance of the embargo policy.
 
 ### Requesting to Join
 
@@ -214,7 +252,7 @@ Here is a pseudo example:
 
 ```
 To: envoy-security@googlegroups.com
-Subject: Seven-Corp Membership to envoy-distributors-announce
+Subject: Seven-Corp Membership to cncf-envoy-distributors-announce
 
 Below are each criterion and why I think we, Seven-Corp, qualify.
 
@@ -224,10 +262,13 @@ Below are each criterion and why I think we, Seven-Corp, qualify.
We have been doing this since 1999 before proxies were even cool. -> 2. Have a user base not limited to your own organization. +> 2. Have a user or customer base not limited to your own organization. Please specify an +> approximate size of your user or customer base, including the number of +> production deployments. Our user base spans of the extensive "Seven" community. We have a slack and -GitHub repos and mailing lists where the community hangs out. [links] +GitHub repos and mailing lists where the community hangs out. We have ~2000 +customers, of which approximately 400 are using Seven in production. [links] > 3. Have a publicly verifiable track record up to present day of fixing security issues. @@ -245,7 +286,9 @@ Our members, Acidburn, Cereal, and ZeroCool are outstanding members and are well known throughout the Envoy community. Especially for their contributions in hacking the Gibson. -> 6. Accept the Embargo Policy that is outlined above. +> 6. Accept the Embargo Policy that is outlined above. You must + have a way to privately stage and validate your updates that does not violate + the embargo. We accept. @@ -253,9 +296,34 @@ We accept. We are definitely willing to help! -> 8. Have someone already on the list vouch for the person requesting membership - on behalf of your distribution. +> 8. Be able to perform a security release of your product within a two week window from candidate fix + patch availability. + +We affirm we can spin out new security releases within a 2 week window. -CrashOverride will vouch for Acidburn joining the list on behalf of the "Seven" -distribution. +> 9. Have someone already on the list vouch for the person requesting membership +> on behalf of your distribution. + +CrashOverride will vouch for the "Seven" distribution joining the distribution list. + +> 10. Nominate an e-mail alias or list for your organization to receive updates. 
This should not be + an individual user address, but instead a list that can be maintained by your organization as + individuals come and go. A good example is envoy-security@seven.com, a bad example is + acidburn@seven.com. You must accept the invite sent to this address or you will not receive any + e-mail updates. This e-mail address will be shared with the Envoy community. ``` + +### Members + +| E-mail | Organization | +|-------------------------------------------|:-------------:| +| envoy-security-team@aspenmesh.io | Aspen Mesh | +| aws-app-mesh-security@amazon.com | AWS | +| security@cilium.io | Cilium | +| vulnerabilityreports@cloudfoundry.org | Cloud Foundry | +| secalert@datawire.io | Datawire | +| google-internal-envoy-security@google.com | Google | +| vulnerabilities@discuss.istio.io | Istio | +| secalert@redhat.com | Red Hat | +| envoy-security@solo.io | solo.io | +| envoy-security@tetrate.io | Tetrate | diff --git a/STYLE.md b/STYLE.md index c82e0f253198d..1a31a95c2cc57 100644 --- a/STYLE.md +++ b/STYLE.md @@ -83,10 +83,10 @@ annotations](https://github.com/abseil/abseil-cpp/blob/master/absl/base/thread_annotations.h), such as `GUARDED_BY`, should be used for shared state guarded by locks/mutexes. -* Functions intended to be local to a cc file should be declared in an anonymonus namespace, +* Functions intended to be local to a cc file should be declared in an anonymous namespace, rather than using the 'static' keyword. Note that the [Google C++ style guide](https://google.github.io/styleguide/cppguide.html#Unnamed_Namespaces_and_Static_Variables) - allows either, but in Envoy we prefer annonymous namespaces. + allows either, but in Envoy we prefer anonymous namespaces. * Braces are required for all control statements include single line if, while, etc. statements. 
# Error handling diff --git a/VERSION b/VERSION index a01185b4d67a2..1f724bf455d78 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.10.0-dev +1.11.0-dev diff --git a/WORKSPACE b/WORKSPACE index ecd0a358f1639..5609189bd56df 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -1,5 +1,9 @@ workspace(name = "envoy") +load("//bazel:api_repositories.bzl", "envoy_api_dependencies") + +envoy_api_dependencies() + load("//bazel:repositories.bzl", "GO_VERSION", "envoy_dependencies") load("//bazel:cc_configure.bzl", "cc_configure") @@ -11,11 +15,7 @@ rules_foreign_cc_dependencies() cc_configure() -load("@envoy_api//bazel:repositories.bzl", "api_dependencies") - -api_dependencies() - -load("@io_bazel_rules_go//go:def.bzl", "go_register_toolchains", "go_rules_dependencies") +load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") go_rules_dependencies() diff --git a/api/CONTRIBUTING.md b/api/CONTRIBUTING.md index 8bd23ab6bc5dd..02f8536906b57 100644 --- a/api/CONTRIBUTING.md +++ b/api/CONTRIBUTING.md @@ -33,7 +33,7 @@ Or to use a hermetic Docker container: ``` This process builds RST documentation directly from the proto files, merges it with the static RST -files, and then runs [Sphinx](http://www.sphinx-doc.org/en/stable/rest.html) over the entire tree to +files, and then runs [Sphinx](https://www.sphinx-doc.org/en/stable/rest.html) over the entire tree to produce the final documentation. The generated RST files are not committed as they are regenerated every time the documentation is built. 
diff --git a/api/STYLE.md b/api/STYLE.md index 887a6c53a45b8..0289c5f85af27 100644 --- a/api/STYLE.md +++ b/api/STYLE.md @@ -123,7 +123,7 @@ In addition, the following conventions should be followed: ``` * The [Breaking Change - Policy](https://github.com/envoyproxy/envoy/blob/master//CONTRIBUTING.md#breaking-change-policy) describes + Policy](https://github.com/envoyproxy/envoy/blob/master/CONTRIBUTING.md#breaking-change-policy) describes API versioning, deprecation and compatibility. ## Package organization diff --git a/api/XDS_PROTOCOL.md b/api/XDS_PROTOCOL.md deleted file mode 100644 index 798daa239011c..0000000000000 --- a/api/XDS_PROTOCOL.md +++ /dev/null @@ -1,348 +0,0 @@ -# xDS REST and gRPC protocol - -Envoy discovers its various dynamic resources via the filesystem or by querying -one or more management servers. Collectively, these discovery services and their -corresponding APIs are referred to as _xDS_. Resources are requested via -_subscriptions_, by specifying a filesystem path to watch, initiating gRPC -streams or polling a REST-JSON URL. The latter two methods involve sending -requests with a -[`DiscoveryRequest`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/discovery.proto#discoveryrequest) -proto payload. Resources are delivered in a -[`DiscoveryResponse`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/discovery.proto#discoveryresponse) -proto payload in all methods. We discuss each type of subscription below. - -## Filesystem subscriptions - -The simplest approach to delivering dynamic configuration is to place it at a -well known path specified in the -[`ConfigSource`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/config_source.proto#core-configsource). -Envoy will use `inotify` (`kqueue` on macOS) to monitor the file for changes -and parse the `DiscoveryResponse` proto in the file on update. Binary -protobufs, JSON, YAML and proto text are supported formats for the -`DiscoveryResponse`. 
- -There is no mechanism available for filesystem subscriptions to ACK/NACK updates -beyond stats counters and logs. The last valid configuration for an xDS API will -continue to apply if an configuration update rejection occurs. - -## Streaming gRPC subscriptions - -### Singleton resource type discovery - -A gRPC -[`ApiConfigSource`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/config_source.proto#core-apiconfigsource) -can be specified independently for each xDS API, pointing at an upstream -cluster corresponding to a management server. This will initiate an independent -bidirectional gRPC stream for each xDS resource type, potentially to distinct -management servers. API delivery is eventually consistent. See -[ADS](#aggregated-discovery-service) below for situations in which explicit -control of sequencing is required. - -#### Type URLs - -Each xDS API is concerned with resources of a given type. There is a 1:1 -correspondence between an xDS API and a resource type. That is: - -* [LDS: `envoy.api.v2.Listener`](envoy/api/v2/lds.proto) -* [RDS: `envoy.api.v2.RouteConfiguration`](envoy/api/v2/rds.proto) -* [CDS: `envoy.api.v2.Cluster`](envoy/api/v2/cds.proto) -* [EDS: `envoy.api.v2.ClusterLoadAssignment`](envoy/api/v2/eds.proto) -* [SDS: `envoy.api.v2.Auth.Secret`](envoy/api/v2/auth/cert.proto) - -The concept of [_type -URLs_](https://developers.google.com/protocol-buffers/docs/proto3#any) appears -below, and takes the form `type.googleapis.com/`, e.g. -`type.googleapis.com/envoy.api.v2.Cluster` for CDS. In various requests from -Envoy and responses by the management server, the resource type URL is stated. - -#### ACK/NACK and versioning - -Each stream begins with a `DiscoveryRequest` from Envoy, specifying the list of -resources to subscribe to, the type URL corresponding to the subscribed -resources, the node identifier and an empty `version_info`. 
An example EDS request -might be: - -```yaml -version_info: -node: { id: envoy } -resource_names: -- foo -- bar -type_url: type.googleapis.com/envoy.api.v2.ClusterLoadAssignment -response_nonce: -``` - -The management server may reply either immediately or when the requested -resources are available with a `DiscoveryResponse`, e.g.: - -```yaml -version_info: X -resources: -- foo ClusterLoadAssignment proto encoding -- bar ClusterLoadAssignment proto encoding -type_url: type.googleapis.com/envoy.api.v2.ClusterLoadAssignment -nonce: A -``` - -After processing the `DiscoveryResponse`, Envoy will send a new request on the -stream, specifying the last version successfully applied and the nonce provided -by the management server. If the update was successfully applied, the -`version_info` will be __X__, as indicated in the sequence diagram: - -![Version update after ACK](diagrams/simple-ack.svg) - -In this sequence diagram, and below, the following format is used to abbreviate -messages: -* `DiscoveryRequest`: (V=`version_info`,R=`resource_names`,N=`response_nonce`,T=`type_url`) -* `DiscoveryResponse`: (V=`version_info`,R=`resources`,N=`nonce`,T=`type_url`) - -The version provides Envoy and the management server a shared notion of the -currently applied configuration, as well as a mechanism to ACK/NACK -configuration updates. If Envoy had instead rejected configuration update __X__, -it would reply with -[`error_detail`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/discovery.proto#envoy-api-field-discoveryrequest-error-detail) -populated and its previous version, which in this case was the empty -initial version. 
The error_detail has more details around the exact error message -populated in the message field: - -![No version update after NACK](diagrams/simple-nack.svg) - -Later, an API update may succeed at a new version __Y__: - -![ACK after NACK](diagrams/later-ack.svg) - -Each stream has its own notion of versioning, there is no shared versioning -across resource types. When ADS is not used, even each resource of a given -resource type may have a -distinct version, since the Envoy API allows distinct EDS/RDS resources to point -at different `ConfigSource`s. - -#### When to send an update - -The management server should only send updates to the Envoy client when the -resources in the `DiscoveryResponse` have changed. Envoy replies to any -`DiscoveryResponse` with a `DiscoveryRequest` containing the ACK/NACK -immediately after it has been either accepted or rejected. If the management -server provides the same set of resources rather than waiting for a change to -occur, it will cause Envoy and the management server to spin and have a severe -performance impact. - -Within a stream, new `DiscoveryRequest`s supersede any prior `DiscoveryRequest`s -having the same resource type. This means that the management server only needs -to respond to the latest `DiscoveryRequest` on each stream for any given resource -type. - -#### Resource hints - -The `resource_names` specified in the `DiscoveryRequest` are a hint. Some -resource types, e.g. `Cluster`s and `Listener`s will specify an empty -`resource_names` list, since Envoy is interested in learning about all the -`Cluster`s (CDS) and `Listener`s (LDS) that the management server(s) know about -corresponding to its node identification. Other resource types, e.g. -`RouteConfiguration`s (RDS) and `ClusterLoadAssignment`s (EDS), follow from -earlier CDS/LDS updates and Envoy is able to explicitly enumerate these -resources. 
- -LDS/CDS resource hints will always be empty and it is expected that the -management server will provide the complete state of the LDS/CDS resources in -each response. An absent `Listener` or `Cluster` will be deleted. - -For EDS/RDS, the management server does not need to supply every requested -resource and may also supply additional, unrequested resources. `resource_names` -is only a hint. Envoy will silently ignore any superfluous resources. When a -requested resource is missing in a RDS or EDS update, Envoy will retain the last -known value for this resource. The management server may be able to infer all -the required EDS/RDS resources from the `node` identification in the -`DiscoveryRequest`, in which case this hint may be discarded. An empty EDS/RDS -`DiscoveryResponse` is effectively a nop from the perspective of the respective -resources in the Envoy. - -When a `Listener` or `Cluster` is deleted, its corresponding EDS and RDS -resources are also deleted inside the Envoy instance. In order for EDS resources -to be known or tracked by Envoy, there must exist an applied `Cluster` -definition (e.g. sourced via CDS). A similar relationship exists between RDS and -`Listeners` (e.g. sourced via LDS). - -For EDS/RDS, Envoy may either generate a distinct stream for each resource of a -given type (e.g. if each `ConfigSource` has its own distinct upstream cluster -for a management server), or may combine together multiple resource requests for -a given resource type when they are destined for the same management server. -While this is left to implementation specifics, management servers should be capable -of handling one or more `resource_names` for a given resource type in each -request. 
Both sequence diagrams below are valid for fetching two EDS resources -`{foo, bar}`: - -![Multiple EDS requests on the same stream](diagrams/eds-same-stream.svg) -![Multiple EDS requests on distinct streams](diagrams/eds-distinct-stream.svg) - -#### Resource updates - -As discussed above, Envoy may update the list of `resource_names` it presents to -the management server in each `DiscoveryRequest` that ACK/NACKs a specific -`DiscoveryResponse`. In addition, Envoy may later issue additional -`DiscoveryRequest`s at a given `version_info` to update the management server -with new resource hints. For example, if Envoy is at EDS version __X__ and knows -only about cluster `foo`, but then receives a CDS update and learns about `bar` -in addition, it may issue an additional `DiscoveryRequest` for __X__ with -`{foo,bar}` as `resource_names`. - -![CDS response leads to EDS resource hint update](diagrams/cds-eds-resources.svg) - -There is a race condition that may arise here; if after a resource hint update -is issued by Envoy at __X__, but before the management server processes the -update it replies with a new version __Y__, the resource hint update may be -interpreted as a rejection of __Y__ by presenting an __X__ `version_info`. To -avoid this, the management server provides a `nonce` that Envoy uses to indicate -the specific `DiscoveryResponse` each `DiscoveryRequest` corresponds to: - -![EDS update race motivates nonces](diagrams/update-race.svg) - -The management server should not send a `DiscoveryResponse` for any -`DiscoveryRequest` that has a stale nonce. A nonce becomes stale following a -newer nonce being presented to Envoy in a `DiscoveryResponse`. A management -server does not need to send an update until it determines a new version is -available. Earlier requests at a version then also become stale. It may process -multiple `DiscoveryRequests` at a version until a new version is ready. 
- -![Requests become stale](diagrams/stale-requests.svg) - -An implication of the above resource update sequencing is that Envoy does not -expect a `DiscoveryResponse` for every `DiscoveryRequest` it issues. - -#### Eventual consistency considerations - -Since Envoy's xDS APIs are eventually consistent, traffic may drop briefly -during updates. For example, if only cluster __X__ is known via CDS/EDS, -a `RouteConfiguration` references cluster __X__ -and is then adjusted to cluster __Y__ just before the CDS/EDS update -providing __Y__, traffic will be blackholed until __Y__ is known about by the -Envoy instance. - -For some applications, a temporary drop of traffic is acceptable, retries at the -client or by other Envoy sidecars will hide this drop. For other scenarios where -drop can't be tolerated, traffic drop could have been avoided by providing a -CDS/EDS update with both __X__ and __Y__, then the RDS update repointing from -__X__ to __Y__ and then a CDS/EDS update dropping __X__. - -In general, to avoid traffic drop, sequencing of updates should follow a -`make before break` model, wherein -* CDS updates (if any) must always be pushed first. -* EDS updates (if any) must arrive after CDS updates for the respective clusters. -* LDS updates must arrive after corresponding CDS/EDS updates. -* RDS updates related to the newly added listeners must arrive in the end. -* Stale CDS clusters and related EDS endpoints (ones no longer being - referenced) can then be removed. - -xDS updates can be pushed independently if no new clusters/routes/listeners -are added or if it's acceptable to temporarily drop traffic during -updates. Note that in case of LDS updates, the listeners will be warmed -before they receive traffic, i.e. the dependent routes are fetched through -RDS if configured. Clusters are warmed when adding/removing/updating -clusters. 
On the other hand, routes are not warmed, i.e., the management -plane must ensure that clusters referenced by a route are in place, before -pushing the updates for a route. - -### Aggregated Discovery Services (ADS) - -It's challenging to provide the above guarantees on sequencing to avoid traffic -drop when management servers are distributed. ADS allow a single management -server, via a single gRPC stream, to deliver all API updates. This provides the -ability to carefully sequence updates to avoid traffic drop. With ADS, a single -stream is used with multiple independent `DiscoveryRequest`/`DiscoveryResponse` -sequences multiplexed via the type URL. For any given type URL, the above -sequencing of `DiscoveryRequest` and `DiscoveryResponse` messages applies. An -example update sequence might look like: - -![EDS/CDS multiplexed on an ADS stream](diagrams/ads.svg) - -A single ADS stream is available per Envoy instance. - -An example minimal `bootstrap.yaml` fragment for ADS configuration is: - -```yaml -node: - id: -dynamic_resources: - cds_config: {ads: {}} - lds_config: {ads: {}} - ads_config: - api_type: GRPC - grpc_services: - envoy_grpc: - cluster_name: ads_cluster -static_resources: - clusters: - - name: ads_cluster - connect_timeout: { seconds: 5 } - type: STATIC - hosts: - - socket_address: - address: - port_value: - lb_policy: ROUND_ROBIN - http2_protocol_options: {} -admin: - ... - -``` - -### Incremental xDS - -Incremental xDS is a separate xDS endpoint available for ADS, CDS and RDS that -allows: - - * Incremental updates of the list of tracked resources by the xDS client. - This supports Envoy on-demand / lazily requesting additional resources. For - example, this may occur when a request corresponding to an unknown cluster - arrives. - * The xDS server can incremetally update the resources on the client. - This supports the goal of scalability of xDS resources. 
Rather than deliver - all 100k clusters when a single cluster is modified, the management server - only needs to deliver the single cluster that changed. - -An xDS incremental session is always in the context of a gRPC bidirectional -stream. This allows the xDS server to keep track of the state of xDS clients -connected to it. There is no REST version of Incremental xDS. - -In incremental xDS the nonce field is required and used to pair a -[`IncrementalDiscoveryResponse`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/discovery.proto#discoveryrequest) -to a [`IncrementalDiscoveryRequest`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/discovery.proto#discoveryrequest) -ACK or NACK. -Optionally, a response message level system_version_info is present for -debugging purposes only. - -`IncrementalDiscoveryRequest` can be sent in 3 situations: - 1. Initial message in a xDS bidirectional gRPC stream. - 2. As an ACK or NACK response to a previous `IncrementalDiscoveryResponse`. - In this case the `response_nonce` is set to the nonce value in the Response. - ACK or NACK is determined by the absence or presence of `error_detail`. - 3. Spontaneous `IncrementalDiscoveryRequest` from the client. - This can be done to dynamically add or remove elements from the tracked - `resource_names` set. In this case `response_nonce` must be omitted. - -In this first example the client connects and receives a first update that it -ACKs. The second update fails and the client NACKs the update. Later the xDS -client spontaneously requests the "wc" resource. - -![Incremental session example](diagrams/incremental.svg) - -On reconnect the xDS Incremental client may tell the server of its known resources -to avoid resending them over the network. - -![Incremental reconnect example](diagrams/incremental-reconnect.svg) - -## REST-JSON polling subscriptions - -Synchronous (long) polling via REST endpoints is also available for the xDS -singleton APIs. 
The above sequencing of messages is similar, except no -persistent stream is maintained to the management server. It is expected that -there is only a single outstanding request at any point in time, and as a result -the response nonce is optional in REST-JSON. The [JSON canonical transform of -proto3](https://developers.google.com/protocol-buffers/docs/proto3#json) is used -to encode `DiscoveryRequest` and `DiscoveryResponse` messages. ADS is not -available for REST-JSON polling. - -When the poll period is set to a small value, with the intention of long -polling, then there is also a requirement to avoid sending a `DiscoveryResponse` -[unless a change to the underlying resources has -occurred](#when-to-send-an-update). diff --git a/api/bazel/envoy_http_archive.bzl b/api/bazel/envoy_http_archive.bzl new file mode 100644 index 0000000000000..30207282847c7 --- /dev/null +++ b/api/bazel/envoy_http_archive.bzl @@ -0,0 +1,24 @@ +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +def envoy_http_archive(name, locations, **kwargs): + # `existing_rule_keys` contains the names of repositories that have already + # been defined in the Bazel workspace. By skipping repos with existing keys, + # users can override dependency versions by using standard Bazel repository + # rules in their WORKSPACE files. + existing_rule_keys = native.existing_rules().keys() + if name in existing_rule_keys: + # This repository has already been defined, probably because the user + # wants to override the version. Do nothing. + return + + loc_key = kwargs.pop("repository_key", name) + location = locations[loc_key] + + # HTTP tarball at a given URL. Add a BUILD file if requested. 
+ http_archive( + name = name, + urls = location["urls"], + sha256 = location["sha256"], + strip_prefix = location.get("strip_prefix", ""), + **kwargs + ) diff --git a/api/bazel/repositories.bzl b/api/bazel/repositories.bzl index e524fcd14d8bf..724718f7495f2 100644 --- a/api/bazel/repositories.bzl +++ b/api/bazel/repositories.bzl @@ -1,43 +1,43 @@ -BAZEL_SKYLIB_RELEASE = "0.6.0" -BAZEL_SKYLIB_SHA256 = "eb5c57e4c12e68c0c20bc774bfbc60a568e800d025557bc4ea022c6479acc867" - -GOGOPROTO_RELEASE = "1.2.0" -GOGOPROTO_SHA256 = "957c8f03cf595525d2a667035d9865a0930b3d446be0ab6eb76972934f925b00" - -OPENCENSUS_RELEASE = "0.1.0" -OPENCENSUS_SHA256 = "4fd21cc6de63d7cb979fd749d8101ff425905aa0826fed26019d1c311fcf19a7" - -PGV_RELEASE = "0.0.13" -PGV_SHA256 = "dce6c8a43849d2abe4d5e40f16e9a476bca6b7a47e128db4458a52d748f4a5eb" - -GOOGLEAPIS_GIT_SHA = "d642131a6e6582fc226caf9893cb7fe7885b3411" # May 23, 2018 -GOOGLEAPIS_SHA = "16f5b2e8bf1e747a32f9a62e211f8f33c94645492e9bbd72458061d9a9de1f63" - -PROMETHEUS_GIT_SHA = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" # Nov 17, 2017 -PROMETHEUS_SHA = "783bdaf8ee0464b35ec0c8704871e1e72afa0005c3f3587f65d9d6694bf3911b" - load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") +load(":envoy_http_archive.bzl", "envoy_http_archive") +load(":repository_locations.bzl", "REPOSITORY_LOCATIONS") def api_dependencies(): - http_archive( - name = "bazel_skylib", - sha256 = BAZEL_SKYLIB_SHA256, - strip_prefix = "bazel-skylib-" + BAZEL_SKYLIB_RELEASE, - url = "https://github.com/bazelbuild/bazel-skylib/archive/" + BAZEL_SKYLIB_RELEASE + ".tar.gz", + envoy_http_archive( + "bazel_skylib", + locations = REPOSITORY_LOCATIONS, ) - http_archive( - name = "com_lyft_protoc_gen_validate", - sha256 = PGV_SHA256, - strip_prefix = "protoc-gen-validate-" + PGV_RELEASE, - url = "https://github.com/lyft/protoc-gen-validate/archive/v" + PGV_RELEASE + ".tar.gz", + envoy_http_archive( + "com_lyft_protoc_gen_validate", + locations = REPOSITORY_LOCATIONS, ) - 
http_archive( + envoy_http_archive( name = "googleapis", - strip_prefix = "googleapis-" + GOOGLEAPIS_GIT_SHA, - url = "https://github.com/googleapis/googleapis/archive/" + GOOGLEAPIS_GIT_SHA + ".tar.gz", - # TODO(dio): Consider writing a Skylark macro for importing Google API proto. - sha256 = GOOGLEAPIS_SHA, - build_file_content = """ + locations = REPOSITORY_LOCATIONS, + build_file_content = GOOGLEAPIS_BUILD_CONTENT, + ) + envoy_http_archive( + name = "com_github_gogo_protobuf", + locations = REPOSITORY_LOCATIONS, + build_file_content = GOGOPROTO_BUILD_CONTENT, + ) + envoy_http_archive( + name = "prometheus_metrics_model", + locations = REPOSITORY_LOCATIONS, + build_file_content = PROMETHEUSMETRICS_BUILD_CONTENT, + ) + envoy_http_archive( + name = "io_opencensus_trace", + locations = REPOSITORY_LOCATIONS, + build_file_content = OPENCENSUSTRACE_BUILD_CONTENT, + ) + envoy_http_archive( + name = "kafka_source", + locations = REPOSITORY_LOCATIONS, + build_file_content = KAFKASOURCE_BUILD_CONTENT, + ) + +GOOGLEAPIS_BUILD_CONTENT = """ load("@com_google_protobuf//:protobuf.bzl", "cc_proto_library", "py_proto_library") load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") @@ -120,6 +120,7 @@ cc_proto_library( ], default_runtime = "@com_google_protobuf//:protobuf", protoc = "@com_google_protobuf//:protoc", + linkstatic = 1, deps = ["@com_google_protobuf//:cc_wkt_protos"], visibility = ["//visibility:public"], ) @@ -194,15 +195,9 @@ py_proto_library( visibility = ["//visibility:public"], deps = ["@com_google_protobuf//:protobuf_python"], ) -""", - ) +""" - http_archive( - name = "com_github_gogo_protobuf", - sha256 = GOGOPROTO_SHA256, - strip_prefix = "protobuf-" + GOGOPROTO_RELEASE, - url = "https://github.com/gogo/protobuf/archive/v" + GOGOPROTO_RELEASE + ".tar.gz", - build_file_content = """ +GOGOPROTO_BUILD_CONTENT = """ load("@com_google_protobuf//:protobuf.bzl", "cc_proto_library", "py_proto_library") load("@io_bazel_rules_go//proto:def.bzl", 
"go_proto_library") @@ -255,15 +250,9 @@ py_proto_library( visibility = ["//visibility:public"], deps = ["@com_google_protobuf//:protobuf_python"], ) - """, - ) +""" - http_archive( - name = "prometheus_metrics_model", - strip_prefix = "client_model-" + PROMETHEUS_GIT_SHA, - url = "https://github.com/prometheus/client_model/archive/" + PROMETHEUS_GIT_SHA + ".tar.gz", - sha256 = PROMETHEUS_SHA, - build_file_content = """ +PROMETHEUSMETRICS_BUILD_CONTENT = """ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library") load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") @@ -281,15 +270,9 @@ go_proto_library( proto = ":client_model", visibility = ["//visibility:public"], ) - """, - ) +""" - http_archive( - name = "io_opencensus_trace", - sha256 = OPENCENSUS_SHA256, - strip_prefix = "opencensus-proto-" + OPENCENSUS_RELEASE + "/src/opencensus/proto/trace/v1", - url = "https://github.com/census-instrumentation/opencensus-proto/archive/v" + OPENCENSUS_RELEASE + ".tar.gz", - build_file_content = """ +OPENCENSUSTRACE_BUILD_CONTENT = """ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library") load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") @@ -307,5 +290,16 @@ go_proto_library( proto = ":trace_model", visibility = ["//visibility:public"], ) - """, - ) +""" + +KAFKASOURCE_BUILD_CONTENT = """ + +filegroup( + name = "request_protocol_files", + srcs = glob([ + "*Request.json", + ]), + visibility = ["//visibility:public"], +) + +""" diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl new file mode 100644 index 0000000000000..b3bcb016651bc --- /dev/null +++ b/api/bazel/repository_locations.bzl @@ -0,0 +1,57 @@ +BAZEL_SKYLIB_RELEASE = "0.8.0" +BAZEL_SKYLIB_SHA256 = "2ef429f5d7ce7111263289644d233707dba35e39696377ebab8b0bc701f7818e" + +GOGOPROTO_RELEASE = "1.2.1" +GOGOPROTO_SHA256 = "99e423905ba8921e86817607a5294ffeedb66fdd4a85efce5eb2848f715fdb3a" + +OPENCENSUS_RELEASE = "0.1.0" +OPENCENSUS_SHA256 = 
"4fd21cc6de63d7cb979fd749d8101ff425905aa0826fed26019d1c311fcf19a7" + +PGV_RELEASE = "0.0.14" +PGV_SHA256 = "c45e629e8c174886a73ec251b94d5470526c7c1e2596cf17755065aed15b9254" + +GOOGLEAPIS_GIT_SHA = "d642131a6e6582fc226caf9893cb7fe7885b3411" # May 23, 2018 +GOOGLEAPIS_SHA = "16f5b2e8bf1e747a32f9a62e211f8f33c94645492e9bbd72458061d9a9de1f63" + +PROMETHEUS_GIT_SHA = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" # Nov 17, 2017 +PROMETHEUS_SHA = "783bdaf8ee0464b35ec0c8704871e1e72afa0005c3f3587f65d9d6694bf3911b" + +KAFKA_SOURCE_SHA = "ae7a1696c0a0302b43c5b21e515c37e6ecd365941f68a510a7e442eebddf39a1" # 2.2.0-rc2 + +REPOSITORY_LOCATIONS = dict( + bazel_skylib = dict( + sha256 = BAZEL_SKYLIB_SHA256, + urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/" + BAZEL_SKYLIB_RELEASE + "/bazel-skylib." + BAZEL_SKYLIB_RELEASE + ".tar.gz"], + ), + com_lyft_protoc_gen_validate = dict( + sha256 = PGV_SHA256, + strip_prefix = "protoc-gen-validate-" + PGV_RELEASE, + urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/v" + PGV_RELEASE + ".tar.gz"], + ), + googleapis = dict( + # TODO(dio): Consider writing a Skylark macro for importing Google API proto. 
+ sha256 = GOOGLEAPIS_SHA, + strip_prefix = "googleapis-" + GOOGLEAPIS_GIT_SHA, + urls = ["https://github.com/googleapis/googleapis/archive/" + GOOGLEAPIS_GIT_SHA + ".tar.gz"], + ), + com_github_gogo_protobuf = dict( + sha256 = GOGOPROTO_SHA256, + strip_prefix = "protobuf-" + GOGOPROTO_RELEASE, + urls = ["https://github.com/gogo/protobuf/archive/v" + GOGOPROTO_RELEASE + ".tar.gz"], + ), + prometheus_metrics_model = dict( + sha256 = PROMETHEUS_SHA, + strip_prefix = "client_model-" + PROMETHEUS_GIT_SHA, + urls = ["https://github.com/prometheus/client_model/archive/" + PROMETHEUS_GIT_SHA + ".tar.gz"], + ), + io_opencensus_trace = dict( + sha256 = OPENCENSUS_SHA256, + strip_prefix = "opencensus-proto-" + OPENCENSUS_RELEASE + "/src/opencensus/proto/trace/v1", + urls = ["https://github.com/census-instrumentation/opencensus-proto/archive/v" + OPENCENSUS_RELEASE + ".tar.gz"], + ), + kafka_source = dict( + sha256 = KAFKA_SOURCE_SHA, + strip_prefix = "kafka-2.2.0-rc2/clients/src/main/resources/common/message", + urls = ["https://github.com/apache/kafka/archive/2.2.0-rc2.zip"], + ), +) diff --git a/api/docs/BUILD b/api/docs/BUILD index 6445a9687e887..11965bd89e65e 100644 --- a/api/docs/BUILD +++ b/api/docs/BUILD @@ -33,9 +33,12 @@ proto_library( "//envoy/config/accesslog/v2:als", "//envoy/config/accesslog/v2:file", "//envoy/config/bootstrap/v2:bootstrap", + "//envoy/config/cluster/redis:redis_cluster", "//envoy/config/common/tap/v2alpha:common", "//envoy/config/filter/accesslog/v2:accesslog", + "//envoy/config/filter/dubbo/router/v2alpha1:router", "//envoy/config/filter/http/buffer/v2:buffer", + "//envoy/config/filter/http/csrf/v2:csrf", "//envoy/config/filter/http/ext_authz/v2:ext_authz", "//envoy/config/filter/http/fault/v2:fault", "//envoy/config/filter/http/gzip/v2:gzip", @@ -52,6 +55,7 @@ proto_library( "//envoy/config/filter/http/transcoder/v2:transcoder", "//envoy/config/filter/listener/original_src/v2alpha1:original_src", 
"//envoy/config/filter/network/client_ssl_auth/v2:client_ssl_auth", + "//envoy/config/filter/network/dubbo_proxy/v2alpha1:dubbo_proxy", "//envoy/config/filter/network/ext_authz/v2:ext_authz", "//envoy/config/filter/network/http_connection_manager/v2:http_connection_manager", "//envoy/config/filter/network/mongo_proxy/v2:mongo_proxy", @@ -75,6 +79,7 @@ proto_library( "//envoy/data/accesslog/v2:accesslog", "//envoy/data/cluster/v2alpha:outlier_detection_event", "//envoy/data/core/v2alpha:health_check_event", + "//envoy/data/tap/v2alpha:common", "//envoy/data/tap/v2alpha:http", "//envoy/data/tap/v2alpha:transport", "//envoy/data/tap/v2alpha:wrapper", diff --git a/api/envoy/admin/v2alpha/BUILD b/api/envoy/admin/v2alpha/BUILD index a6b403fdd23ec..3806af07dd9fe 100644 --- a/api/envoy/admin/v2alpha/BUILD +++ b/api/envoy/admin/v2alpha/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/admin/v2alpha/clusters.proto b/api/envoy/admin/v2alpha/clusters.proto index be7011be03036..b74ace25b5cc7 100644 --- a/api/envoy/admin/v2alpha/clusters.proto +++ b/api/envoy/admin/v2alpha/clusters.proto @@ -78,6 +78,13 @@ message HostHealthStatus { // The host is currently being marked as degraded through active health checking. bool failed_active_degraded_check = 4; + // The host has been removed from service discovery, but is being stabilized due to active + // health checking. + bool pending_dynamic_removal = 5; + + // The host has not yet been health checked. + bool pending_active_hc = 6; + // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported // here. // TODO(mrice32): pipe through remaining EDS health status possibilities. 
diff --git a/api/envoy/admin/v2alpha/memory.proto b/api/envoy/admin/v2alpha/memory.proto index f4158f4bb37a2..d86e448810561 100644 --- a/api/envoy/admin/v2alpha/memory.proto +++ b/api/envoy/admin/v2alpha/memory.proto @@ -10,7 +10,7 @@ option java_package = "io.envoyproxy.envoy.admin.v2alpha"; // Proto representation of the internal memory consumption of an Envoy instance. These represent // values extracted from an internal TCMalloc instance. For more information, see the section of the -// docs entitled ["Generic Tcmalloc Status"](http://gperftools.github.io/gperftools/tcmalloc.html). +// docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html). message Memory { // The number of bytes allocated by the heap for Envoy. This is an alias for diff --git a/api/envoy/admin/v2alpha/server_info.proto b/api/envoy/admin/v2alpha/server_info.proto index 18dcc70b805cc..0a4506f1676b8 100644 --- a/api/envoy/admin/v2alpha/server_info.proto +++ b/api/envoy/admin/v2alpha/server_info.proto @@ -114,11 +114,9 @@ message CommandLineOptions { // See :option:`--mode` for details. Mode mode = 19; - // See :option:`--max-stats` for details. - uint64 max_stats = 20; - - // See :option:`--max-obj-name-len` for details. - uint64 max_obj_name_len = 21; + // max_stats and max_obj_name_len are now unused and have no effect. + uint64 max_stats = 20 [deprecated = true]; + uint64 max_obj_name_len = 21 [deprecated = true]; // See :option:`--disable-hot-restart` for details. bool disable_hot_restart = 22; @@ -128,4 +126,7 @@ message CommandLineOptions { // See :option:`--restart-epoch` for details. uint32 restart_epoch = 24; + + // See :option:`--cpuset-threads` for details. 
+ bool cpuset_threads = 25; } diff --git a/api/envoy/api/v2/BUILD b/api/envoy/api/v2/BUILD index 261d140819985..3cc2d6b2c2ecc 100644 --- a/api/envoy/api/v2/BUILD +++ b/api/envoy/api/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 @@ -129,6 +129,7 @@ api_proto_library_internal( deps = [ ":discovery", "//envoy/api/v2/core:base", + "//envoy/api/v2/core:config_source", "//envoy/api/v2/route", ], ) @@ -139,6 +140,28 @@ api_go_grpc_library( deps = [ ":discovery_go_proto", "//envoy/api/v2/core:base_go_proto", + "//envoy/api/v2/core:config_source_go_proto", "//envoy/api/v2/route:route_go_proto", ], ) + +api_proto_library_internal( + name = "srds", + srcs = ["srds.proto"], + has_services = 1, + visibility = [":friends"], + deps = [ + ":discovery", + "//envoy/api/v2/core:base", + "//envoy/api/v2/route", + ], +) + +api_go_grpc_library( + name = "srds", + proto = ":srds", + deps = [ + ":discovery_go_proto", + "//envoy/api/v2/core:base_go_proto", + ], +) diff --git a/api/envoy/api/v2/auth/BUILD b/api/envoy/api/v2/auth/BUILD index 55f522c0085a9..acc28aacff053 100644 --- a/api/envoy/api/v2/auth/BUILD +++ b/api/envoy/api/v2/auth/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/api/v2/auth/cert.proto b/api/envoy/api/v2/auth/cert.proto index 8bc2ee37b849d..526caf2928294 100644 --- a/api/envoy/api/v2/auth/cert.proto +++ b/api/envoy/api/v2/auth/cert.proto @@ -248,7 +248,7 @@ message CertificateValidationContext { google.protobuf.BoolValue require_signed_certificate_timestamp = 6; // An optional `certificate revocation list 
- // `_ + // `_ // (in PEM format). If specified, Envoy will verify that the presented peer // certificate has not been revoked by this CRL. If this DataSource contains // multiple CRLs, all of them will be used. diff --git a/api/envoy/api/v2/cds.proto b/api/envoy/api/v2/cds.proto index cca6de9e0d670..74ae29fb11b64 100644 --- a/api/envoy/api/v2/cds.proto +++ b/api/envoy/api/v2/cds.proto @@ -30,14 +30,14 @@ import "validate/validate.proto"; import "gogoproto/gogo.proto"; option (gogoproto.equal_all) = true; +option (gogoproto.stable_marshaler_all) = true; // Return list of all clusters this proxy will load balance to. service ClusterDiscoveryService { rpc StreamClusters(stream DiscoveryRequest) returns (stream DiscoveryResponse) { } - rpc IncrementalClusters(stream IncrementalDiscoveryRequest) - returns (stream IncrementalDiscoveryResponse) { + rpc DeltaClusters(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { } rpc FetchClusters(DiscoveryRequest) returns (DiscoveryResponse) { @@ -51,16 +51,13 @@ service ClusterDiscoveryService { // [#protodoc-title: Clusters] // Configuration for a single upstream cluster. -// [#comment:next free field: 38] +// [#comment:next free field: 39] message Cluster { // Supplies the name of the cluster which must be unique across all clusters. // The cluster name is used when emitting // :ref:`statistics ` if :ref:`alt_stat_name // ` is not provided. // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. - // By default, the maximum length of a cluster name is limited to 60 - // characters. This limit can be increased by setting the - // :option:`--max-obj-name-len` command line argument to the desired value. string name = 1 [(validate.rules).string.min_bytes = 1]; // An optional alternative to the cluster name to be used while emitting stats. @@ -95,9 +92,25 @@ message Cluster { // for an explanation. 
ORIGINAL_DST = 4; } - // The :ref:`service discovery type ` - // to use for resolving the cluster. - DiscoveryType type = 2 [(validate.rules).enum.defined_only = true]; + + // Extended cluster type. + message CustomClusterType { + // The type of the cluster to instantiate. The name must match a supported cluster type. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Cluster specific configuration which depends on the cluster being instantiated. + // See the supported cluster for further documentation. + google.protobuf.Any typed_config = 2; + } + + oneof cluster_discovery_type { + // The :ref:`service discovery type ` + // to use for resolving the cluster. + DiscoveryType type = 2 [(validate.rules).enum.defined_only = true]; + + // The custom cluster type. + CustomClusterType cluster_type = 38; + } // Only valid when discovery type is EDS. message EdsClusterConfig { @@ -178,7 +191,7 @@ message Cluster { // :ref:`STRICT_DNS` // or :ref:`LOGICAL_DNS` clusters. // This field supersedes :ref:`hosts` field. - // [#comment:TODO(dio): Deprecate the hosts field and add it to DEPRECATED.md + // [#comment:TODO(dio): Deprecate the hosts field and add it to :ref:`deprecated log` // once load_assignment is implemented.] // // .. attention:: @@ -235,7 +248,7 @@ message Cluster { // for upstream connections. The key should match the extension filter name, such as // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on // specific options. - map extension_protocol_options = 35 [deprecated = true]; + map extension_protocol_options = 35; // The extension_protocol_options field is used to provide extension-specific protocol options // for upstream connections. The key should match the extension filter name, such as @@ -385,6 +398,14 @@ message Cluster { // going to an individual locality if said locality is disproportionally affected by the // subset predicate. 
bool scale_locality_weight = 5; + + // If true, when a fallback policy is configured and its corresponding subset fails to find + // a host this will cause any host to be selected instead. + // + // This is useful when using the default subset as the fallback policy, given the default + // subset might become empty. With this option enabled, if that happens the LB will attempt + // to select a host from the entire cluster. + bool panic_mode_any = 6; } // Configuration for load balancing subsetting. @@ -400,24 +421,13 @@ message Cluster { // Specific configuration for the :ref:`RingHash` // load balancing policy. message RingHashLbConfig { - // Minimum hash ring size, i.e. total virtual nodes. A larger size - // will provide better request distribution since each host in the - // cluster will have more virtual nodes. Defaults to 1024. In the case - // that total number of hosts is greater than the minimum, each host will - // be allocated a single virtual node. This field is limited to 8M to bound - // resource use. + // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each + // provided host) the better the request distribution will reflect the desired weights. Defaults + // to 1024 entries, and limited to 8M entries. See also + // :ref:`maximum_ring_size`. google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64.lte = 8388608]; - // [#not-implemented-hide:] Hide from docs. - message DeprecatedV1 { - // Defaults to false, meaning that `xxHash `_ - // is to hash hosts onto the ketama ring. - google.protobuf.BoolValue use_std_hash = 1; - } - - // Deprecated settings from v1 config. - // [#not-implemented-hide:] Hide from docs. - DeprecatedV1 deprecated_v1 = 2 [deprecated = true]; + reserved 2; // The hash function used to hash hosts onto the ketama ring. enum HashFunction { @@ -432,6 +442,11 @@ message Cluster { // The hash function used to hash hosts onto the ketama ring. 
The value defaults to // :ref:`XX_HASH`. HashFunction hash_function = 3 [(validate.rules).enum.defined_only = true]; + + // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered + // to further constrain resource use. See also + // :ref:`minimum_ring_size`. + google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64.lte = 8388608]; } // Specific configuration for the @@ -511,6 +526,27 @@ message Cluster { // because merging those updates isn't currently safe. See // https://github.com/envoyproxy/envoy/pull/3941. google.protobuf.Duration update_merge_window = 4; + + // If set to true, Envoy will not consider new hosts when computing load balancing weights until + // they have been health checked for the first time. This will have no effect unless + // active health checking is also configured. + // + // Ignoring a host means that for any load balancing calculations that adjust weights based + // on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and + // panic mode) Envoy will exclude these hosts in the denominator. + // + // For example, with hosts in two priorities P0 and P1, where P0 looks like + // {healthy, unhealthy (new), unhealthy (new)} + // and where P1 looks like + // {healthy, healthy} + // all traffic will still hit P0, as 1 / (3 - 2) = 1. + // + // Enabling this will allow scaling up the number of hosts for a given cluster without entering + // panic mode or triggering priority spillover, assuming the hosts pass the first health check. + // + // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not + // contribute to the calculation when deciding whether panic mode is enabled or not. + bool ignore_new_hosts_until_first_hc = 5; } // Common configuration for all load balancer implementations. 
diff --git a/api/envoy/api/v2/cluster/BUILD b/api/envoy/api/v2/cluster/BUILD index a3b091dea5f28..ab34f59d0e4e7 100644 --- a/api/envoy/api/v2/cluster/BUILD +++ b/api/envoy/api/v2/cluster/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/api/v2/cluster/circuit_breaker.proto b/api/envoy/api/v2/cluster/circuit_breaker.proto index 50b20c08e5e7e..f219fa07b4feb 100644 --- a/api/envoy/api/v2/cluster/circuit_breaker.proto +++ b/api/envoy/api/v2/cluster/circuit_breaker.proto @@ -46,6 +46,18 @@ message CircuitBreakers { // The maximum number of parallel retries that Envoy will allow to the // upstream cluster. If not specified, the default is 3. google.protobuf.UInt32Value max_retries = 5; + + // If track_remaining is true, then stats will be published that expose + // the number of resources remaining until the circuit breakers open. If + // not specified, the default is false. + bool track_remaining = 6; + + // The maximum number of connection pools per cluster that Envoy will concurrently support at + // once. If not specified, the default is unlimited. Set this for clusters which create a + // large number of connection pools. See + // :ref:`Circuit Breaking ` for + // more details. 
+ google.protobuf.UInt32Value max_connection_pools = 7; } // If multiple :ref:`Thresholds` diff --git a/api/envoy/api/v2/core/BUILD b/api/envoy/api/v2/core/BUILD index 45251aebb4ba5..b324a8ad01907 100644 --- a/api/envoy/api/v2/core/BUILD +++ b/api/envoy/api/v2/core/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 @@ -54,13 +54,19 @@ api_proto_library_internal( visibility = [ ":friends", ], - deps = [":base"], + deps = [ + ":base", + "//envoy/type:range", + ], ) api_go_proto_library( name = "health_check", proto = ":health_check", - deps = [":base_go_proto"], + deps = [ + ":base_go_proto", + "//envoy/type:range_go_proto", + ], ) api_proto_library_internal( diff --git a/api/envoy/api/v2/core/address.proto b/api/envoy/api/v2/core/address.proto index 6e76f5b721901..3d597f56bec56 100644 --- a/api/envoy/api/v2/core/address.proto +++ b/api/envoy/api/v2/core/address.proto @@ -53,10 +53,11 @@ message SocketAddress { // named resolver is capable of named port resolution. string named_port = 4; } - // The name of the resolver. This must have been registered with Envoy. If this is - // empty, a context dependent default applies. If address is a hostname this - // should be set for resolution other than DNS. If the address is a concrete - // IP address, no resolution will occur. + // The name of the custom resolver. This must have been registered with Envoy. If + // this is empty, a context dependent default applies. If the address is a concrete + // IP address, no resolution will occur. If address is a hostname this + // should be set for resolution other than DNS. Specifying a custom resolver with + // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. 
string resolver_name = 5; // When binding to an IPv6 address above, this enables `IPv4 compatibility diff --git a/api/envoy/api/v2/core/base.proto b/api/envoy/api/v2/core/base.proto index 94c6bfbe9389f..6b4e931cfa405 100644 --- a/api/envoy/api/v2/core/base.proto +++ b/api/envoy/api/v2/core/base.proto @@ -17,6 +17,7 @@ import "gogoproto/gogo.proto"; import "envoy/type/percent.proto"; option (gogoproto.equal_all) = true; +option (gogoproto.stable_marshaler_all) = true; // [#protodoc-title: Common types] @@ -164,6 +165,11 @@ message HeaderValueOption { google.protobuf.BoolValue append = 2; } +// Wrapper for a set of headers. +message HeaderMap { + repeated HeaderValue headers = 1; +} + // Data source consisting of either a file or an inline value. message DataSource { oneof specifier { @@ -192,7 +198,7 @@ message TransportSocket { // Implementation specific configuration which depends on the implementation being instantiated. // See the supported transport socket implementations for further documentation. oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; + google.protobuf.Struct config = 2; google.protobuf.Any typed_config = 3; } diff --git a/api/envoy/api/v2/core/config_source.proto b/api/envoy/api/v2/core/config_source.proto index 3be59c1886b5f..8b6014dcbf9d3 100644 --- a/api/envoy/api/v2/core/config_source.proto +++ b/api/envoy/api/v2/core/config_source.proto @@ -32,6 +32,13 @@ message ApiConfigSource { REST = 1; // gRPC v2 API. GRPC = 2; + // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} + // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state + // with every update, the xDS server only sends what has changed since the last update. + // + // DELTA_GRPC is not yet entirely implemented! Initially, only CDS is available. + // Do not use for other xDSes. TODO(fredlas) update/remove this warning when appropriate. 
+ DELTA_GRPC = 3; } ApiType api_type = 1 [(validate.rules).enum.defined_only = true]; // Cluster names should be used only with REST. If > 1 @@ -104,4 +111,14 @@ message ConfigSource { // source in the bootstrap configuration is used. AggregatedConfigSource ads = 3; } + + // Optional initialization timeout. + // When this timeout is specified, Envoy will wait no longer than the specified time for first + // config response on this xDS subscription during the :ref:`initialization process + // `. After reaching the timeout, Envoy will move to the next + // initialization phase, even if the first config is not delivered yet. The timer is activated + // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 + // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another + // timeout applies). Default 0. + google.protobuf.Duration initial_fetch_timeout = 4; } diff --git a/api/envoy/api/v2/core/grpc_service.proto b/api/envoy/api/v2/core/grpc_service.proto index 0ee84e3b1077d..404791e1b3a51 100644 --- a/api/envoy/api/v2/core/grpc_service.proto +++ b/api/envoy/api/v2/core/grpc_service.proto @@ -84,7 +84,7 @@ message GrpcService { message MetadataCredentialsFromPlugin { string name = 1; oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; + google.protobuf.Struct config = 2; google.protobuf.Any typed_config = 3; } diff --git a/api/envoy/api/v2/core/health_check.proto b/api/envoy/api/v2/core/health_check.proto index 74b0249834598..edbcef7b52de3 100644 --- a/api/envoy/api/v2/core/health_check.proto +++ b/api/envoy/api/v2/core/health_check.proto @@ -7,6 +7,7 @@ option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.core"; import "envoy/api/v2/core/base.proto"; +import "envoy/type/range.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; @@ -43,6 +44,11 @@ message HealthCheck { (gogoproto.stdduration) = true ]; + // An 
optional jitter amount in milliseconds. If specified, Envoy will start health + // checking after for a random time in ms between 0 and initial_jitter. This only + // applies to the first health check. + google.protobuf.Duration initial_jitter = 20; + // An optional jitter amount in milliseconds. If specified, during every // interval Envoy will add interval_jitter to the wait time. google.protobuf.Duration interval_jitter = 3; @@ -84,7 +90,7 @@ message HealthCheck { } } - // [#comment:next free field: 9] + // [#comment:next free field: 10] message HttpHealthCheck { // The value of the host header in the HTTP health check request. If // left empty (default value), the name of the cluster this health check is associated @@ -119,6 +125,11 @@ message HealthCheck { // If set, health checks will be made using http/2. bool use_http2 = 7; + + // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default + // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open + // semantics of :ref:`Int64Range `. + repeated envoy.type.Int64Range expected_statuses = 9; } message TcpHealthCheck { @@ -165,7 +176,7 @@ message HealthCheck { // A custom health checker specific configuration which depends on the custom health checker // being instantiated. See :api:`envoy/config/health_checker` for reference. oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; + google.protobuf.Struct config = 2; google.protobuf.Any typed_config = 3; } diff --git a/api/envoy/api/v2/core/protocol.proto b/api/envoy/api/v2/core/protocol.proto index 1f5027e7e8b8b..200b8517abd15 100644 --- a/api/envoy/api/v2/core/protocol.proto +++ b/api/envoy/api/v2/core/protocol.proto @@ -50,20 +50,20 @@ message Http1ProtocolOptions { } message Http2ProtocolOptions { - // `Maximum table size `_ + // `Maximum table size `_ // (in octets) that the encoder is permitted to use for the dynamic HPACK table. 
Valid values // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header // compression. google.protobuf.UInt32Value hpack_table_size = 1; - // `Maximum concurrent streams `_ + // `Maximum concurrent streams `_ // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) // and defaults to 2147483647. google.protobuf.UInt32Value max_concurrent_streams = 2 [(validate.rules).uint32 = {gte: 1, lte: 2147483647}]; // `Initial stream-level flow-control window - // `_ size. Valid values range from 65535 + // `_ size. Valid values range from 65535 // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456 // (256 * 1024 * 1024). // diff --git a/api/envoy/api/v2/discovery.proto b/api/envoy/api/v2/discovery.proto index ff04dc20f8b41..8743cc150ee2c 100644 --- a/api/envoy/api/v2/discovery.proto +++ b/api/envoy/api/v2/discovery.proto @@ -14,6 +14,7 @@ import "google/rpc/status.proto"; import "gogoproto/gogo.proto"; option (gogoproto.equal_all) = true; +option (gogoproto.stable_marshaler_all) = true; // [#protodoc-title: Common discovery API components] @@ -102,33 +103,32 @@ message DiscoveryResponse { core.ControlPlane control_plane = 6; } -// IncrementalDiscoveryRequest and IncrementalDiscoveryResponse are used in a -// new gRPC endpoint for Incremental xDS. The feature is not supported for REST -// management servers. +// DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC +// endpoint for Delta xDS. // -// With Incremental xDS, the IncrementalDiscoveryResponses do not need to -// include a full snapshot of the tracked resources. Instead -// IncrementalDiscoveryResponses are a diff to the state of a xDS client. -// In Incremental XDS there are per resource versions which allows to track -// state at the resource granularity. 
-// An xDS Incremental session is always in the context of a gRPC bidirectional +// With Delta xDS, the DeltaDiscoveryResponses do not need to include a full +// snapshot of the tracked resources. Instead, DeltaDiscoveryResponses are a +// diff to the state of a xDS client. +// In Delta XDS there are per resource versions, which allow tracking state at +// the resource granularity. +// An xDS Delta session is always in the context of a gRPC bidirectional // stream. This allows the xDS server to keep track of the state of xDS clients // connected to it. // -// In Incremental xDS the nonce field is required and used to pair -// IncrementalDiscoveryResponse to a IncrementalDiscoveryRequest ACK or NACK. +// In Delta xDS the nonce field is required and used to pair +// DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK. // Optionally, a response message level system_version_info is present for // debugging purposes only. // -// IncrementalDiscoveryRequest can be sent in 3 situations: +// DeltaDiscoveryRequest can be sent in 3 situations: // 1. Initial message in a xDS bidirectional gRPC stream. -// 2. As a ACK or NACK response to a previous IncrementalDiscoveryResponse. +// 2. As a ACK or NACK response to a previous DeltaDiscoveryResponse. // In this case the response_nonce is set to the nonce value in the Response. // ACK or NACK is determined by the absence or presence of error_detail. -// 3. Spontaneous IncrementalDiscoveryRequest from the client. +// 3. Spontaneous DeltaDiscoveryRequest from the client. // This can be done to dynamically add or remove elements from the tracked // resource_names set. In this case response_nonce must be omitted. -message IncrementalDiscoveryRequest { +message DeltaDiscoveryRequest { // The node making the request. core.Node node = 1; @@ -138,35 +138,42 @@ message IncrementalDiscoveryRequest { // required for ADS. 
string type_url = 2; - // IncrementalDiscoveryRequests allow the client to add or remove individual + // DeltaDiscoveryRequests allow the client to add or remove individual // resources to the set of tracked resources in the context of a stream. // All resource names in the resource_names_subscribe list are added to the // set of tracked resources and all resource names in the resource_names_unsubscribe // list are removed from the set of tracked resources. - // Unlike in non incremental xDS, an empty resource_names_subscribe or + // Unlike in state-of-the-world xDS, an empty resource_names_subscribe or // resource_names_unsubscribe list simply means that no resources are to be // added or removed to the resource list. // The xDS server must send updates for all tracked resources but can also // send updates for resources the client has not subscribed to. This behavior - // is similar to non incremental xDS. - // These two fields can be set for all types of IncrementalDiscoveryRequests + // is similar to state-of-the-world xDS. + // These two fields can be set for all types of DeltaDiscoveryRequests // (initial, ACK/NACK or spontaneous). // + // NOTE: the server must respond with all resources listed in resource_names_subscribe, + // even if it believes the client has the most recent version of them. The reason: + // the client may have dropped them, but then regained interest before it had a chance + // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd. + // // A list of Resource names to add to the list of tracked resources. repeated string resource_names_subscribe = 3; // A list of Resource names to remove from the list of tracked resources. repeated string resource_names_unsubscribe = 4; - // This map must be populated when the IncrementalDiscoveryRequest is the - // first in a stream. 
The keys are the resources names of the xDS resources + // This map must be populated when the DeltaDiscoveryRequest is the + // first in a stream (assuming there are any resources - this field's purpose is to enable + // a session to continue in a reconnected gRPC stream, and so will not be used in the very + // first stream of a session). The keys are the resources names of the xDS resources // known to the xDS client. The values in the map are the associated resource // level version info. map initial_resource_versions = 5; - // When the IncrementalDiscoveryRequest is a ACK or NACK message in response - // to a previous IncrementalDiscoveryResponse, the response_nonce must be the - // nonce in the IncrementalDiscoveryResponse. + // When the DeltaDiscoveryRequest is a ACK or NACK message in response + // to a previous DeltaDiscoveryResponse, the response_nonce must be the + // nonce in the DeltaDiscoveryResponse. // Otherwise response_nonce must be omitted. string response_nonce = 6; @@ -176,24 +183,31 @@ message IncrementalDiscoveryRequest { google.rpc.Status error_detail = 7; } -message IncrementalDiscoveryResponse { +message DeltaDiscoveryResponse { // The version of the response data (used for debugging). string system_version_info = 1; // The response resources. These are typed resources that match the type url - // in the IncrementalDiscoveryRequest. + // in the DeltaDiscoveryRequest. repeated Resource resources = 2 [(gogoproto.nullable) = false]; // Resources names of resources that have be deleted and to be removed from the xDS Client. // Removed resources for missing resources can be ignored. repeated string removed_resources = 6; - // The nonce provides a way for IncrementalDiscoveryRequests to uniquely - // reference a IncrementalDiscoveryResponse. The nonce is required. + // The nonce provides a way for DeltaDiscoveryRequests to uniquely + // reference a DeltaDiscoveryResponse. The nonce is required. 
string nonce = 5; } message Resource { + // The resource's name, to distinguish it from others of the same type of resource. + string name = 3; + + // [#not-implemented-hide:] + // The aliases are a list of other names that this resource can go by. + repeated string aliases = 4; + // The resource level version. It allows xDS to track the state of individual // resources. string version = 1; diff --git a/api/envoy/api/v2/eds.proto b/api/envoy/api/v2/eds.proto index fcef8c5f95fc9..2f8fd7a4186dd 100644 --- a/api/envoy/api/v2/eds.proto +++ b/api/envoy/api/v2/eds.proto @@ -17,8 +17,10 @@ import "google/api/annotations.proto"; import "validate/validate.proto"; import "gogoproto/gogo.proto"; import "google/protobuf/wrappers.proto"; +import "google/protobuf/duration.proto"; option (gogoproto.equal_all) = true; +option (gogoproto.stable_marshaler_all) = true; // [#protodoc-title: EDS] // Endpoint discovery :ref:`architecture overview ` @@ -42,9 +44,10 @@ service EndpointDiscoveryService { // // With EDS, each cluster is treated independently from a LB perspective, with // LB taking place between the Localities within a cluster and at a finer -// granularity between the hosts within a locality. For a given cluster, the -// effective weight of a host is its load_balancing_weight multiplied by the -// load_balancing_weight of its Locality. +// granularity between the hosts within a locality. The percentage of traffic +// for each endpoint is determined by both its load_balancing_weight, and the +// load_balancing_weight of its locality. First, a locality will be selected, +// then an endpoint within that locality will be chose based on its weight. message ClusterLoadAssignment { // Name of the cluster. This will be the :ref:`service_name // ` value if specified @@ -105,6 +108,12 @@ message ClusterLoadAssignment { // Read more at :ref:`priority levels ` and // :ref:`localities `. 
google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32.gt = 0]; + + // The max time until which the endpoints from this assignment can be used. + // If no new assignments are received before this time expires the endpoints + // are considered stale and should be marked unhealthy. + // Defaults to 0 which means endpoints never go stale. + google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration.gt.seconds = 0]; } // Load balancing policy settings. diff --git a/api/envoy/api/v2/endpoint/BUILD b/api/envoy/api/v2/endpoint/BUILD index 0d7c38b680a09..0dead0f570339 100644 --- a/api/envoy/api/v2/endpoint/BUILD +++ b/api/envoy/api/v2/endpoint/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/api/v2/endpoint/endpoint.proto b/api/envoy/api/v2/endpoint/endpoint.proto index 1d5d07f54d422..28136c2b867f9 100644 --- a/api/envoy/api/v2/endpoint/endpoint.proto +++ b/api/envoy/api/v2/endpoint/endpoint.proto @@ -132,4 +132,12 @@ message LocalityLbEndpoints { // // Priorities should range from 0 (highest) to N (lowest) without skipping. uint32 priority = 5 [(validate.rules).uint32 = {lte: 128}]; + + // Optional: Per locality proximity value which indicates how close this + // locality is from the source locality. This value only provides ordering + // information (lower the value, closer it is to the source locality). + // This will be consumed by load balancing schemes that need proximity order + // to determine where to route the requests. 
+ // [#not-implemented-hide:] + google.protobuf.UInt32Value proximity = 6; } diff --git a/api/envoy/api/v2/endpoint/load_report.proto b/api/envoy/api/v2/endpoint/load_report.proto index 58580d4203913..df3fd6071e4f7 100644 --- a/api/envoy/api/v2/endpoint/load_report.proto +++ b/api/envoy/api/v2/endpoint/load_report.proto @@ -25,15 +25,6 @@ message UpstreamLocalityStats { // collected from. Zone and region names could be empty if unknown. core.Locality locality = 1; - // The total number of requests sent by this Envoy since the last report. This - // information is aggregated over all the upstream Endpoints. total_requests - // can be inferred from: - // - // .. code-block:: none - // - // total_requests = total_successful_requests + total_requests_in_progress + - // total_error_requests - // // The total number of requests successfully completed by the endpoints in the // locality. uint64 total_successful_requests = 2; @@ -45,6 +36,11 @@ message UpstreamLocalityStats { // aggregated over all endpoints in the locality. uint64 total_error_requests = 4; + // The total number of requests that were issued by this Envoy since + // the last report. This information is aggregated over all the + // upstream endpoints in the locality. + uint64 total_issued_requests = 8; + // Stats for multi-dimensional load balancing. repeated EndpointLoadMetricStats load_metric_stats = 5; @@ -66,16 +62,6 @@ message UpstreamEndpointStats { // endpoint. Envoy will pass this directly to the management server. google.protobuf.Struct metadata = 6; - // The total number of requests successfully completed by the endpoint. A - // single HTTP or gRPC request or stream is counted as one request. A TCP - // connection is also treated as one request. There is no explicit - // total_requests field below for an endpoint, but it may be inferred from: - // - // .. 
code-block:: none - // - // total_requests = total_successful_requests + total_requests_in_progress + - // total_error_requests - // // The total number of requests successfully completed by the endpoints in the // locality. These include non-5xx responses for HTTP, where errors // originate at the client and the endpoint responded successfully. For gRPC, @@ -97,6 +83,11 @@ message UpstreamEndpointStats { // - DataLoss uint64 total_error_requests = 4; + // The total number of requests that were issued to this endpoint + // since the last report. A single TCP connection, HTTP or gRPC + // request or stream is counted as one request. + uint64 total_issued_requests = 7; + // Stats for multi-dimensional load balancing. repeated EndpointLoadMetricStats load_metric_stats = 5; } @@ -117,23 +108,23 @@ message EndpointLoadMetricStats { // Per cluster load stats. Envoy reports these stats a management server in a // :ref:`LoadStatsRequest` // [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. +// Next ID: 7 message ClusterStats { // The name of the cluster. string cluster_name = 1 [(validate.rules).string.min_bytes = 1]; + // The eds_cluster_config service_name of the cluster. + // It's possible that two clusters send the same service_name to EDS, + // in that case, the management server is supposed to do aggregation on the load reports. + string cluster_service_name = 6; + // Need at least one. repeated UpstreamLocalityStats upstream_locality_stats = 2 [(validate.rules).repeated .min_items = 1]; // Cluster-level stats such as total_successful_requests may be computed by // summing upstream_locality_stats. In addition, below there are additional - // cluster-wide stats. The following total_requests equality holds at the - // cluster-level: - // - // .. code-block:: none - // - // sum_locality(total_successful_requests) + sum_locality(total_requests_in_progress) + - // sum_locality(total_error_requests) + total_dropped_requests` + // cluster-wide stats. 
// // The total number of dropped requests. This covers requests // deliberately dropped by the drop_overload policy and circuit breaking. diff --git a/api/envoy/api/v2/lds.proto b/api/envoy/api/v2/lds.proto index 5b2f8bbc1b976..2ecfce3f50103 100644 --- a/api/envoy/api/v2/lds.proto +++ b/api/envoy/api/v2/lds.proto @@ -46,9 +46,6 @@ message Listener { // The unique name by which this listener is known. If no name is provided, // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically // updated or removed via :ref:`LDS ` a unique name must be provided. - // By default, the maximum length of a listener's name is limited to 60 characters. This limit can - // be increased by setting the :option:`--max-obj-name-len` command line argument to the desired - // value. string name = 1; // The address that the listener should listen on. In general, the address must be unique, though @@ -177,11 +174,5 @@ message Listener { // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; - // If true, the order of write filters will be reversed to that of filters - // configured in the filter chain. Otherwise, it will keep the existing - // order. Note: this is a bug fix for Envoy, which is designed to have the - // reversed order of write filters to that of read ones, (see - // https://github.com/envoyproxy/envoy/issues/4599 for details). When we - // remove this field, Envoy will have the same behavior when it sets true. 
- google.protobuf.BoolValue bugfix_reverse_write_filter_order = 14 [deprecated = true]; + reserved 14; } diff --git a/api/envoy/api/v2/listener/BUILD b/api/envoy/api/v2/listener/BUILD index bfa6a1407107f..9eb0c0ec982ff 100644 --- a/api/envoy/api/v2/listener/BUILD +++ b/api/envoy/api/v2/listener/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/api/v2/listener/listener.proto b/api/envoy/api/v2/listener/listener.proto index 96b7668af2560..b43bcd8d509d4 100644 --- a/api/envoy/api/v2/listener/listener.proto +++ b/api/envoy/api/v2/listener/listener.proto @@ -41,7 +41,7 @@ message Filter { // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; + google.protobuf.Struct config = 2; google.protobuf.Any typed_config = 4; } @@ -210,7 +210,7 @@ message ListenerFilter { // Filter specific configuration which depends on the filter being instantiated. // See the supported filters for further documentation. 
oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; + google.protobuf.Struct config = 2; google.protobuf.Any typed_config = 3; } diff --git a/api/envoy/api/v2/ratelimit/BUILD b/api/envoy/api/v2/ratelimit/BUILD index 6e640b04986c6..5f2a9201463d1 100644 --- a/api/envoy/api/v2/ratelimit/BUILD +++ b/api/envoy/api/v2/ratelimit/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/api/v2/rds.proto b/api/envoy/api/v2/rds.proto index 8d41b384ba9bd..18147b68174d3 100644 --- a/api/envoy/api/v2/rds.proto +++ b/api/envoy/api/v2/rds.proto @@ -9,6 +9,7 @@ option java_package = "io.envoyproxy.envoy.api.v2"; option java_generic_services = true; import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/core/config_source.proto"; import "envoy/api/v2/discovery.proto"; import "envoy/api/v2/route/route.proto"; @@ -33,8 +34,7 @@ service RouteDiscoveryService { rpc StreamRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) { } - rpc IncrementalRoutes(stream IncrementalDiscoveryRequest) - returns (stream IncrementalDiscoveryResponse) { + rpc DeltaRoutes(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { } rpc FetchRoutes(DiscoveryRequest) returns (DiscoveryResponse) { @@ -45,7 +45,23 @@ service RouteDiscoveryService { } } -// [#comment:next free field: 9] +// Virtual Host Discovery Service (VHDS) is used to dynamically update the list of virtual hosts for +// a given RouteConfiguration. If VHDS is configured a virtual host list update will be triggered +// during the processing of an HTTP request if a route for the request cannot be resolved. The +// :ref:`resource_names_subscribe ` +// field contains a list of virtual host names or aliases to track. 
The contents of an alias would +// be the contents of a *host* or *authority* header used to make an http request. An xDS server +// will match an alias to a virtual host based on the content of :ref:`domains' +// ` field. The *resource_names_unsubscribe* field contains +// a list of virtual host names that have been `unsubscribed +// `_ +// from the routing table associated with the RouteConfiguration. +service VirtualHostDiscoveryService { + rpc DeltaVirtualHosts(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { + } +} + +// [#comment:next free field: 10] message RouteConfiguration { // The name of the route configuration. For example, it might match // :ref:`route_config_name @@ -56,6 +72,15 @@ message RouteConfiguration { // An array of virtual hosts that make up the route table. repeated route.VirtualHost virtual_hosts = 2 [(gogoproto.nullable) = false]; + // An array of virtual hosts will be dynamically loaded via the VHDS API. + // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used + // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for + // on-demand discovery of virtual hosts. The contents of these two fields will be merged to + // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration + // taking precedence. + // [#not-implemented-hide:] + Vhds vhds = 9; + // Optionally specifies a list of HTTP headers that the connection manager // will consider to be internal only. If they are found on external requests they will be cleaned // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more @@ -103,3 +128,10 @@ message RouteConfiguration { // using CDS with a static route table). google.protobuf.BoolValue validate_clusters = 7; } + +// [#not-implemented-hide:] +message Vhds { + // Configuration source specifier for VHDS. 
+ envoy.api.v2.core.ConfigSource config_source = 1 + [(validate.rules).message.required = true, (gogoproto.nullable) = false]; +} \ No newline at end of file diff --git a/api/envoy/api/v2/route/BUILD b/api/envoy/api/v2/route/BUILD index 5bc60102532e4..2fec56ae389b4 100644 --- a/api/envoy/api/v2/route/BUILD +++ b/api/envoy/api/v2/route/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 @@ -8,6 +8,7 @@ api_proto_library_internal( visibility = ["//envoy/api/v2:friends"], deps = [ "//envoy/api/v2/core:base", + "//envoy/type:percent", "//envoy/type:range", ], ) @@ -17,6 +18,7 @@ api_go_proto_library( proto = ":route", deps = [ "//envoy/api/v2/core:base_go_proto", + "//envoy/type:percent_go_proto", "//envoy/type:range_go_proto", ], ) diff --git a/api/envoy/api/v2/route/route.proto b/api/envoy/api/v2/route/route.proto index f752ad14e26e1..10ba8f6b4b7b4 100644 --- a/api/envoy/api/v2/route/route.proto +++ b/api/envoy/api/v2/route/route.proto @@ -9,6 +9,7 @@ option go_package = "route"; option java_generic_services = true; import "envoy/api/v2/core/base.proto"; +import "envoy/type/percent.proto"; import "envoy/type/range.proto"; import "google/protobuf/any.proto"; @@ -20,6 +21,7 @@ import "validate/validate.proto"; import "gogoproto/gogo.proto"; option (gogoproto.equal_all) = true; +option (gogoproto.stable_marshaler_all) = true; // [#protodoc-title: HTTP route] // * Routing :ref:`architecture overview ` @@ -37,17 +39,21 @@ message VirtualHost { string name = 1 [(validate.rules).string.min_bytes = 1]; // A list of domains (host/authority header) that will be matched to this - // virtual host. Wildcard hosts are supported in the form of ``*.foo.com`` or - // ``*-bar.foo.com``. + // virtual host. Wildcard hosts are supported in the suffix or prefix form. 
+ // + // Domain search order: + // 1. Exact domain names: ``www.foo.com``. + // 2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``. + // 3. Prefix domain wildcards: ``foo.*`` or ``foo-*``. + // 4. Special wildcard ``*`` matching any domain. // // .. note:: // // The wildcard will not match the empty string. // e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``. - // Additionally, a special entry ``*`` is allowed which will match any - // host/authority header. Only a single virtual host in the entire route - // configuration can match on ``*``. A domain must be unique across all virtual - // hosts or the config will fail to load. + // The longest wildcards match first. + // Only a single virtual host in the entire route configuration can match on ``*``. A domain + // must be unique across all virtual hosts or the config will fail to load. repeated string domains = 2 [(validate.rules).repeated .min_items = 1]; // The list of routes that will be matched, in order, for incoming requests. @@ -115,7 +121,7 @@ message VirtualHost { // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter // specific; see the :ref:`HTTP filter documentation ` // for if and how it is utilized. - map per_filter_config = 12 [deprecated = true]; + map per_filter_config = 12; // The per_filter_config field can be used to provide virtual host-specific // configurations for filters. The key should match the filter name, such as @@ -138,6 +144,12 @@ message VirtualHost { // route level entry will take precedence over this config and it'll be treated // independently (e.g.: values are not inherited). RetryPolicy retry_policy = 16; + + // Indicates the hedge policy for all routes in this virtual host. Note that setting a + // route level entry will take precedence over this config and it'll be treated + // independently (e.g.: values are not inherited). 
+ // [#not-implemented-hide:] + HedgePolicy hedge_policy = 17; } // A route is both a specification of how to match a request as well as an indication of what to do @@ -182,7 +194,7 @@ message Route { // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter // specific; see the :ref:`HTTP filter documentation ` for // if and how it is utilized. - map per_filter_config = 8 [deprecated = true]; + map per_filter_config = 8; // The per_filter_config field can be used to provide route-specific // configurations for filters. The key should match the filter name, such as @@ -279,7 +291,7 @@ message WeightedCluster { // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter // specific; see the :ref:`HTTP filter documentation ` // for if and how it is utilized. - map per_filter_config = 8 [deprecated = true]; + map per_filter_config = 8; // The per_filter_config field can be used to provide weighted cluster-specific // configurations for filters. The key should match the filter name, such as @@ -323,7 +335,7 @@ message RouteMatch { // regex must match the *:path* header once the query string is removed. The entire path // (without the query string) must match the regex. The rule will not match if only a // subsequence of the *:path* header matches the regex. The regex grammar is defined `here - // `_. + // `_. // // Examples: // @@ -562,8 +574,7 @@ message RouteAction { // fires, the stream is terminated with a 408 Request Timeout error code if no // upstream response header has been received, otherwise a stream reset // occurs. - google.protobuf.Duration idle_timeout = 24 - [(validate.rules).duration.gt = {}, (gogoproto.stdduration) = true]; + google.protobuf.Duration idle_timeout = 24 [(gogoproto.stdduration) = true]; // Indicates that the route has a retry policy. 
Note that if this is set, // it'll take precedence over the virtual host level retry policy entirely @@ -627,14 +638,9 @@ message RouteAction { // https://github.com/lyft/protoc-gen-validate/issues/42 is resolved.] core.RoutingPriority priority = 11; - // [#not-implemented-hide:] - repeated core.HeaderValueOption request_headers_to_add = 12 [deprecated = true]; - - // [#not-implemented-hide:] - repeated core.HeaderValueOption response_headers_to_add = 18 [deprecated = true]; - - // [#not-implemented-hide:] - repeated string response_headers_to_remove = 19 [deprecated = true]; + reserved 12; + reserved 18; + reserved 19; // Specifies a set of rate limit configurations that could be applied to the // route. @@ -759,6 +765,15 @@ message RouteAction { // time gaps between gRPC request and response in gRPC streaming mode. google.protobuf.Duration max_grpc_timeout = 23 [(gogoproto.stdduration) = true]; + // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting + // the provided duration from the header. This is useful in allowing Envoy to set its global + // timeout to be less than that of the deadline imposed by the calling client, which makes it more + // likely that Envoy will handle the timeout instead of having the call canceled by the client. + // The offset will only be applied if the provided grpc_timeout is greater than the offset. This + // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning + // infinity). + google.protobuf.Duration grpc_timeout_offset = 28 [(gogoproto.stdduration) = true]; + // Allows enabling and disabling upgrades on a per-route basis. // This overrides any enabled/disabled upgrade filter chain specified in the // HttpConnectionManager @@ -781,9 +796,16 @@ message RouteAction { HANDLE_INTERNAL_REDIRECT = 1; } InternalRedirectAction internal_redirect_action = 26; + + // Indicates that the route has a hedge policy. 
Note that if this is set, + // it'll take precedence over the virtual host level hedge policy entirely + // (e.g.: policies are not merged, most internal one becomes the enforced policy). + // [#not-implemented-hide:] + HedgePolicy hedge_policy = 27; } // HTTP retry :ref:`architecture overview `. +// [#comment:next free field: 9] message RetryPolicy { // Specifies the conditions under which retry takes place. These are the same // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and @@ -811,7 +833,7 @@ message RetryPolicy { message RetryPriority { string name = 1 [(validate.rules).string.min_bytes = 1]; oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; + google.protobuf.Struct config = 2; google.protobuf.Any typed_config = 3; } @@ -825,7 +847,7 @@ message RetryPolicy { message RetryHostPredicate { string name = 1 [(validate.rules).string.min_bytes = 1]; oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; + google.protobuf.Struct config = 2; google.protobuf.Any typed_config = 3; } @@ -844,6 +866,53 @@ message RetryPolicy { // HTTP status codes that should trigger a retry in addition to those specified by retry_on. repeated uint32 retriable_status_codes = 7; + + message RetryBackOff { + // Specifies the base interval between retries. This parameter is required and must be greater + // than zero. Values less than 1 ms are rounded up to 1 ms. + // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's + // back-off algorithm. + google.protobuf.Duration base_interval = 1 [ + (validate.rules).duration = { + required: true, + gt: {seconds: 0} + }, + (gogoproto.stdduration) = true + ]; + + // Specifies the maximum interval between retries. This parameter is optional, but must be + // greater than or equal to the `base_interval` if set. The default is 10 times the + // `base_interval`. 
See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion + // of Envoy's back-off algorithm. + google.protobuf.Duration max_interval = 2 + [(validate.rules).duration.gt = {seconds: 0}, (gogoproto.stdduration) = true]; + } + + // Specifies parameters that control retry back off. This parameter is optional, in which case the + // default base interval is 25 milliseconds or, if set, the current value of the + // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times + // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` + // describes Envoy's back-off algorithm. + RetryBackOff retry_back_off = 8; +} + +// HTTP request hedging TODO(mpuncel) docs +// [#not-implemented-hide:] +message HedgePolicy { + // Specifies the number of initial requests that should be sent upstream. + // Must be at least 1. + // Defaults to 1. + google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32.gte = 1]; + + // Specifies a probability that an additional upstream request should be sent + // on top of what is specified by initial_requests. + // Defaults to 0. + envoy.type.FractionalPercent additional_request_chance = 2; + + // Indicates that a hedged request should be sent when the per-try timeout + // is hit. This will only occur if the retry policy also indicates that a + // timed out request should be retried. Defaults to false. + bool hedge_on_per_try_timeout = 3; } message RedirectAction { @@ -951,7 +1020,7 @@ message Decorator { message VirtualCluster { // Specifies a regex pattern to use for matching requests. The entire path of the request // must match the regex. The regex grammar used is defined `here - // `_. + // `_. // // Examples: // @@ -1147,7 +1216,7 @@ message HeaderMatcher { // If specified, this regex string is a regular expression rule which implies the entire request // header value must match the regex. 
The rule will not match if only a subsequence of the // request header value matches the regex. The regex grammar used in the value field is defined - // `here `_. + // `here `_. // // Examples: // @@ -1169,7 +1238,7 @@ message HeaderMatcher { // "-1somestring" envoy.type.Int64Range range_match = 6; - // If specified, header match will be be performed based on whether the header is in the + // If specified, header match will be performed based on whether the header is in the // request. bool present_match = 7; diff --git a/api/envoy/api/v2/srds.proto b/api/envoy/api/v2/srds.proto new file mode 100644 index 0000000000000..9038cb1e32574 --- /dev/null +++ b/api/envoy/api/v2/srds.proto @@ -0,0 +1,142 @@ +syntax = "proto3"; + +package envoy.api.v2; + +option java_outer_classname = "SrdsProto"; +option java_package = "io.envoyproxy.envoy.api.v2"; +option java_multiple_files = true; +option java_generic_services = true; + +import "envoy/api/v2/discovery.proto"; + +import "google/api/annotations.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: HTTP scoped routing configuration] +// * Routing :ref:`architecture overview ` +// +// .. attention:: +// +// The Scoped RDS API is not yet fully implemented and *should not* be enabled in +// :ref:`envoy_api_msg_config.filter.network.http_connection_manager.v2.HttpConnectionManager`. +// +// TODO(AndresGuedez): Update :ref:`arch_overview_http_routing` with scoped routing overview and +// configuration details. + +// The Scoped Routes Discovery Service (SRDS) API distributes +// :ref:`ScopedRouteConfiguration` resources. Each +// ScopedRouteConfiguration resource represents a "routing scope" containing a mapping that allows +// the HTTP connection manager to dynamically assign a routing table (specified via +// a :ref:`RouteConfiguration` message) to each HTTP request. 
+// [#proto-status: experimental] +service ScopedRoutesDiscoveryService { + rpc StreamScopedRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) { + } + + rpc DeltaScopedRoutes(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { + } + + rpc FetchScopedRoutes(DiscoveryRequest) returns (DiscoveryResponse) { + option (google.api.http) = { + post: "/v2/discovery:scoped-routes" + body: "*" + }; + } +} + +// Specifies a routing scope, which associates a +// :ref:`Key` to a +// :ref:`envoy_api_msg_RouteConfiguration` (identified by its resource name). +// +// The HTTP connection manager builds up a table consisting of these Key to RouteConfiguration +// mappings, and looks up the RouteConfiguration to use per request according to the algorithm +// specified in the +// :ref:`scope_key_builder` +// assigned to the HttpConnectionManager. +// +// For example, with the following configurations (in YAML): +// +// HttpConnectionManager config: +// +// .. code:: +// +// ... +// scoped_routes: +// name: foo-scoped-routes +// scope_key_builder: +// fragments: +// - header_value_extractor: +// name: X-Route-Selector +// element_separator: , +// element: +// separator: = +// key: vip +// +// ScopedRouteConfiguration resources (specified statically via +// :ref:`scoped_route_configurations_list` +// or obtained dynamically via SRDS): +// +// .. code:: +// +// (1) +// name: route-scope1 +// route_configuration_name: route-config1 +// key: +// fragments: +// - string_key: 172.10.10.20 +// +// (2) +// name: route-scope2 +// route_configuration_name: route-config2 +// key: +// fragments: +// - string_key: 172.20.20.30 +// +// A request from a client such as: +// +// .. code:: +// +// GET / HTTP/1.1 +// Host: foo.com +// X-Route-Selector: vip=172.10.10.20 +// +// would result in the routing table defined by the `route-config1` RouteConfiguration being +// assigned to the HTTP request/stream. 
+// +// [#comment:next free field: 4] +// [#proto-status: experimental] +message ScopedRouteConfiguration { + // The name assigned to the routing scope. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Specifies a key which is matched against the output of the + // :ref:`scope_key_builder` + // specified in the HttpConnectionManager. The matching is done per HTTP request and is dependent + // on the order of the fragments contained in the Key. + message Key { + message Fragment { + oneof type { + option (validate.required) = true; + + // A string to match against. + string string_key = 1; + } + } + + // The ordered set of fragments to match against. The order must match the fragments in the + // corresponding + // :ref:`scope_key_builder`. + repeated Fragment fragments = 1 [(validate.rules).repeated .min_items = 1]; + } + + // The resource name to use for a :ref:`envoy_api_msg_DiscoveryRequest` to an RDS server to + // fetch the :ref:`envoy_api_msg_RouteConfiguration` associated with this scope. + string route_configuration_name = 2 [(validate.rules).string.min_bytes = 1]; + + // The key to match against. 
+ Key key = 3 [(validate.rules).message.required = true]; +} diff --git a/api/envoy/config/accesslog/v2/BUILD b/api/envoy/config/accesslog/v2/BUILD index ba599a4f93d74..d152681ec7e94 100644 --- a/api/envoy/config/accesslog/v2/BUILD +++ b/api/envoy/config/accesslog/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/bootstrap/v2/BUILD b/api/envoy/config/bootstrap/v2/BUILD index 2a9c9b4b5d42f..b6b8c84779602 100644 --- a/api/envoy/config/bootstrap/v2/BUILD +++ b/api/envoy/config/bootstrap/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/bootstrap/v2/bootstrap.proto b/api/envoy/config/bootstrap/v2/bootstrap.proto index 51a773dccda5f..4b6d51d0e55d3 100644 --- a/api/envoy/config/bootstrap/v2/bootstrap.proto +++ b/api/envoy/config/bootstrap/v2/bootstrap.proto @@ -21,10 +21,10 @@ import "envoy/api/v2/lds.proto"; import "envoy/config/trace/v2/trace.proto"; import "envoy/config/metrics/v2/stats.proto"; import "envoy/config/overload/v2alpha/overload.proto"; -import "envoy/config/ratelimit/v2/rls.proto"; import "envoy/config/wasm/v2/wasm.proto"; import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; import "validate/validate.proto"; import "gogoproto/gogo.proto"; @@ -109,10 +109,7 @@ message Bootstrap { // tracing will be performed. envoy.config.trace.v2.Tracing tracing = 9; - // Configuration for an external rate limit service provider. If not - // specified, any calls to the rate limit service will immediately return - // success. 
- envoy.config.ratelimit.v2.RateLimitServiceConfig rate_limit_service = 10 [deprecated = true]; + reserved 10; // Configuration for the runtime configuration provider. If not specified, a // ā€œnullā€ provider will be used which will result in all defaults being used. @@ -124,8 +121,16 @@ message Bootstrap { // Optional overload manager configuration. envoy.config.overload.v2alpha.OverloadManager overload_manager = 15; + // Enable :ref:`stats for event dispatcher `, defaults to false. + // Note that this records a value for each iteration of the event loop on every thread. This + // should normally be minimal overhead, but when using + // :ref:`statsd `, it will send each observed value + // over the wire individually because the statsd protocol doesn't have any way to represent a + // histogram summary. Be aware that this can be a very large volume of data. + bool enable_dispatcher_stats = 16; + // Configuration for an wasm service provider(s). - repeated envoy.config.wasm.v2.WasmConfig wasm_service = 16; + repeated envoy.config.wasm.v2.WasmConfig wasm_service = 17; } // Administration interface :ref:`operations documentation @@ -205,8 +210,8 @@ message Runtime { // symbolic link. An atomic link swap is used when a new tree should be // switched to. This parameter specifies the path to the symbolic link. Envoy // will watch the location for changes and reload the file system tree when - // they happen. - string symlink_root = 1 [(validate.rules).string.min_bytes = 1]; + // they happen. If this parameter is not set, there will be no disk based runtime. + string symlink_root = 1; // Specifies the subdirectory to load within the root directory. This is // useful if multiple systems share the same delivery mechanism. Envoy @@ -220,4 +225,10 @@ message Runtime { // Sometimes it is useful to have a per service cluster directory for runtime // configuration. See below for exactly how the override directory is used. 
string override_subdirectory = 3; + + // Static base runtime. This will be :ref:`overridden + // ` by other runtime layers, e.g. + // disk or admin. This follows the :ref:`runtime protobuf JSON representation + // encoding `. + google.protobuf.Struct base = 4; } diff --git a/api/envoy/config/cluster/redis/BUILD b/api/envoy/config/cluster/redis/BUILD new file mode 100644 index 0000000000000..42e2d408e3584 --- /dev/null +++ b/api/envoy/config/cluster/redis/BUILD @@ -0,0 +1,8 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "redis_cluster", + srcs = ["redis_cluster.proto"], +) diff --git a/api/envoy/config/cluster/redis/redis_cluster.proto b/api/envoy/config/cluster/redis/redis_cluster.proto new file mode 100644 index 0000000000000..2644288c40d2d --- /dev/null +++ b/api/envoy/config/cluster/redis/redis_cluster.proto @@ -0,0 +1,54 @@ +syntax = "proto3"; + +package envoy.config.cluster.redis; + +option java_outer_classname = "RedisClusterProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.cluster.redis"; +option go_package = "v2"; + +import "google/protobuf/duration.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: Redis Cluster Configuration] +// This cluster adds support for `Redis Cluster `_, as part +// of :ref:`Envoy's support for Redis Cluster `. +// +// Redis Cluster is an extension of Redis which supports sharding and high availability (where a +// shard that loses its master fails over to a replica, and designates it as the new master). +// However, as there is no unified frontend or proxy service in front of Redis Cluster, the client +// (in this case Envoy) must locally maintain the state of the Redis Cluster, specifically the +// topology. A random node in the cluster is queried for the topology using the `CLUSTER SLOTS +// command `_. 
This result is then stored locally, and +// updated at user-configured intervals. +// +// Example: +// +// .. code-block:: yaml +// +// name: name +// connect_timeout: 0.25s +// dns_lookup_family: V4_ONLY +// hosts: +// - socket_address: +// address: foo.bar.com +// port_value: 22120 +// cluster_type: +// name: envoy.clusters.redis +// typed_config: +// "@type": type.googleapis.com/google.protobuf.Struct +// value: +// cluster_refresh_rate: 30s +// cluster_refresh_timeout: 0.5s + +message RedisClusterConfig { + // Interval between successive topology refresh requests. If not set, this defaults to 5s. + google.protobuf.Duration cluster_refresh_rate = 1 + [(validate.rules).duration.gt = {}, (gogoproto.stdduration) = true]; + + // Timeout for topology refresh request. If not set, this defaults to 3s. + google.protobuf.Duration cluster_refresh_timeout = 2 + [(validate.rules).duration.gt = {}, (gogoproto.stdduration) = true]; +} diff --git a/api/envoy/config/common/tap/v2alpha/BUILD b/api/envoy/config/common/tap/v2alpha/BUILD index dec8795316b85..4b780575154e4 100644 --- a/api/envoy/config/common/tap/v2alpha/BUILD +++ b/api/envoy/config/common/tap/v2alpha/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/accesslog/v2/BUILD b/api/envoy/config/filter/accesslog/v2/BUILD index 3eedcf397000e..fdbf376af177a 100644 --- a/api/envoy/config/filter/accesslog/v2/BUILD +++ b/api/envoy/config/filter/accesslog/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/accesslog/v2/accesslog.proto b/api/envoy/config/filter/accesslog/v2/accesslog.proto index 
da699432ee177..fffd2251eff3f 100644 --- a/api/envoy/config/filter/accesslog/v2/accesslog.proto +++ b/api/envoy/config/filter/accesslog/v2/accesslog.proto @@ -37,7 +37,7 @@ message AccessLog { // #. "envoy.http_grpc_access_log": :ref:`HttpGrpcAccessLogConfig // ` oneof config_type { - google.protobuf.Struct config = 3 [deprecated = true]; + google.protobuf.Struct config = 3; google.protobuf.Any typed_config = 4; } @@ -73,6 +73,9 @@ message AccessLogFilter { // Response flag filter. ResponseFlagFilter response_flag_filter = 9; + + // gRPC status filter. + GrpcStatusFilter grpc_status_filter = 10; } } @@ -187,7 +190,39 @@ message ResponseFlagFilter { "UAEX", "RLSE", "DC", - "URX" + "URX", + "SI" ] }]; } + +// Filters gRPC requests based on their response status. If a gRPC status is not provided, the +// filter will infer the status from the HTTP status code. +message GrpcStatusFilter { + enum Status { + OK = 0; + CANCELED = 1; + UNKNOWN = 2; + INVALID_ARGUMENT = 3; + DEADLINE_EXCEEDED = 4; + NOT_FOUND = 5; + ALREADY_EXISTS = 6; + PERMISSION_DENIED = 7; + RESOURCE_EXHAUSTED = 8; + FAILED_PRECONDITION = 9; + ABORTED = 10; + OUT_OF_RANGE = 11; + UNIMPLEMENTED = 12; + INTERNAL = 13; + UNAVAILABLE = 14; + DATA_LOSS = 15; + UNAUTHENTICATED = 16; + } + + // Logs only responses that have any one of the gRPC statuses in this field. + repeated Status statuses = 1 [(validate.rules).repeated .items.enum.defined_only = true]; + + // If included and set to true, the filter will instead block all responses with a gRPC status or + // inferred gRPC status enumerated in statuses, and allow all other responses. 
+ bool exclude = 2; +} diff --git a/api/envoy/config/filter/dubbo/router/v2alpha1/BUILD b/api/envoy/config/filter/dubbo/router/v2alpha1/BUILD new file mode 100644 index 0000000000000..51c69c0d5b20f --- /dev/null +++ b/api/envoy/config/filter/dubbo/router/v2alpha1/BUILD @@ -0,0 +1,8 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "router", + srcs = ["router.proto"], +) diff --git a/api/envoy/config/filter/dubbo/router/v2alpha1/router.proto b/api/envoy/config/filter/dubbo/router/v2alpha1/router.proto new file mode 100644 index 0000000000000..37a5542a17bbf --- /dev/null +++ b/api/envoy/config/filter/dubbo/router/v2alpha1/router.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package envoy.config.filter.dubbo.router.v2alpha1; + +option java_outer_classname = "RouterProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.dubbo.router.v2alpha1"; +option go_package = "v2alpha1"; + +// [#protodoc-title: Router] +// Dubbo router :ref:`configuration overview `. + +message Router { +} diff --git a/api/envoy/config/filter/fault/v2/BUILD b/api/envoy/config/filter/fault/v2/BUILD index c7f8a36eb23fd..35419a9902b33 100644 --- a/api/envoy/config/filter/fault/v2/BUILD +++ b/api/envoy/config/filter/fault/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/fault/v2/fault.proto b/api/envoy/config/filter/fault/v2/fault.proto index 7c6e405ba9704..f27f9d446267f 100644 --- a/api/envoy/config/filter/fault/v2/fault.proto +++ b/api/envoy/config/filter/fault/v2/fault.proto @@ -19,19 +19,25 @@ import "gogoproto/gogo.proto"; // Delay specification is used to inject latency into the // HTTP/gRPC/Mongo/Redis operation or delay proxying of TCP connections. 
message FaultDelay { + // Fault delays are controlled via an HTTP header (if applicable). See the + // :ref:`http fault filter ` documentation for + // more information. + message HeaderDelay { + } + enum FaultDelayType { - // Fixed delay (step function). + // Unused and deprecated. FIXED = 0; } - // Delay type to use (fixed|exponential|..). Currently, only fixed delay (step function) is - // supported. - FaultDelayType type = 1 [(validate.rules).enum.defined_only = true]; + // Unused and deprecated. Will be removed in the next release. + FaultDelayType type = 1 [deprecated = true]; reserved 2; oneof fault_delay_secifier { option (validate.required) = true; + // Add a fixed delay before forwarding the operation upstream. See // https://developers.google.com/protocol-buffers/docs/proto3#json for // the JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified @@ -40,8 +46,39 @@ message FaultDelay { // for the specified period. This is required if type is FIXED. google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration.gt = {}, (gogoproto.stdduration) = true]; + + // Fault delays are controlled via an HTTP header (if applicable). + HeaderDelay header_delay = 5; + } + + // The percentage of operations/connections/requests on which the delay will be injected. + type.FractionalPercent percentage = 4; +} + +// Describes a rate limit to be applied. +message FaultRateLimit { + // Describes a fixed/constant rate limit. + message FixedLimit { + // The limit supplied in KiB/s. + uint64 limit_kbps = 1 [(validate.rules).uint64.gte = 1]; + } + + // Rate limits are controlled via an HTTP header (if applicable). See the + // :ref:`http fault filter ` documentation for + // more information. + message HeaderLimit { + } + + oneof limit_type { + option (validate.required) = true; + + // A fixed rate limit. + FixedLimit fixed_limit = 1; + + // Rate limits are controlled via an HTTP header (if applicable). 
+ HeaderLimit header_limit = 3; } - // The percentage of operations/connection requests on which the delay will be injected. - envoy.type.FractionalPercent percentage = 4; + // The percentage of operations/connections/requests on which the rate limit will be injected. + type.FractionalPercent percentage = 2; } diff --git a/api/envoy/config/filter/http/buffer/v2/BUILD b/api/envoy/config/filter/http/buffer/v2/BUILD index 0460c2d43e3ef..e59429af9ace4 100644 --- a/api/envoy/config/filter/http/buffer/v2/BUILD +++ b/api/envoy/config/filter/http/buffer/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/http/buffer/v2/buffer.proto b/api/envoy/config/filter/http/buffer/v2/buffer.proto index 6da5cbb94746a..a203d9d98cc25 100644 --- a/api/envoy/config/filter/http/buffer/v2/buffer.proto +++ b/api/envoy/config/filter/http/buffer/v2/buffer.proto @@ -7,7 +7,6 @@ option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.buffer.v2"; option go_package = "v2"; -import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; diff --git a/api/envoy/config/filter/http/csrf/v2/BUILD b/api/envoy/config/filter/http/csrf/v2/BUILD new file mode 100644 index 0000000000000..b236868c2cc07 --- /dev/null +++ b/api/envoy/config/filter/http/csrf/v2/BUILD @@ -0,0 +1,9 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "csrf", + srcs = ["csrf.proto"], + deps = ["//envoy/api/v2/core:base"], +) diff --git a/api/envoy/config/filter/http/csrf/v2/csrf.proto b/api/envoy/config/filter/http/csrf/v2/csrf.proto new file mode 100644 index 0000000000000..eed59de5edd13 --- /dev/null +++ b/api/envoy/config/filter/http/csrf/v2/csrf.proto 
@@ -0,0 +1,43 @@ +syntax = "proto3"; + +package envoy.config.filter.http.csrf.v2; + +option java_outer_classname = "CsrfPolicyProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.http.csrf.v2"; +option go_package = "v2"; + +import "envoy/api/v2/core/base.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: CSRF] +// Cross-Site Request Forgery :ref:`configuration overview `. + +// CSRF filter config. +message CsrfPolicy { + // Specify if CSRF is enabled. + // + // More information on how this can be controlled via runtime can be found + // :ref:`here `. + // + // .. note:: + // + // This field defaults to 100/:ref:`HUNDRED + // `. + envoy.api.v2.core.RuntimeFractionalPercent filter_enabled = 1 + [(validate.rules).message.required = true]; + + // Specifies that CSRF policies will be evaluated and tracked, but not enforced. + // This is intended to be used when filter_enabled is off. + // + // More information on how this can be controlled via runtime can be found + // :ref:`here `. + // + // .. note:: + // + // This field defaults to 100/:ref:`HUNDRED + // `. 
+ envoy.api.v2.core.RuntimeFractionalPercent shadow_enabled = 2; +} diff --git a/api/envoy/config/filter/http/ext_authz/v2/BUILD b/api/envoy/config/filter/http/ext_authz/v2/BUILD index ca46dd0ca979b..a22408bb1be7e 100644 --- a/api/envoy/config/filter/http/ext_authz/v2/BUILD +++ b/api/envoy/config/filter/http/ext_authz/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto index 2f5423afd4a83..4c9b4d9c82897 100644 --- a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto +++ b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto @@ -3,8 +3,8 @@ syntax = "proto3"; package envoy.config.filter.http.ext_authz.v2; option java_outer_classname = "ExtAuthzProto"; -option java_package = "io.envoyproxy.envoy.config.filter.http.ext_authz.v2"; option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.http.ext_authz.v2"; option go_package = "v2"; import "envoy/api/v2/core/base.proto"; @@ -14,9 +14,13 @@ import "envoy/api/v2/core/http_uri.proto"; import "envoy/type/matcher/string.proto"; import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.stable_marshaler_all) = true; // [#protodoc-title: External Authorization] -// ExtAuthz :ref:`configuration overview `. +// External Authorization :ref:`configuration overview `. + message ExtAuthz { // External authorization service configuration. oneof services { @@ -40,6 +44,43 @@ message ExtAuthz { // Note that errors can be *always* tracked in the :ref:`stats // `. bool failure_mode_allow = 2; + + // Sets the package version the gRPC service should use. 
This is particularly + // useful when transitioning from alpha to release versions assuming that both definitions are + // semantically compatible. Deprecation note: This field is deprecated and should only be used for + // version upgrade. See release notes for more details. + bool use_alpha = 4 [deprecated = true]; + + // Enables filter to buffer the client request body and send it within the authorization request. + // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization + // request message indicating if the body data is partial. + BufferSettings with_request_body = 5; + + // Clears route cache in order to allow the external authorization service to correctly affect + // routing decisions. Filter clears all cached routes when: + // + // 1. The field is set to *true*. + // + // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0. + // + // 3. At least one *authorization response header* is added to the client request, or is used for + // altering another client request header. + // + bool clear_route_cache = 6; +} + +// Configuration for buffering the request data. +message BufferSettings { + // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return + // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number + // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow + // `. + uint32 max_request_bytes = 1 [(validate.rules).uint32.gt = 0]; + + // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached. + // The authorization request will be dispatched and no 413 HTTP error will be returned by the + // filter. + bool allow_partial_message = 2; } // HttpService is used for raw HTTP communication between the filter and the authorization service. 
@@ -134,7 +175,7 @@ message ExtAuthzPerRoute { // external authorization server on specific virtual hosts \ routes. For example, adding a context // extension on the virtual host level can give the ext-authz server information on what virtual // host is used without needing to parse the host header. If CheckSettings is specified in multiple -// per-filter-configs, they will be merged in order, and the result will be be used. +// per-filter-configs, they will be merged in order, and the result will be used. message CheckSettings { // Context extensions to set on the CheckRequest's // :ref:`AttributeContext.context_extensions` diff --git a/api/envoy/config/filter/http/fault/v2/BUILD b/api/envoy/config/filter/http/fault/v2/BUILD index d2c935a116b54..e561e88196b9c 100644 --- a/api/envoy/config/filter/http/fault/v2/BUILD +++ b/api/envoy/config/filter/http/fault/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/http/fault/v2/fault.proto b/api/envoy/config/filter/http/fault/v2/fault.proto index aaffbc1f316ab..bc491580bb152 100644 --- a/api/envoy/config/filter/http/fault/v2/fault.proto +++ b/api/envoy/config/filter/http/fault/v2/fault.proto @@ -11,6 +11,8 @@ import "envoy/api/v2/route/route.proto"; import "envoy/config/filter/fault/v2/fault.proto"; import "envoy/type/percent.proto"; +import "google/protobuf/wrappers.proto"; + import "validate/validate.proto"; // [#protodoc-title: Fault Injection] @@ -28,13 +30,13 @@ message FaultAbort { // The percentage of requests/operations/connections that will be aborted with the error code // provided. - envoy.type.FractionalPercent percentage = 3; + type.FractionalPercent percentage = 3; } message HTTPFault { // If specified, the filter will inject delays based on the values in the - // object. At least *abort* or *delay* must be specified. 
- envoy.config.filter.fault.v2.FaultDelay delay = 1; + // object. + filter.fault.v2.FaultDelay delay = 1; // If specified, the filter will abort requests based on the values in // the object. At least *abort* or *delay* must be specified. @@ -63,4 +65,27 @@ message HTTPFault { // ` header and compared // against downstream_nodes list. repeated string downstream_nodes = 5; + + // The maximum number of faults that can be active at a single time via the configured fault + // filter. Note that because this setting can be overridden at the route level, it's possible + // for the number of active faults to be greater than this value (if injected via a different + // route). If not specified, defaults to unlimited. This setting can be overridden via + // `runtime ` and any faults that are not injected + // due to overflow will be indicated via the `faults_overflow + // ` stat. + // + // .. attention:: + // Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy + // limit. It's possible for the number of active faults to rise slightly above the configured + // amount due to the implementation details. + google.protobuf.UInt32Value max_active_faults = 6; + + // The response rate limit to be applied to the response body of the stream. When configured, + // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent + // ` runtime key. + // + // .. attention:: + // This is a per-stream limit versus a connection level limit. This means that concurrent streams + // will each get an independent limit. 
+ filter.fault.v2.FaultRateLimit response_rate_limit = 7; } diff --git a/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/BUILD b/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/BUILD index 105a035bd85da..7c1deb713c34c 100644 --- a/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/BUILD +++ b/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto b/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto index eb1dfd68fe2d9..0c33b6d077a16 100644 --- a/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto +++ b/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto @@ -1,15 +1,18 @@ syntax = "proto3"; -package envoy.extensions.filter.http.grpc_http1_reverse_bridge.v2alpha1; +package envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.extensions.filter.http.grpc_http1_reverse_bridge.v2alpha1"; +option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1"; option go_package = "v2"; import "validate/validate.proto"; -// [#protodoc-title: Extensions gRPC Http1 Reverse Bridge] +// [#protodoc-title: gRPC HTTP/1.1 Reverse Bridge] +// gRPC HTTP/1.1 Reverse Bridge :ref:`configuration overview +// `. + // gRPC reverse bridge filter configuration message FilterConfig { // The content-type to pass to the upstream when the gRPC bridge filter is applied. 
diff --git a/api/envoy/config/filter/http/gzip/v2/BUILD b/api/envoy/config/filter/http/gzip/v2/BUILD index 79c1076d7c77e..e34d73c51c217 100644 --- a/api/envoy/config/filter/http/gzip/v2/BUILD +++ b/api/envoy/config/filter/http/gzip/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/http/header_to_metadata/v2/BUILD b/api/envoy/config/filter/http/header_to_metadata/v2/BUILD index 67b45090a654f..3f8503acbe65c 100644 --- a/api/envoy/config/filter/http/header_to_metadata/v2/BUILD +++ b/api/envoy/config/filter/http/header_to_metadata/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/http/health_check/v2/BUILD b/api/envoy/config/filter/http/health_check/v2/BUILD index 37152bde6f020..9dc0af2df16fb 100644 --- a/api/envoy/config/filter/http/health_check/v2/BUILD +++ b/api/envoy/config/filter/http/health_check/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/http/health_check/v2/health_check.proto b/api/envoy/config/filter/http/health_check/v2/health_check.proto index c951f08c1314b..bc8433732d72c 100644 --- a/api/envoy/config/filter/http/health_check/v2/health_check.proto +++ b/api/envoy/config/filter/http/health_check/v2/health_check.proto @@ -16,6 +16,8 @@ import "envoy/type/percent.proto"; import "validate/validate.proto"; import "gogoproto/gogo.proto"; +option (gogoproto.stable_marshaler_all) = true; + // [#protodoc-title: Health check] // Health check 
:ref:`configuration overview `. @@ -32,7 +34,7 @@ message HealthCheck { // If operating in non-pass-through mode, specifies a set of upstream cluster // names and the minimum percentage of servers in each of those clusters that - // must be healthy in order for the filter to return a 200. + // must be healthy or degraded in order for the filter to return a 200. map cluster_min_healthy_percentages = 4; // Specifies a set of health check request headers to match on. The health check filter will diff --git a/api/envoy/config/filter/http/ip_tagging/v2/BUILD b/api/envoy/config/filter/http/ip_tagging/v2/BUILD index 8a6c0ee5be259..4c7001972e25d 100644 --- a/api/envoy/config/filter/http/ip_tagging/v2/BUILD +++ b/api/envoy/config/filter/http/ip_tagging/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto b/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto index 4d0b4fe64de7e..4f5da60150f35 100644 --- a/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto +++ b/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto @@ -9,8 +9,6 @@ option go_package = "v2"; import "envoy/api/v2/core/address.proto"; -import "google/protobuf/wrappers.proto"; - import "validate/validate.proto"; // [#protodoc-title: IP tagging] diff --git a/api/envoy/config/filter/http/jwt_authn/v2alpha/BUILD b/api/envoy/config/filter/http/jwt_authn/v2alpha/BUILD index 90863e3f5bed2..d637732d32cb8 100644 --- a/api/envoy/config/filter/http/jwt_authn/v2alpha/BUILD +++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") api_proto_library_internal( name = "jwt_authn", diff --git 
a/api/envoy/config/filter/http/jwt_authn/v2alpha/README.md b/api/envoy/config/filter/http/jwt_authn/v2alpha/README.md index 9d083389a5aea..c390a4d5ce506 100644 --- a/api/envoy/config/filter/http/jwt_authn/v2alpha/README.md +++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/README.md @@ -29,3 +29,38 @@ If a custom location is desired, `from_headers` or `from_params` can be used to ## HTTP header to pass successfully verified JWT If a JWT is valid, its payload will be passed to the backend in a new HTTP header specified in `forward_payload_header` field. Its value is base64 encoded JWT payload in JSON. + + +## Further header options + +In addition to the `name` field, which specifies the HTTP header name, +the `from_headers` section can specify an optional `value_prefix` value, as in: + +```yaml + from_headers: + - name: bespoke + value_prefix: jwt_value +``` + +The above will cause the jwt_authn filter to look for the JWT in the `bespoke` header, following the tag `jwt_value`. + +Any non-JWT characters (i.e., anything _other than_ alphanumerics, `_`, `-`, and `.`) will be skipped, +and all following, contiguous, JWT-legal chars will be taken as the JWT. + +This means all of the following will return a JWT of `eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk`: + +```text +bespoke: jwt_value=eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk + +bespoke: {"jwt_value": "eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk"} + +bespoke: beta:true,jwt_value:"eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk",trace=1234 +``` + +The header `name` may be `Authorization`. + +The `value_prefix` must match exactly, i.e., case-sensitively. +If the `value_prefix` is not found, the header is skipped: not considered as a source for a JWT token. + +If there are no JWT-legal characters after the `value_prefix`, the entire string after it +is taken to be the JWT token. This is unlikely to succeed; the error will be reported by the JWT parser. 
\ No newline at end of file diff --git a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto index b60d74a652b87..2f8a0ec29c170 100644 --- a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto +++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto @@ -12,8 +12,13 @@ import "envoy/api/v2/core/http_uri.proto"; import "envoy/api/v2/route/route.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; -import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.stable_marshaler_all) = true; + +// [#protodoc-title: JWT Authentication] +// JWT Authentication :ref:`configuration overview `. // Please see following for JWT authentication flow: // @@ -334,6 +339,32 @@ message RequirementRule { JwtRequirement requires = 2; } +// This message specifies Jwt requirements based on stream_info.filterState. +// This FilterState should use `Router::StringAccessor` object to set a string value. +// Other HTTP filters can use it to specify Jwt requirements dynamically. +// +// Example: +// +// .. code-block:: yaml +// +// name: jwt_selector +// requires: +// issuer_1: +// provider_name: issuer1 +// issuer_2: +// provider_name: issuer2 +// +// If a filter set "jwt_selector" with "issuer_1" to FilterState for a request, +// jwt_authn filter will use JwtRequirement{"provider_name": "issuer1"} to verify. +message FilterStateRule { + // The filter state name to retrieve the `Router::StringAccessor` object. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // A map of string keys to requirements. The string key is the string value + // in the FilterState with the name specified in the *name* field above. + map requires = 3; +} + // This is the Envoy HTTP filter config for JWT authentication. 
// // For example: @@ -427,4 +458,10 @@ message JwtAuthentication { // - provider_name: provider2 // repeated RequirementRule rules = 2; + + // This message specifies Jwt requirements based on stream_info.filterState. + // Other HTTP filters can use it to specify Jwt requirements dynamically. + // The *rules* field above is checked first, if it could not find any matches, + // check this one. + FilterStateRule filter_state_rules = 3; } diff --git a/api/envoy/config/filter/http/lua/v2/BUILD b/api/envoy/config/filter/http/lua/v2/BUILD index d399bc5b066be..6daf0c82f1748 100644 --- a/api/envoy/config/filter/http/lua/v2/BUILD +++ b/api/envoy/config/filter/http/lua/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/http/rate_limit/v2/BUILD b/api/envoy/config/filter/http/rate_limit/v2/BUILD index ea14e697f6302..d8fb8e72ffece 100644 --- a/api/envoy/config/filter/http/rate_limit/v2/BUILD +++ b/api/envoy/config/filter/http/rate_limit/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto b/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto index e9ea6fbb12753..9d93e4a255bd5 100644 --- a/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto +++ b/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto @@ -55,7 +55,6 @@ message RateLimit { // Configuration for an external rate limit service provider. If not // specified, any calls to the rate limit service will immediately return // success. - // [#comment:TODO(ramaraochavali): Make this required as part of cleanup of deprecated ratelimit - // service config in bootstrap.] 
- envoy.config.ratelimit.v2.RateLimitServiceConfig rate_limit_service = 7; + envoy.config.ratelimit.v2.RateLimitServiceConfig rate_limit_service = 7 + [(validate.rules).message.required = true]; } diff --git a/api/envoy/config/filter/http/rbac/v2/BUILD b/api/envoy/config/filter/http/rbac/v2/BUILD index d325e3bcde2d7..9b95d4072130d 100644 --- a/api/envoy/config/filter/http/rbac/v2/BUILD +++ b/api/envoy/config/filter/http/rbac/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/http/router/v2/BUILD b/api/envoy/config/filter/http/router/v2/BUILD index 38697ac806806..990d8154afada 100644 --- a/api/envoy/config/filter/http/router/v2/BUILD +++ b/api/envoy/config/filter/http/router/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/http/squash/v2/BUILD b/api/envoy/config/filter/http/squash/v2/BUILD index 8cf2c80dde1e7..86bd4e8cfb659 100644 --- a/api/envoy/config/filter/http/squash/v2/BUILD +++ b/api/envoy/config/filter/http/squash/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/http/tap/v2alpha/BUILD b/api/envoy/config/filter/http/tap/v2alpha/BUILD index 7a826408eab06..f84625a7da734 100644 --- a/api/envoy/config/filter/http/tap/v2alpha/BUILD +++ b/api/envoy/config/filter/http/tap/v2alpha/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git 
a/api/envoy/config/filter/http/transcoder/v2/BUILD b/api/envoy/config/filter/http/transcoder/v2/BUILD index eddef5a7ebd03..8ecd7759a5ca6 100644 --- a/api/envoy/config/filter/http/transcoder/v2/BUILD +++ b/api/envoy/config/filter/http/transcoder/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/http/transcoder/v2/transcoder.proto b/api/envoy/config/filter/http/transcoder/v2/transcoder.proto index fecd55d694b66..a7f092d346551 100644 --- a/api/envoy/config/filter/http/transcoder/v2/transcoder.proto +++ b/api/envoy/config/filter/http/transcoder/v2/transcoder.proto @@ -92,4 +92,26 @@ message GrpcJsonTranscoder { // binding for ``foo`` is not defined. Adding ``foo`` to ``ignored_query_parameters`` will allow // the same request to be mapped to ``GetShelf``. repeated string ignored_query_parameters = 6; + + // Whether to route methods without the ``google.api.http`` option. + // + // Example : + // + // .. code-block:: proto + // + // package bookstore; + // + // service Bookstore { + // rpc GetShelf(GetShelfRequest) returns (Shelf) {} + // } + // + // message GetShelfRequest { + // int64 shelf = 1; + // } + // + // message Shelf {} + // + // The client could ``post`` a json body ``{"shelf": 1234}`` with the path of + // ``/bookstore.Bookstore/GetShelfRequest`` to call ``GetShelfRequest``. 
+ bool auto_mapping = 7; } diff --git a/api/envoy/config/filter/listener/original_src/v2alpha1/BUILD b/api/envoy/config/filter/listener/original_src/v2alpha1/BUILD index 9b15b1ae0b39e..e064545b21cde 100644 --- a/api/envoy/config/filter/listener/original_src/v2alpha1/BUILD +++ b/api/envoy/config/filter/listener/original_src/v2alpha1/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/network/client_ssl_auth/v2/BUILD b/api/envoy/config/filter/network/client_ssl_auth/v2/BUILD index a6d31d6396111..dad2d7fea2627 100644 --- a/api/envoy/config/filter/network/client_ssl_auth/v2/BUILD +++ b/api/envoy/config/filter/network/client_ssl_auth/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD index 2e88b619e5242..e3e83a7046847 100644 --- a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD +++ b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD @@ -1,8 +1,8 @@ -load("//bazel:api_build_system.bzl", "api_proto_library") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 -api_proto_library( +api_proto_library_internal( name = "dubbo_proxy", srcs = [ "dubbo_proxy.proto", diff --git a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto index e639830794741..5b0995ba0022d 100644 --- a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto +++ b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto @@ -15,7 +15,9 @@ import 
"validate/validate.proto"; import "gogoproto/gogo.proto"; // [#protodoc-title: Dubbo Proxy] -// Dubbo Proxy filter configuration. +// Dubbo Proxy :ref:`configuration overview `. + +// [#comment:next free field: 6] message DubboProxy { // The human readable prefix to use when emitting statistics. string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; @@ -36,10 +38,12 @@ message DubboProxy { repeated DubboFilter dubbo_filters = 5; } +// Dubbo Protocol types supported by Envoy. enum ProtocolType { Dubbo = 0; // the default protocol. } +// Dubbo Serialization types supported by Envoy. enum SerializationType { Hessian2 = 0; // the default serialization protocol. } diff --git a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto index 2c3d80ae3608c..84b6d3fc5c174 100644 --- a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto +++ b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto @@ -16,8 +16,12 @@ import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; import "gogoproto/gogo.proto"; -// [#protodoc-title: Dubbo route configuration] +option (gogoproto.stable_marshaler_all) = true; +// [#protodoc-title: Dubbo Proxy Route Configuration] +// Dubbo Proxy :ref:`configuration overview `. + +// [#comment:next free field: 6] message RouteConfiguration { // The name of the route configuration. Reserved for future use in asynchronous route discovery. string name = 1; @@ -36,6 +40,7 @@ message RouteConfiguration { repeated Route routes = 5 [(gogoproto.nullable) = false]; } +// [#comment:next free field: 3] message Route { // Route matching parameters. 
RouteMatch match = 1 [(validate.rules).message.required = true, (gogoproto.nullable) = false]; @@ -44,6 +49,35 @@ message Route { RouteAction route = 2 [(validate.rules).message.required = true, (gogoproto.nullable) = false]; } +// [#comment:next free field: 3] +message RouteMatch { + // Method level routing matching. + MethodMatch method = 1; + + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). + repeated envoy.api.v2.route.HeaderMatcher headers = 2; +} + +// [#comment:next free field: 3] +message RouteAction { + oneof cluster_specifier { + option (validate.required) = true; + + // Indicates the upstream cluster to which the request should be routed. + string cluster = 1; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + // Currently ClusterWeight only supports the name and weight fields. + envoy.api.v2.route.WeightedCluster weighted_clusters = 2; + } +} + +// [#comment:next free field: 5] message MethodMatch { // The name of the method. envoy.type.matcher.StringMatcher name = 1; @@ -64,8 +98,7 @@ message MethodMatch { // Examples: // // * For range [-10,0), route will match for header value -1, but not for 0, - // "somestring", 10.9, - // "-1somestring" + // "somestring", 10.9, "-1somestring" envoy.type.Int64Range range_match = 4; } } @@ -75,32 +108,3 @@ message MethodMatch { // The value is the parameter matching type. map params_match = 2; } - -message RouteMatch { - // Method level routing matching. - MethodMatch method = 1; - - // Specifies a set of headers that the route should match on. 
The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). - repeated envoy.api.v2.route.HeaderMatcher headers = 2; -} - -// [#comment:next free field: 2] -message RouteAction { - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates the upstream cluster to which the request should be routed. - string cluster = 1; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - // - // .. note:: - // Currently ClusterWeight only supports the name and weight fields. - envoy.api.v2.route.WeightedCluster weighted_clusters = 2; - } -} diff --git a/api/envoy/config/filter/network/ext_authz/v2/BUILD b/api/envoy/config/filter/network/ext_authz/v2/BUILD index 4d716dee9744a..96184437fa54a 100644 --- a/api/envoy/config/filter/network/ext_authz/v2/BUILD +++ b/api/envoy/config/filter/network/ext_authz/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/BUILD b/api/envoy/config/filter/network/http_connection_manager/v2/BUILD index c89ea09ad2909..95d3811f426af 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/BUILD +++ b/api/envoy/config/filter/network/http_connection_manager/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 @@ -7,6 +7,7 @@ api_proto_library_internal( srcs = 
["http_connection_manager.proto"], deps = [ "//envoy/api/v2:rds", + "//envoy/api/v2:srds", "//envoy/api/v2/core:base", "//envoy/api/v2/core:config_source", "//envoy/api/v2/core:protocol", @@ -20,6 +21,7 @@ api_go_proto_library( proto = ":http_connection_manager", deps = [ "//envoy/api/v2:rds_go_grpc", + "//envoy/api/v2:srds_go_grpc", "//envoy/api/v2/core:base_go_proto", "//envoy/api/v2/core:config_source_go_proto", "//envoy/api/v2/core:protocol_go_proto", diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 5fb51dd823187..be0b3926ff795 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -10,6 +10,7 @@ option go_package = "v2"; import "envoy/api/v2/core/config_source.proto"; import "envoy/api/v2/core/protocol.proto"; import "envoy/api/v2/rds.proto"; +import "envoy/api/v2/srds.proto"; import "envoy/config/filter/accesslog/v2/accesslog.proto"; import "envoy/type/percent.proto"; @@ -24,7 +25,7 @@ import "gogoproto/gogo.proto"; // [#protodoc-title: HTTP connection manager] // HTTP connection manager :ref:`configuration overview `. -// [#comment:next free field: 30] +// [#comment:next free field: 32] message HttpConnectionManager { enum CodecType { option (gogoproto.goproto_enum_prefix) = false; @@ -61,6 +62,11 @@ message HttpConnectionManager { // The route table for the connection manager is static and is specified in this property. envoy.api.v2.RouteConfiguration route_config = 4; + + // A route table will be dynamically assigned to each request based on request attributes + // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are + // specified in this message. 
+ ScopedRoutes scoped_routes = 31; } // A list of individual HTTP filters that make up the filter chain for @@ -116,6 +122,10 @@ message HttpConnectionManager { // :ref:`HTTP Connection Manager `. // Default: 100% envoy.type.Percent overall_sampling = 5; + + // Whether to annotate spans with additional data. If true, spans will include logs for stream + // events. + bool verbose = 6; } // Presence of the object defines whether the connection manager @@ -133,14 +143,13 @@ message HttpConnectionManager { // header in responses. If not set, the default is *envoy*. string server_name = 10; - // The maximum request headers size for incoming connections. The default max - // is 60K, based on default settings for http codecs. For HTTP1, the current - // limit set by http_parser is 80K. for HTTP2, the default allowed header - // block in nghttp2 is 64K. The max configurable setting is 64K in order to - // stay under both codec limits. - // Requests that exceed this size will receive a 431 response. + // The maximum request headers size for incoming connections. + // If unconfigured, the default max request headers allowed is 60 KiB. + // Requests that exceed this limit will receive a 431 response. + // The max configurable limit is 96 KiB, based on current implementation + // constraints. google.protobuf.UInt32Value max_request_headers_kb = 29 - [(validate.rules).uint32.gt = 0, (validate.rules).uint32.lte = 64]; + [(validate.rules).uint32.gt = 0, (validate.rules).uint32.lte = 96]; // The idle timeout for connections managed by the connection manager. The // idle timeout is defined as the period in which there are no active @@ -197,8 +206,14 @@ message HttpConnectionManager { // The delayed close timeout is for downstream connections managed by the HTTP connection manager. 
// It is defined as a grace period after connection close processing has been locally initiated - // during which Envoy will flush the write buffers for the connection and await the peer to close - // (i.e., a TCP FIN/RST is received by Envoy from the downstream connection). + // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy + // from the downstream connection) prior to Envoy closing the socket associated with that + // connection. + // NOTE: This timeout is enforced even when the socket associated with the downstream connection + // is pending a flush of the write buffer. However, any progress made writing data to the socket + // will restart the timer associated with this timeout. This means that the total grace period for + // a socket in this state will be + // +. // // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close // sequence mitigates a race condition that exists when downstream clients do not drain/process @@ -210,8 +225,15 @@ message HttpConnectionManager { // // The default timeout is 1000 ms if this option is not specified. // - // A value of 0 will completely disable delayed close processing, and the downstream connection's - // socket will be closed immediately after the write flush is completed. + // .. NOTE:: + // To be useful in avoiding the race condition described above, this timeout must be set + // to *at least* +<100ms to account for + // a reasonable "worst" case processing time for a full iteration of Envoy's event loop>. + // + // .. WARNING:: + // A value of 0 will completely disable delayed close processing. When disabled, the downstream + // connection's socket will be closed immediately after the write flush is completed or will + // never close if the write flush does not complete. 
google.protobuf.Duration delayed_close_timeout = 26 [(gogoproto.stdduration) = true]; // Configuration for :ref:`HTTP access logs ` @@ -342,6 +364,7 @@ message HttpConnectionManager { // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 // ` for runtime // control. + // [#not-implemented-hide:] bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; // The configuration for HTTP upgrades. @@ -374,14 +397,20 @@ message HttpConnectionManager { }; repeated UpgradeConfig upgrade_configs = 23; - // If true, the order of encoder filters will be reversed to that of filters - // configured in the HTTP filter chain. Otherwise, it will keep the existing - // order. - // Note: this is a bug fix for Envoy, which is designed to have the reversed - // order of encode filters to that of decode ones, (see - // https://github.com/envoyproxy/envoy/issues/4599 for details). When we remove this field, envoy - // will have the same behavior when it sets true. - google.protobuf.BoolValue bugfix_reverse_encode_order = 27 [deprecated = true]; + reserved 27; + + // Should paths be normalized according to RFC 3986 before any processing of + // requests by HTTP filters or routing? This affects the upstream *:path* header + // as well. For paths that fail this check, Envoy will respond with 400 to + // paths that are malformed. This defaults to false currently but will default + // true in the future. When not specified, this value may be overridden by the + // runtime variable + // :ref:`http_connection_manager.normalize_path`. + // See `Normalization and Comparison ` + // for details of normalization. + // Note that Envoy does not perform + // `case normalization ` + google.protobuf.BoolValue normalize_path = 30; } message Rds { @@ -396,6 +425,119 @@ message Rds { string route_config_name = 2 [(validate.rules).string.min_bytes = 1]; } +// This message is used to work around the limitations with 'oneof' and repeated fields. 
+message ScopedRouteConfigurationsList { + repeated envoy.api.v2.ScopedRouteConfiguration scoped_route_configurations = 1 + [(validate.rules).repeated .min_items = 1]; +} + +message ScopedRoutes { + // The name assigned to the scoped routing configuration. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These + // keys are matched against a set of :ref:`Key` + // objects assembled from :ref:`ScopedRouteConfiguration` + // messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via + // :ref:`scoped_route_configurations_list`. + // + // Upon receiving a request's headers, the Router will build a key using the algorithm specified + // by this message. This key will be used to look up the routing table (i.e., the + // :ref:`RouteConfiguration`) to use for the request. + message ScopeKeyBuilder { + // Specifies the mechanism for constructing key fragments which are composed into scope keys. + message FragmentBuilder { + // Specifies how the value of a header should be extracted. + // The following example maps the structure of a header to the fields in this message. + // + // .. code:: + // + // <0> <1> <-- index + // X-Header: a=b;c=d + // | || | + // | || \----> + // | || + // | |\----> + // | | + // | \----> + // | + // \----> + // + // Each 'a=b' key-value pair constitutes an 'element' of the header field. + message HeaderValueExtractor { + // The name of the header field to extract the value from. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // The element separator (e.g., ';' separates 'a;b;c;d'). + // Default: empty string. This causes the entirety of the header field to be extracted. + // If this field is set to an empty string and 'index' is used in the oneof below, 'index' + // must be set to 0. + string element_separator = 2; + + // Specifies a header field's key value pair to match on. 
+ message KvElement { + // The separator between key and value (e.g., '=' separates 'k=v;...'). + string separator = 1 [(validate.rules).string.min_bytes = 1]; + + // The key to match on. + string key = 2 [(validate.rules).string.min_bytes = 1]; + } + + oneof extract_type { + // Specifies the zero based index of the element to extract. + uint32 index = 3; + + // Specifies the key value pair to extract the value from. + KvElement element = 4; + } + } + + oneof type { + option (validate.required) = true; + + // Specifies how a header field's value should be extracted. + HeaderValueExtractor header_value_extractor = 1; + } + } + + // The final scope key consists of the ordered union of these fragments. + repeated FragmentBuilder fragments = 1 [(validate.rules).repeated .min_items = 1]; + } + + // The algorithm to use for constructing a scope key for each request. + ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message.required = true]; + + // Configuration source specifier for RDS. + // This config source is used to subscribe to RouteConfiguration resources specified in + // ScopedRouteConfiguration messages. + envoy.api.v2.core.ConfigSource rds_config_source = 3 + [(validate.rules).message.required = true, (gogoproto.nullable) = false]; + + oneof config_specifier { + option (validate.required) = true; + + // The set of routing scopes corresponding to the HCM. A scope is assigned to a request by + // matching a key constructed from the request's attributes according to the algorithm specified + // by the + // :ref:`ScopeKeyBuilder` + // in this message. + ScopedRouteConfigurationsList scoped_route_configurations_list = 4; + + // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS + // API. A scope is assigned to a request by matching a key constructed from the request's + // attributes according to the algorithm specified by the + // :ref:`ScopeKeyBuilder` + // in this message. 
+ ScopedRds scoped_rds = 5; + } +} + +message ScopedRds { + // Configuration source specifier for scoped RDS. + envoy.api.v2.core.ConfigSource scoped_rds_config_source = 1 + [(validate.rules).message.required = true, (gogoproto.nullable) = false]; +} + message HttpFilter { // The name of the filter to instantiate. The name must match a supported // filter. The built-in filters are: @@ -403,14 +545,21 @@ message HttpFilter { // [#comment:TODO(mattklein123): Auto generate the following list] // * :ref:`envoy.buffer ` // * :ref:`envoy.cors ` + // * :ref:`envoy.ext_authz ` // * :ref:`envoy.fault ` + // * :ref:`envoy.filters.http.csrf ` + // * :ref:`envoy.filters.http.header_to_metadata ` + // * :ref:`envoy.filters.http.grpc_http1_reverse_bridge \ + // ` + // * :ref:`envoy.filters.http.jwt_authn ` + // * :ref:`envoy.filters.http.rbac ` + // * :ref:`envoy.filters.http.tap ` // * :ref:`envoy.gzip ` // * :ref:`envoy.http_dynamo_filter ` // * :ref:`envoy.grpc_http1_bridge ` // * :ref:`envoy.grpc_json_transcoder ` // * :ref:`envoy.grpc_web ` // * :ref:`envoy.health_check ` - // * :ref:`envoy.header_to_metadata ` // * :ref:`envoy.ip_tagging ` // * :ref:`envoy.lua ` // * :ref:`envoy.rate_limit ` @@ -421,7 +570,7 @@ message HttpFilter { // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. 
oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; + google.protobuf.Struct config = 2; google.protobuf.Any typed_config = 4; } diff --git a/api/envoy/config/filter/network/mongo_proxy/v2/BUILD b/api/envoy/config/filter/network/mongo_proxy/v2/BUILD index 69b0f85e156d0..5535f010179d4 100644 --- a/api/envoy/config/filter/network/mongo_proxy/v2/BUILD +++ b/api/envoy/config/filter/network/mongo_proxy/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/network/mysql_proxy/v1alpha1/BUILD b/api/envoy/config/filter/network/mysql_proxy/v1alpha1/BUILD index 7c6eef81a7f8b..fde664838c930 100644 --- a/api/envoy/config/filter/network/mysql_proxy/v1alpha1/BUILD +++ b/api/envoy/config/filter/network/mysql_proxy/v1alpha1/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/network/rate_limit/v2/BUILD b/api/envoy/config/filter/network/rate_limit/v2/BUILD index 8840043b218fb..08d5db95b1171 100644 --- a/api/envoy/config/filter/network/rate_limit/v2/BUILD +++ b/api/envoy/config/filter/network/rate_limit/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto b/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto index 0ceab0ebd6d54..6a1b795580c81 100644 --- a/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto +++ b/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto @@ -42,7 +42,6 @@ message RateLimit { // Configuration for an external rate 
limit service provider. If not // specified, any calls to the rate limit service will immediately return // success. - // [#comment:TODO(ramaraochavali): Make this required as part of cleanup of deprecated ratelimit - // service config in bootstrap.] - envoy.config.ratelimit.v2.RateLimitServiceConfig rate_limit_service = 6; + envoy.config.ratelimit.v2.RateLimitServiceConfig rate_limit_service = 6 + [(validate.rules).message.required = true]; } diff --git a/api/envoy/config/filter/network/rbac/v2/BUILD b/api/envoy/config/filter/network/rbac/v2/BUILD index d325e3bcde2d7..9b95d4072130d 100644 --- a/api/envoy/config/filter/network/rbac/v2/BUILD +++ b/api/envoy/config/filter/network/rbac/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/network/redis_proxy/v2/BUILD b/api/envoy/config/filter/network/redis_proxy/v2/BUILD index c35e219b44659..55ba69f1c4c22 100644 --- a/api/envoy/config/filter/network/redis_proxy/v2/BUILD +++ b/api/envoy/config/filter/network/redis_proxy/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto index 250b034d0712d..eec8c3f409544 100644 --- a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto +++ b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto @@ -22,7 +22,13 @@ message RedisProxy { // Name of cluster from cluster manager. See the :ref:`configuration section // ` of the architecture overview for recommendations on // configuring the backing cluster. - string cluster = 2 [(validate.rules).string.min_bytes = 1]; + // + // .. 
attention:: + // + // This field is deprecated. Use a :ref:`catch-all + // cluster` + // instead. + string cluster = 2 [deprecated = true]; // Redis connection pool settings. message ConnPoolSettings { @@ -46,8 +52,93 @@ message RedisProxy { // * '{user1000}.following' and '{user1000}.followers' **will** be sent to the same upstream // * '{user1000}.following' and '{user1001}.following' **might** be sent to the same upstream bool enable_hashtagging = 2; + + // Accept `moved and ask redirection + // `_ errors from upstream + // redis servers, and retry commands to the specified target server. The target server does not + // need to be known to the cluster manager. If the command cannot be redirected, then the + // original error is passed downstream unchanged. By default, this support is not enabled. + bool enable_redirection = 3; + + // Maximum size of encoded request buffer before flush is triggered and encoded requests + // are sent upstream. If this is unset, the buffer flushes whenever it receives data + // and performs no batching. + // This feature makes it possible for multiple clients to send requests to Envoy and have + // them batched- for example if one is running several worker processes, each with its own + // Redis connection. There is no benefit to using this with a single downstream process. + // Recommended size (if enabled) is 1024 bytes. + uint32 max_buffer_size_before_flush = 4; + + // The encoded request buffer is flushed N milliseconds after the first request has been + // encoded, unless the buffer size has already exceeded `max_buffer_size_before_flush`. + // If `max_buffer_size_before_flush` is not set, this flush timer is not used. Otherwise, + // the timer should be set according to the number of clients, overall request rate and + // desired maximum latency for a single command. For example, if there are many requests + // being batched together at a high rate, the buffer will likely be filled before the timer + // fires. 
Alternatively, if the request rate is lower the buffer will not be filled as often + // before the timer fires. + // If `max_buffer_size_before_flush` is set, but `buffer_flush_timeout` is not, the latter + // defaults to 3ms. + google.protobuf.Duration buffer_flush_timeout = 5 [(gogoproto.stdduration) = true]; } - // Network settings for the connection pool to the upstream cluster. + // Network settings for the connection pool to the upstream clusters. ConnPoolSettings settings = 3 [(validate.rules).message.required = true]; + + // Indicates that latency stat should be computed in microseconds. By default it is computed in + // milliseconds. + bool latency_in_micros = 4; + + message PrefixRoutes { + message Route { + // String prefix that must match the beginning of the keys. Envoy will always favor the + // longest match. + string prefix = 1 [(validate.rules).string.min_bytes = 1]; + + // Indicates if the prefix needs to be removed from the key when forwarded. + bool remove_prefix = 2; + + // Upstream cluster to forward the command to. + string cluster = 3 [(validate.rules).string.min_bytes = 1]; + } + + // List of prefix routes. + repeated Route routes = 1 [(gogoproto.nullable) = false]; + + // Indicates that prefix matching should be case insensitive. + bool case_insensitive = 2; + + // Optional catch-all route to forward commands that don't match any of the routes. The + // catch-all route becomes required when no routes are specified. + string catch_all_cluster = 3; + } + + // List of **unique** prefixes used to separate keys from different workloads to different + // clusters. Envoy will always favor the longest match first in case of overlap. A catch-all + // cluster can be used to forward commands when there is no match. Time complexity of the + // lookups is in O(min(longest key prefix, key length)). + // + // Example: + // + // .. 
code-block:: yaml + // + // prefix_routes: + // routes: + // - prefix: "ab" + // cluster: "cluster_a" + // - prefix: "abc" + // cluster: "cluster_b" + // + // When using the above routes, the following prefixes would be sent to: + // + // * 'get abc:users' would retrieve the key 'abc:users' from cluster_b. + // * 'get ab:users' would retrieve the key 'ab:users' from cluster_a. + // * 'get z:users' would return a NoUpstreamHost error. A :ref:`catch-all + // cluster` + // would have retrieved the key from that cluster instead. + // + // See the :ref:`configuration section + // ` of the architecture overview for recommendations on + // configuring the backing clusters. + PrefixRoutes prefix_routes = 5 [(gogoproto.nullable) = false]; } diff --git a/api/envoy/config/filter/network/tcp_proxy/v2/BUILD b/api/envoy/config/filter/network/tcp_proxy/v2/BUILD index 7cb467d6fb10d..d77d910aceb2f 100644 --- a/api/envoy/config/filter/network/tcp_proxy/v2/BUILD +++ b/api/envoy/config/filter/network/tcp_proxy/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto b/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto index 12ce0d2757896..9eb8f4f078173 100644 --- a/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto +++ b/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto @@ -172,7 +172,8 @@ message TcpProxy { // list of strings with each string in CIDR notation. Source and destination ports are // specified as single strings containing a comma-separated list of ports and/or port ranges. 
// - DeprecatedV1 deprecated_v1 = 6 [deprecated = true]; + // Deprecation pending https://github.com/envoyproxy/envoy/issues/4457 + DeprecatedV1 deprecated_v1 = 6; // The maximum number of unsuccessful connection attempts that will be made before // giving up. If the parameter is not specified, 1 connection attempt will be made. diff --git a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD index 24408194f1d0f..f758f7f580f5a 100644 --- a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD +++ b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto index 951a51e871621..0be6c337037f9 100644 --- a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto +++ b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto @@ -97,7 +97,7 @@ message ThriftFilter { // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. 
oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; + google.protobuf.Struct config = 2; google.protobuf.Any typed_config = 3; } diff --git a/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD b/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD new file mode 100644 index 0000000000000..8719f5083f126 --- /dev/null +++ b/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD @@ -0,0 +1,8 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "zookeeper_proxy", + srcs = ["zookeeper_proxy.proto"], +) diff --git a/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto b/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto new file mode 100644 index 0000000000000..6a8afdd12ec07 --- /dev/null +++ b/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package envoy.config.filter.network.zookeeper_proxy.v1alpha1; + +option java_outer_classname = "ZookeeperProxyProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.network.zookeeper_proxy.v1alpha1"; +option go_package = "v1alpha1"; + +import "validate/validate.proto"; +import "google/protobuf/wrappers.proto"; + +// [#protodoc-title: ZooKeeper proxy] +// ZooKeeper Proxy :ref:`configuration overview `. +message ZooKeeperProxy { + // The human readable prefix to use when emitting :ref:`statistics + // `. + string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + + // [#not-implemented-hide:] The optional path to use for writing ZooKeeper access logs. + // If the access log field is empty, access logs will not be written. + string access_log = 2; + + // Messages — requests, responses and events — that are bigger than this value will + // be ignored. If it is not set, the default value is 1Mb. 
+ // + // The value here should match the jute.maxbuffer property in your cluster configuration: + // + // https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#Unsafe+Options + // + // if that is set. If it isn't, ZooKeeper's default is also 1Mb. + google.protobuf.UInt32Value max_packet_bytes = 3; +} diff --git a/api/envoy/config/filter/thrift/rate_limit/v2alpha1/BUILD b/api/envoy/config/filter/thrift/rate_limit/v2alpha1/BUILD index 8840043b218fb..08d5db95b1171 100644 --- a/api/envoy/config/filter/thrift/rate_limit/v2alpha1/BUILD +++ b/api/envoy/config/filter/thrift/rate_limit/v2alpha1/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto b/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto index 7b4db2ae3d49d..15a50d553f9b6 100644 --- a/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto +++ b/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto @@ -46,7 +46,6 @@ message RateLimit { // Configuration for an external rate limit service provider. If not // specified, any calls to the rate limit service will immediately return // success. - // [#comment:TODO(ramaraochavali): Make this required as part of cleanup of deprecated ratelimit - // service config in bootstrap.] 
- envoy.config.ratelimit.v2.RateLimitServiceConfig rate_limit_service = 5; + envoy.config.ratelimit.v2.RateLimitServiceConfig rate_limit_service = 5 + [(validate.rules).message.required = true]; } diff --git a/api/envoy/config/filter/thrift/router/v2alpha1/BUILD b/api/envoy/config/filter/thrift/router/v2alpha1/BUILD index ce0ad0e254f03..51c69c0d5b20f 100644 --- a/api/envoy/config/filter/thrift/router/v2alpha1/BUILD +++ b/api/envoy/config/filter/thrift/router/v2alpha1/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/grpc_credential/v2alpha/BUILD b/api/envoy/config/grpc_credential/v2alpha/BUILD index ca0a71eaef6cc..4765215c4c649 100644 --- a/api/envoy/config/grpc_credential/v2alpha/BUILD +++ b/api/envoy/config/grpc_credential/v2alpha/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 -load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") api_proto_library_internal( name = "file_based_metadata", diff --git a/api/envoy/config/health_checker/redis/v2/BUILD b/api/envoy/config/health_checker/redis/v2/BUILD index b784e8d150621..239d1f224fc6f 100644 --- a/api/envoy/config/health_checker/redis/v2/BUILD +++ b/api/envoy/config/health_checker/redis/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/metrics/v2/BUILD b/api/envoy/config/metrics/v2/BUILD index 9ae462617bcc5..157b09c4d814a 100644 --- a/api/envoy/config/metrics/v2/BUILD +++ b/api/envoy/config/metrics/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", 
"api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/metrics/v2/stats.proto b/api/envoy/config/metrics/v2/stats.proto index 8b85198f60cef..08172180b5451 100644 --- a/api/envoy/config/metrics/v2/stats.proto +++ b/api/envoy/config/metrics/v2/stats.proto @@ -35,7 +35,7 @@ message StatsSink { // Stats sink specific configuration which depends on the sink being instantiated. See // :ref:`StatsdSink ` for an example. oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; + google.protobuf.Struct config = 2; google.protobuf.Any typed_config = 3; } @@ -59,9 +59,8 @@ message StatsConfig { // If any default tags are specified twice, the config will be considered // invalid. // - // See `well_known_names.h - // `_ - // for a list of the default tags in Envoy. + // See :repo:`well_known_names.h ` for a list of the + // default tags in Envoy. // // If not provided, the value is assumed to be true. google.protobuf.BoolValue use_all_default_tags = 2; @@ -166,9 +165,8 @@ message StatsMatcher { message TagSpecifier { // Attaches an identifier to the tag values to identify the tag being in the // sink. Envoy has a set of default names and regexes to extract dynamic - // portions of existing stats, which can be found in `well_known_names.h - // `_ - // in the Envoy repository. If a :ref:`tag_name + // portions of existing stats, which can be found in :repo:`well_known_names.h + // ` in the Envoy repository. 
If a :ref:`tag_name // ` is provided in the config and // neither :ref:`regex ` or // :ref:`fixed_value ` were specified, diff --git a/api/envoy/config/overload/v2alpha/BUILD b/api/envoy/config/overload/v2alpha/BUILD index 100b93dfa017f..bfffb5639ca7d 100644 --- a/api/envoy/config/overload/v2alpha/BUILD +++ b/api/envoy/config/overload/v2alpha/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/overload/v2alpha/overload.proto b/api/envoy/config/overload/v2alpha/overload.proto index cf50275201a58..efdba5a09a727 100644 --- a/api/envoy/config/overload/v2alpha/overload.proto +++ b/api/envoy/config/overload/v2alpha/overload.proto @@ -32,7 +32,7 @@ message ResourceMonitor { // Configuration for the resource monitor being instantiated. oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; + google.protobuf.Struct config = 2; google.protobuf.Any typed_config = 3; } diff --git a/api/envoy/config/ratelimit/v2/BUILD b/api/envoy/config/ratelimit/v2/BUILD index 3c40fdb01ed39..be3fc1c212bba 100644 --- a/api/envoy/config/ratelimit/v2/BUILD +++ b/api/envoy/config/ratelimit/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/rbac/v2alpha/BUILD b/api/envoy/config/rbac/v2alpha/BUILD index c97a2f82378df..c2059893912c8 100644 --- a/api/envoy/config/rbac/v2alpha/BUILD +++ b/api/envoy/config/rbac/v2alpha/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 -load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", 
"api_proto_library_internal") api_proto_library_internal( name = "rbac", diff --git a/api/envoy/config/rbac/v2alpha/rbac.proto b/api/envoy/config/rbac/v2alpha/rbac.proto index 19b61473496a8..9d3c2e7277d50 100644 --- a/api/envoy/config/rbac/v2alpha/rbac.proto +++ b/api/envoy/config/rbac/v2alpha/rbac.proto @@ -1,6 +1,7 @@ syntax = "proto3"; import "validate/validate.proto"; +import "gogoproto/gogo.proto"; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/route/route.proto"; import "envoy/type/matcher/metadata.proto"; @@ -13,6 +14,8 @@ option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.rbac.v2alpha"; option go_package = "v2alpha"; +option (gogoproto.stable_marshaler_all) = true; + // [#protodoc-title: Role Based Access Control (RBAC)] // Role Based Access Control (RBAC) provides service-level and method-level access control for a diff --git a/api/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD index a69da8c415267..363d90f11808f 100644 --- a/api/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD +++ b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD b/api/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD index c5d6c49f3b778..10abf09e9ef8f 100644 --- a/api/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD +++ b/api/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/retry/previous_priorities/BUILD 
b/api/envoy/config/retry/previous_priorities/BUILD index 796993b78753b..13a694af37d2d 100644 --- a/api/envoy/config/retry/previous_priorities/BUILD +++ b/api/envoy/config/retry/previous_priorities/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") api_proto_library_internal( name = "previous_priorities", diff --git a/api/envoy/config/trace/v2/BUILD b/api/envoy/config/trace/v2/BUILD index 518395f230707..2afbb6f263252 100644 --- a/api/envoy/config/trace/v2/BUILD +++ b/api/envoy/config/trace/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/trace/v2/trace.proto b/api/envoy/config/trace/v2/trace.proto index 10c25df5a97f8..e21766b9a2a02 100644 --- a/api/envoy/config/trace/v2/trace.proto +++ b/api/envoy/config/trace/v2/trace.proto @@ -43,7 +43,7 @@ message Tracing { // - :ref:`DynamicOtConfig ` // - :ref:`DatadogConfig ` oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; + google.protobuf.Struct config = 2; google.protobuf.Any typed_config = 3; } @@ -58,7 +58,7 @@ message LightstepConfig { string collector_cluster = 1 [(validate.rules).string.min_bytes = 1]; // File containing the access token to the `LightStep - // `_ API. + // `_ API. 
string access_token_file = 2 [(validate.rules).string.min_bytes = 1]; } diff --git a/api/envoy/config/transport_socket/alts/v2alpha/BUILD b/api/envoy/config/transport_socket/alts/v2alpha/BUILD index 03934075485a9..6cb181f202d2e 100644 --- a/api/envoy/config/transport_socket/alts/v2alpha/BUILD +++ b/api/envoy/config/transport_socket/alts/v2alpha/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/transport_socket/tap/v2alpha/BUILD b/api/envoy/config/transport_socket/tap/v2alpha/BUILD index e6dcebe3f1848..75810cd0c2693 100644 --- a/api/envoy/config/transport_socket/tap/v2alpha/BUILD +++ b/api/envoy/config/transport_socket/tap/v2alpha/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/data/accesslog/v2/BUILD b/api/envoy/data/accesslog/v2/BUILD index 8ecfdd5b6d119..d3ade88e922f8 100644 --- a/api/envoy/data/accesslog/v2/BUILD +++ b/api/envoy/data/accesslog/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/data/accesslog/v2/accesslog.proto b/api/envoy/data/accesslog/v2/accesslog.proto index 3f52910573129..f8058dedc3462 100644 --- a/api/envoy/data/accesslog/v2/accesslog.proto +++ b/api/envoy/data/accesslog/v2/accesslog.proto @@ -15,6 +15,8 @@ import "google/protobuf/wrappers.proto"; import "gogoproto/gogo.proto"; import "validate/validate.proto"; +option (gogoproto.stable_marshaler_all) = true; + // [#protodoc-title: gRPC access logs] // Envoy access logs describe incoming interaction with Envoy over a fixed // period of time, 
and typically cover a single request/response exchange, @@ -67,7 +69,6 @@ message AccessLogCommon { // This field is the local/destination address on which the request from the user was received. envoy.api.v2.core.Address downstream_local_address = 3; - // [#not-implemented-hide:] // If the connection is secure,S this field will contain TLS properties. TLSProperties tls_properties = 4; @@ -135,6 +136,12 @@ message AccessLogCommon { // that ID in this field and cross reference later. It can also be used to // determine if a canary endpoint was used or not. envoy.api.v2.core.Metadata metadata = 17; + + // If upstream connection failed due to transport socket (e.g. TLS handshake), provides the + // failure reason from the transport socket. The format of this field depends on the configured + // upstream transport socket. Common TLS failures are in + // :ref:`TLS trouble shooting `. + string upstream_transport_failure_reason = 18; } // Flags indicating occurrences during request/response processing. @@ -197,11 +204,14 @@ message ResponseFlags { // Indicates that the upstream retry limit was exceeded, resulting in a downstream error. bool upstream_retry_limit_exceeded = 16; + + // Indicates that the stream idle timeout was hit, resulting in a downstream 408. + bool stream_idle_timeout = 17; } -// [#not-implemented-hide:] // Properties of a negotiated TLS connection. message TLSProperties { + // [#not-implemented-hide:] enum TLSVersion { VERSION_UNSPECIFIED = 0; TLSv1 = 1; @@ -209,9 +219,11 @@ message TLSProperties { TLSv1_2 = 3; TLSv1_3 = 4; } + // [#not-implemented-hide:] // Version of TLS that was negotiated. TLSVersion tls_version = 1; + // [#not-implemented-hide:] // TLS cipher suite negotiated during handshake. The value is a // four-digit hex code defined by the IANA TLS Cipher Suite Registry // (e.g. ``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``). @@ -221,6 +233,28 @@ message TLSProperties { // SNI hostname from handshake. 
string tls_sni_hostname = 3; + + message CertificateProperties { + message SubjectAltName { + oneof san { + string uri = 1; + // [#not-implemented-hide:] + string dns = 2; + } + } + + // SANs present in the certificate. + repeated SubjectAltName subject_alt_name = 1; + + // The subject field of the certificate. + string subject = 2; + } + + // Properties of the local certificate used to negotiate TLS. + CertificateProperties local_certificate_properties = 4; + + // Properties of the peer certificate used to negotiate TLS. + CertificateProperties peer_certificate_properties = 5; } message HTTPRequestProperties { @@ -298,4 +332,7 @@ message HTTPResponseProperties { // Map of trailers configured to be logged. map response_trailers = 5; + + // The HTTP response code details. + string response_code_details = 6; } diff --git a/api/envoy/data/cluster/v2alpha/BUILD b/api/envoy/data/cluster/v2alpha/BUILD index 5017649f9b0e1..00edd8294b6f2 100644 --- a/api/envoy/data/cluster/v2alpha/BUILD +++ b/api/envoy/data/cluster/v2alpha/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/data/core/v2alpha/BUILD b/api/envoy/data/core/v2alpha/BUILD index 740e4304cca72..8320031d8466f 100644 --- a/api/envoy/data/core/v2alpha/BUILD +++ b/api/envoy/data/core/v2alpha/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/data/tap/v2alpha/BUILD b/api/envoy/data/tap/v2alpha/BUILD index 2aecd2db76653..90cd317006d59 100644 --- a/api/envoy/data/tap/v2alpha/BUILD +++ b/api/envoy/data/tap/v2alpha/BUILD @@ -1,17 +1,28 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 
+api_proto_library_internal( + name = "common", + srcs = ["common.proto"], +) + api_proto_library_internal( name = "transport", srcs = ["transport.proto"], - deps = ["//envoy/api/v2/core:address"], + deps = [ + ":common", + "//envoy/api/v2/core:address", + ], ) api_proto_library_internal( name = "http", srcs = ["http.proto"], - deps = ["//envoy/api/v2/core:base"], + deps = [ + ":common", + "//envoy/api/v2/core:base", + ], ) api_proto_library_internal( diff --git a/api/envoy/data/tap/v2alpha/common.proto b/api/envoy/data/tap/v2alpha/common.proto new file mode 100644 index 0000000000000..d913311842bb5 --- /dev/null +++ b/api/envoy/data/tap/v2alpha/common.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; + +package envoy.data.tap.v2alpha; + +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; + +// [#protodoc-title: Tap common data] + +// Wrapper for tapped body data. This includes HTTP request/response body, transport socket received +// and transmitted data, etc. +message Body { + oneof body_type { + // Body data as bytes. By default, tap body data will be present in this field, as the proto + // `bytes` type can contain any valid byte. + bytes as_bytes = 1; + + // Body data as string. This field is only used when the :ref:`JSON_BODY_AS_STRING + // ` sink + // format type is selected. See the documentation for that option for why this is useful. + string as_string = 2; + } + + // Specifies whether body data has been truncated to fit within the specified + // :ref:`max_buffered_rx_bytes + // ` and + // :ref:`max_buffered_tx_bytes + // ` settings. 
+ bool truncated = 3; +} diff --git a/api/envoy/data/tap/v2alpha/http.proto b/api/envoy/data/tap/v2alpha/http.proto index 253f1ad9e04e6..3e04e1c892537 100644 --- a/api/envoy/data/tap/v2alpha/http.proto +++ b/api/envoy/data/tap/v2alpha/http.proto @@ -7,6 +7,7 @@ option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; import "envoy/api/v2/core/base.proto"; +import "envoy/data/tap/v2alpha/common.proto"; // [#protodoc-title: HTTP tap data] @@ -17,8 +18,11 @@ message HttpBufferedTrace { // Message headers. repeated api.v2.core.HeaderValue headers = 1; + // Message body. + Body body = 2; + // Message trailers. - repeated api.v2.core.HeaderValue trailers = 2; + repeated api.v2.core.HeaderValue trailers = 3; } // Request message. @@ -27,3 +31,30 @@ message HttpBufferedTrace { // Response message. Message response = 2; } + +// A streamed HTTP trace segment. Multiple segments make up a full trace. +message HttpStreamedTraceSegment { + // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used + // for long term stable uniqueness. + uint64 trace_id = 1; + + oneof message_piece { + // Request headers. + api.v2.core.HeaderMap request_headers = 2; + + // Request body chunk. + Body request_body_chunk = 3; + + // Request trailers. + api.v2.core.HeaderMap request_trailers = 4; + + // Response headers. + api.v2.core.HeaderMap response_headers = 5; + + // Response body chunk. + Body response_body_chunk = 6; + + // Response trailers. 
+ api.v2.core.HeaderMap response_trailers = 7; + } +} diff --git a/api/envoy/data/tap/v2alpha/transport.proto b/api/envoy/data/tap/v2alpha/transport.proto index 87e4b8d26018c..3b8c244b9baea 100644 --- a/api/envoy/data/tap/v2alpha/transport.proto +++ b/api/envoy/data/tap/v2alpha/transport.proto @@ -12,16 +12,15 @@ option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; option go_package = "v2"; import "envoy/api/v2/core/address.proto"; +import "envoy/data/tap/v2alpha/common.proto"; import "google/protobuf/timestamp.proto"; // Connection properties. message Connection { - // Global unique connection ID for Envoy session. Matches connection IDs used - // in Envoy logs. - uint64 id = 1; // Local address. envoy.api.v2.core.Address local_address = 2; + // Remote address. envoy.api.v2.core.Address remote_address = 3; } @@ -30,32 +29,69 @@ message Connection { message SocketEvent { // Timestamp for event. google.protobuf.Timestamp timestamp = 1; + // Data read by Envoy from the transport socket. message Read { // Binary data read. - bytes data = 1; + Body data = 1; + // TODO(htuch): Half-close for reads. } + // Data written by Envoy to the transport socket. message Write { // Binary data written. - bytes data = 1; + Body data = 1; + // Stream was half closed after this write. bool end_stream = 2; } + + // The connection was closed. + message Closed { + // TODO(mattklein123): Close event type. + } + // Read or write with content as bytes string. oneof event_selector { Read read = 2; Write write = 3; + Closed closed = 4; } } -// Sequence of read/write events that constitute a trace on a socket. -// Multiple Trace messages might be emitted for a given connection ID, with the -// sink (e.g. file set, network) responsible for later reassembly. -message SocketTrace { +// Sequence of read/write events that constitute a buffered trace on a socket. +message SocketBufferedTrace { + // Trace ID unique to the originating Envoy only. 
Trace IDs can repeat and should not be used + // for long term stable uniqueness. Matches connection IDs used in Envoy logs. + uint64 trace_id = 1; + // Connection properties. - Connection connection = 1; + Connection connection = 2; + // Sequence of observed events. - repeated SocketEvent events = 2; + repeated SocketEvent events = 3; + + // Set to true if read events were truncated due to the :ref:`max_buffered_rx_bytes + // ` setting. + bool read_truncated = 4; + + // Set to true if write events were truncated due to the :ref:`max_buffered_tx_bytes + // ` setting. + bool write_truncated = 5; +} + +// A streamed socket trace segment. Multiple segments make up a full trace. +message SocketStreamedTraceSegment { + // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used + // for long term stable uniqueness. Matches connection IDs used in Envoy logs. + uint64 trace_id = 1; + + oneof message_piece { + // Connection properties. + Connection connection = 2; + + // Socket event. + SocketEvent event = 3; + } } diff --git a/api/envoy/data/tap/v2alpha/wrapper.proto b/api/envoy/data/tap/v2alpha/wrapper.proto index f3c3c9a169964..a49cd3189b4d5 100644 --- a/api/envoy/data/tap/v2alpha/wrapper.proto +++ b/api/envoy/data/tap/v2alpha/wrapper.proto @@ -13,16 +13,22 @@ option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; // [#protodoc-title: Tap data wrappers] -// Wrapper for all fully buffered tap traces that Envoy emits. This is required for sending traces -// over gRPC APIs or more easily persisting binary messages to files. -message BufferedTraceWrapper { +// Wrapper for all fully buffered and streamed tap traces that Envoy emits. This is required for +// sending traces over gRPC APIs or more easily persisting binary messages to files. +message TraceWrapper { oneof trace { option (validate.required) = true; // An HTTP buffered tap trace. HttpBufferedTrace http_buffered_trace = 1; - // A buffered socket tap trace. 
- SocketTrace socket_buffered_trace = 2; + // An HTTP streamed tap trace segment. + HttpStreamedTraceSegment http_streamed_trace_segment = 2; + + // A socket buffered tap trace. + SocketBufferedTrace socket_buffered_trace = 3; + + // A socket streamed tap trace segment. + SocketStreamedTraceSegment socket_streamed_trace_segment = 4; } } diff --git a/api/envoy/service/accesslog/v2/BUILD b/api/envoy/service/accesslog/v2/BUILD index e6e389e22a02f..1dad9447048d5 100644 --- a/api/envoy/service/accesslog/v2/BUILD +++ b/api/envoy/service/accesslog/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/service/auth/v2/BUILD b/api/envoy/service/auth/v2/BUILD index 5faba48ac3dbc..57041668ddc8e 100644 --- a/api/envoy/service/auth/v2/BUILD +++ b/api/envoy/service/auth/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 @@ -9,6 +9,7 @@ api_proto_library_internal( ], deps = [ "//envoy/api/v2/core:address", + "//envoy/api/v2/core:base", ], ) @@ -18,6 +19,7 @@ api_proto_library_internal( "external_auth.proto", ], has_services = 1, + visibility = ["//visibility:public"], deps = [ ":attribute_context", "//envoy/api/v2/core:base", diff --git a/api/envoy/service/auth/v2/attribute_context.proto b/api/envoy/service/auth/v2/attribute_context.proto index ed0dd33b7c749..f5b723e7b6331 100644 --- a/api/envoy/service/auth/v2/attribute_context.proto +++ b/api/envoy/service/auth/v2/attribute_context.proto @@ -9,6 +9,9 @@ option java_package = "io.envoyproxy.envoy.service.auth.v2"; import "envoy/api/v2/core/address.proto"; import "google/protobuf/timestamp.proto"; +import 
"gogoproto/gogo.proto"; + +option (gogoproto.stable_marshaler_all) = true; // [#protodoc-title: Attribute Context ] @@ -83,7 +86,8 @@ message AttributeContext { // lowercased, because HTTP header keys are case-insensitive. map headers = 3; - // The HTTP URL path. + // The request target, as it appears in the first line of the HTTP request. This includes + // the URL path and query-string. No decoding is performed. string path = 4; // The HTTP request `Host` or 'Authority` header value. @@ -92,19 +96,25 @@ message AttributeContext { // The HTTP URL scheme, such as `http` and `https`. string scheme = 6; - // The HTTP URL query in the format of `name1=value`&name2=value2`, as it - // appears in the first line of the HTTP request. No decoding is performed. + // This field is always empty, and exists for compatibility reasons. The HTTP URL query is + // included in `path` field. string query = 7; - // The HTTP URL fragment, excluding leading `#`. No URL decoding is performed. + // This field is always empty, and exists for compatibility reasons. The URL fragment is + // not submitted as part of HTTP requests; it is unknowable. string fragment = 8; // The HTTP request size in bytes. If unknown, it must be -1. int64 size = 9; - // The network protocol used with the request, such as - // "http/1.1", "spdy/3", "h2", "h2c" + // The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", or "HTTP/2". + // + // See :repo:`headers.h:ProtocolStrings ` for a list of all + // possible values. string protocol = 10; + + // The HTTP request body. + string body = 11; } // The source of a network activity, such as starting a TCP connection. 
diff --git a/api/envoy/service/auth/v2/external_auth.proto b/api/envoy/service/auth/v2/external_auth.proto index c08046f532815..0f723c98e46c2 100644 --- a/api/envoy/service/auth/v2/external_auth.proto +++ b/api/envoy/service/auth/v2/external_auth.proto @@ -40,11 +40,11 @@ message DeniedHttpResponse { envoy.type.HttpStatus status = 1 [(validate.rules).message.required = true]; // This field allows the authorization service to send HTTP response headers - // to the the downstream client. + // to the downstream client. repeated envoy.api.v2.core.HeaderValueOption headers = 2; // This field allows the authorization service to send a response body data - // to the the downstream client. + // to the downstream client. string body = 3; } diff --git a/api/envoy/service/auth/v2alpha/BUILD b/api/envoy/service/auth/v2alpha/BUILD new file mode 100644 index 0000000000000..1d9873a5ffa43 --- /dev/null +++ b/api/envoy/service/auth/v2alpha/BUILD @@ -0,0 +1,12 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "external_auth", + srcs = [ + "external_auth.proto", + ], + has_services = 1, + deps = ["//envoy/service/auth/v2:external_auth"], +) diff --git a/api/envoy/service/auth/v2alpha/external_auth.proto b/api/envoy/service/auth/v2alpha/external_auth.proto new file mode 100644 index 0000000000000..bdf0d2e4853d0 --- /dev/null +++ b/api/envoy/service/auth/v2alpha/external_auth.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package envoy.service.auth.v2alpha; + +option go_package = "v2alpha"; + +option java_multiple_files = true; +option java_generic_services = true; +option java_outer_classname = "CertsProto"; +option java_package = "io.envoyproxy.envoy.service.auth.v2alpha"; + +import "envoy/service/auth/v2/external_auth.proto"; + +// [#protodoc-title: Authorization Service ] + +// The authorization service request messages used by external authorization 
:ref:`network filter +// ` and :ref:`HTTP filter `. + +// A generic interface for performing authorization check on incoming +// requests to a networked service. +service Authorization { + // Performs authorization check based on the attributes associated with the + // incoming request, and returns status `OK` or not `OK`. + rpc Check(v2.CheckRequest) returns (v2.CheckResponse); +} diff --git a/api/envoy/service/discovery/v2/BUILD b/api/envoy/service/discovery/v2/BUILD index ac652cf1859a4..33392fa67f86f 100644 --- a/api/envoy/service/discovery/v2/BUILD +++ b/api/envoy/service/discovery/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/service/discovery/v2/ads.proto b/api/envoy/service/discovery/v2/ads.proto index 73f272191bd01..6a9d044ab4bdd 100644 --- a/api/envoy/service/discovery/v2/ads.proto +++ b/api/envoy/service/discovery/v2/ads.proto @@ -32,7 +32,7 @@ service AggregatedDiscoveryService { returns (stream envoy.api.v2.DiscoveryResponse) { } - rpc IncrementalAggregatedResources(stream envoy.api.v2.IncrementalDiscoveryRequest) - returns (stream envoy.api.v2.IncrementalDiscoveryResponse) { + rpc DeltaAggregatedResources(stream envoy.api.v2.DeltaDiscoveryRequest) + returns (stream envoy.api.v2.DeltaDiscoveryResponse) { } } diff --git a/api/envoy/service/load_stats/v2/BUILD b/api/envoy/service/load_stats/v2/BUILD index 66294100bf701..f126ebcb1d448 100644 --- a/api/envoy/service/load_stats/v2/BUILD +++ b/api/envoy/service/load_stats/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") 
licenses(["notice"]) # Apache 2 diff --git a/api/envoy/service/metrics/v2/BUILD b/api/envoy/service/metrics/v2/BUILD index 6d14bfe414796..7f3921ced6294 100644 --- a/api/envoy/service/metrics/v2/BUILD +++ b/api/envoy/service/metrics/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/service/ratelimit/v2/BUILD b/api/envoy/service/ratelimit/v2/BUILD index d0e114ebdbecd..24278fbebc1f9 100644 --- a/api/envoy/service/ratelimit/v2/BUILD +++ b/api/envoy/service/ratelimit/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/service/tap/v2alpha/BUILD b/api/envoy/service/tap/v2alpha/BUILD index 0d46bdac25a8c..e2e67d5d7d78a 100644 --- a/api/envoy/service/tap/v2alpha/BUILD +++ b/api/envoy/service/tap/v2alpha/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/service/tap/v2alpha/common.proto b/api/envoy/service/tap/v2alpha/common.proto index b330b6c96ea31..c078e2a93a6a3 100644 --- a/api/envoy/service/tap/v2alpha/common.proto +++ b/api/envoy/service/tap/v2alpha/common.proto @@ -2,6 +2,8 @@ syntax = "proto3"; import "envoy/api/v2/route/route.proto"; +import "google/protobuf/wrappers.proto"; + import "validate/validate.proto"; package envoy.service.tap.v2alpha; @@ -77,11 +79,70 @@ message OutputConfig { // sink types are supported this constraint will be relaxed. 
repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1, max_items: 1}]; - // [#comment:TODO(mattklein123): Output filtering. E.g., certain headers, truncated body, etc.] + // For buffered tapping, the maximum amount of received body that will be buffered prior to + // truncation. If truncation occurs, the :ref:`truncated + // ` field will be set. If not specified, the + // default is 1KiB. + google.protobuf.UInt32Value max_buffered_rx_bytes = 2; + + // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to + // truncation. If truncation occurs, the :ref:`truncated + // ` field will be set. If not specified, the + // default is 1KiB. + google.protobuf.UInt32Value max_buffered_tx_bytes = 3; + + // Indicates whether taps produce a single buffered message per tap, or multiple streamed + // messages per tap in the emitted :ref:`TraceWrapper + // ` messages. Note that streamed tapping does not + // mean that no buffering takes place. Buffering may be required if data is processed before a + // match can be determined. See the HTTP tap filter :ref:`streaming + // ` documentation for more information. + bool streaming = 4; } // Tap output sink configuration. message OutputSink { + // Output format. All output is in the form of one or more :ref:`TraceWrapper + // ` messages. This enumeration indicates + // how those messages are written. Note that not all sinks support all output formats. See + // individual sink documentation for more information. + enum Format { + // Each message will be written as JSON. Any :ref:`body ` + // data will be present in the :ref:`as_bytes + // ` field. This means that body data will be + // base64 encoded as per the `proto3 JSON mappings + // `_. + JSON_BODY_AS_BYTES = 0; + + // Each message will be written as JSON. Any :ref:`body ` + // data will be present in the :ref:`as_string + // ` field. This means that body data will be + // string encoded as per the `proto3 JSON mappings + // `_. 
This format type is + // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the + // user wishes to view it directly without being forced to base64 decode the body. + JSON_BODY_AS_STRING = 1; + + // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes + // multiple binary messages without any length information the data stream will not be + // useful. However, for certain sinks that are self-delimiting (e.g., one message per file) + // this output format makes consumption simpler. + PROTO_BINARY = 2; + + // Messages are written as a sequence tuples, where each tuple is the message length encoded + // as a `protobuf 32-bit varint + // `_ + // followed by the binary message. The messages can be read back using the language specific + // protobuf coded stream implementation to obtain the message length and the message. + PROTO_BINARY_LENGTH_DELIMITED = 3; + + // Text proto format. + PROTO_TEXT = 4; + } + + // Sink output format. + Format format = 1 [(validate.rules).enum.defined_only = true]; + oneof output_sink_type { option (validate.required) = true; @@ -93,10 +154,10 @@ message OutputSink { // configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has // been configured to receive tap configuration from some other source (e.g., static // file, XDS, etc.) configuring the streaming admin output type will fail. - StreamingAdminSink streaming_admin = 1; + StreamingAdminSink streaming_admin = 2; // Tap output will be written to a file per tap sink. - FilePerTapSink file_per_tap = 2; + FilePerTapSink file_per_tap = 3; } } @@ -110,13 +171,4 @@ message FilePerTapSink { // identifier distinguishing the recorded trace for stream instances (the Envoy // connection ID, HTTP stream ID, etc.). string path_prefix = 1 [(validate.rules).string.min_bytes = 1]; - - // File format. - enum Format { - // Binary proto format. - PROTO_BINARY = 0; - // Text proto format. 
- PROTO_TEXT = 1; - } - Format format = 2 [(validate.rules).enum.defined_only = true]; } diff --git a/api/envoy/service/trace/v2/BUILD b/api/envoy/service/trace/v2/BUILD index 49c935f12938d..245dc85af6a8d 100644 --- a/api/envoy/service/trace/v2/BUILD +++ b/api/envoy/service/trace/v2/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/type/BUILD b/api/envoy/type/BUILD index 150e226517b50..97f0fd424f363 100644 --- a/api/envoy/type/BUILD +++ b/api/envoy/type/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/type/http_status.proto b/api/envoy/type/http_status.proto index 9d9a6230c29bb..3f89ada09776b 100644 --- a/api/envoy/type/http_status.proto +++ b/api/envoy/type/http_status.proto @@ -9,7 +9,7 @@ option java_package = "io.envoyproxy.envoy.type"; import "validate/validate.proto"; // HTTP response codes supported in Envoy. -// For more details: http://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml +// For more details: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml enum StatusCode { // Empty - This code not part of the HTTP status code specification, but it is needed for proto // `enum` type. 
diff --git a/api/envoy/type/matcher/BUILD b/api/envoy/type/matcher/BUILD index 2ace9de1c2328..ec4aa09b6c63c 100644 --- a/api/envoy/type/matcher/BUILD +++ b/api/envoy/type/matcher/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/type/matcher/string.proto b/api/envoy/type/matcher/string.proto index 02077522d8255..55f2171af53e0 100644 --- a/api/envoy/type/matcher/string.proto +++ b/api/envoy/type/matcher/string.proto @@ -41,7 +41,7 @@ message StringMatcher { // The input string must match the regular expression specified here. // The regex grammar is defined `here - // `_. + // `_. // // Examples: // diff --git a/api/test/build/BUILD b/api/test/build/BUILD index fae9de9c5bc9c..3260fd4181470 100644 --- a/api/test/build/BUILD +++ b/api/test/build/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_cc_test", "api_go_test") +load("@envoy_api//bazel:api_build_system.bzl", "api_cc_test", "api_go_test") licenses(["notice"]) # Apache 2 diff --git a/api/test/build/go_build_test.go b/api/test/build/go_build_test.go index d667a1a11daab..911d3ef39655f 100644 --- a/api/test/build/go_build_test.go +++ b/api/test/build/go_build_test.go @@ -4,17 +4,17 @@ import ( "testing" _ "github.com/envoyproxy/data-plane-api/api/ads" + _ "github.com/envoyproxy/data-plane-api/api/als" _ "github.com/envoyproxy/data-plane-api/api/bootstrap" _ "github.com/envoyproxy/data-plane-api/api/cds" _ "github.com/envoyproxy/data-plane-api/api/cert" _ "github.com/envoyproxy/data-plane-api/api/eds" _ "github.com/envoyproxy/data-plane-api/api/hds" _ "github.com/envoyproxy/data-plane-api/api/lds" + _ "github.com/envoyproxy/data-plane-api/api/metrics_service" _ "github.com/envoyproxy/data-plane-api/api/rds" _ "github.com/envoyproxy/data-plane-api/api/rls" _ 
"github.com/envoyproxy/data-plane-api/api/sds" - _ "github.com/envoyproxy/data-plane-api/api/als" - _ "github.com/envoyproxy/data-plane-api/api/metrics_service" _ "github.com/envoyproxy/data-plane-api/api/trace_service" ) diff --git a/api/test/validate/BUILD b/api/test/validate/BUILD index 2c98249c78859..0df5bf38177e3 100644 --- a/api/test/validate/BUILD +++ b/api/test/validate/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_cc_test", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_cc_test", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/test/validate/pgv_test.cc b/api/test/validate/pgv_test.cc index 464ad3d7f1e48..afb750833893e 100644 --- a/api/test/validate/pgv_test.cc +++ b/api/test/validate/pgv_test.cc @@ -54,7 +54,7 @@ template struct TestCase { // from data plane API. int main(int argc, char* argv[]) { envoy::config::bootstrap::v2::Bootstrap invalid_bootstrap; - invalid_bootstrap.mutable_runtime(); + invalid_bootstrap.mutable_static_resources()->add_clusters(); // This is a baseline test of the validation features we care about. 
It's // probably not worth adding in every filter and field that we want to valid // in the API upfront, but as regressions occur, this is the place to add the diff --git a/api/tools/data/tap2pcap_h2_ipv4.pb_text b/api/tools/data/tap2pcap_h2_ipv4.pb_text index 459b12c2467a5..19b3282bb52e9 100644 --- a/api/tools/data/tap2pcap_h2_ipv4.pb_text +++ b/api/tools/data/tap2pcap_h2_ipv4.pb_text @@ -1,70 +1,82 @@ socket_buffered_trace { -connection { - local_address { - socket_address { - address: "127.0.0.1" - port_value: 10000 + connection { + local_address { + socket_address { + address: "127.0.0.1" + port_value: 10000 + } } - } - remote_address { - socket_address { - address: "127.0.0.1" - port_value: 53288 + remote_address { + socket_address { + address: "127.0.0.1" + port_value: 53288 + } } } -} -events { - timestamp { - seconds: 1525207293 - nanos: 216737962 - } - read { - data: "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n\000\000\022\004\000\000\000\000\000\000\003\000\000\000d\000\004@\000\000\000\000\002\000\000\000\000\000\000\004\010\000\000\000\000\000?\377\000\001\000\000\036\001\005\000\000\000\001\202\204\206A\212\240\344\035\023\235\t\270\020\000\000z\210%\266P\303\253\266\362\340S\003*/*" - } -} -events { - timestamp { - seconds: 1525207293 - nanos: 230450657 - } - write { - data: "\000\000\006\004\000\000\000\000\000\000\004\020\000\000\000\000\000\000\004\001\000\000\000\000\000\000\004\010\000\000\000\000\000\017\377\000\001" - } -} -events { - timestamp { - seconds: 1525207293 - nanos: 230558250 - } - read { - data: "\000\000\000\004\001\000\000\000\000" - } -} -events { - timestamp { - seconds: 1525207293 - nanos: 345386933 + events { + timestamp { + seconds: 1525207293 + nanos: 216737962 + } + read { + data: { + as_bytes: "PRI * 
HTTP/2.0\r\n\r\nSM\r\n\r\n\000\000\022\004\000\000\000\000\000\000\003\000\000\000d\000\004@\000\000\000\000\002\000\000\000\000\000\000\004\010\000\000\000\000\000?\377\000\001\000\000\036\001\005\000\000\000\001\202\204\206A\212\240\344\035\023\235\t\270\020\000\000z\210%\266P\303\253\266\362\340S\003*/*" + } + } } - write { - data: "\000\025\223\001\004\000\000\000\001\210@\217\362\264\307<\324\025d\025\0101\352X\325J\177\211\3056\316p\232l\371!\301\000\216\362\264\307<\324\025b\371\254\266\032\222\324\237\377\277 \023n6\357\320\200\027]o\350@\013\300s\350@\013\302\177\351\326\302\333\241\372u\2612\363\237Ae\260\205\327>\202\313b\003\301\372\013-\211\226\333\372\013-\211\226\335\372\013-\211\267\033\372\013-\211\366[\372\013-\211\367\001\364\026[d\017\271\364\026[d/=\364\026[dL\271\364\026[e\221;\364\026[e\267\031\372\013-\262\353N\375\005\226\331x\014\375\005\226\331x\r}\005\226\331x \372\013-\262\360^\372\013-\262\373\200\372\013-\262\373\354\375\005\226\332\003\356}\005\226\332\013\216\375\005\226\332\013\316\375\005\226\332\020\232\372\013-\264&\203\364\026[hN\273\364\026[i\220;\364\026[i\226\231\372\013-\264\350\003\364\026[i\326\335\372\013-\264\360[\372\013-\264\363m}\005\226\332|-\375\005\226\332}\347~\202\313m\201\227~\202\313m\210\"\372\013-\266\'\032\372\013-\266\'\303\364\026[m\246\335\372\013-\266\330]\372\013-\266\333\356\375\005\226\333m\367\376\202\313m\270\340\276\202\313m\272\340~\202\313m\272\350~\202\313m\272\363?Ae\266\335y\377\240\262\333o8\017\240\262\333o\211\237\240\262\333\200\020>\202\313n\000L\375\005\226\334\003N}\005\226\334\003\257\375\005\226\334\010^\372\013-\270\027\201\364\026[p/\013\350,\266\340\234o\350,\266\343 
o\350,\266\343L\271\364\026[q\246\201\364\026[q\300\207\350,\266\343\201o\350,\266\343\201w\350,\266\343\201\177\350,\266\343\216\003\350,\266\343\217\273\364\026[q\3215\364\026[q\326\235\372\013-\270\353\257\375\005\226\334u\367>\202\313n<\333?Ae\267\036u\377\240\262\333\217\200?Ae\267\037i\377\240\262\333\240e\317\240\262\333\240x_Ae\267B\313\337Ae\267B\320\276\202\313n\205\307\276\202\313n\205\347>\202\313n\210\017\375\005\226\335\020_\372\013-\272\313m\375\005\226\335h\017\375\005\226\335h!\372\013-\272\320\203\364\026[u\247\237\372\013-\272\343\301\372\013-\272\363-}\005\226\335|.\375\005\226\335}\227~\202\313n\276\343?Ae\267_}\237\240\262\333\300q\257\240\262\333\300t?Ae\267\200\353?Ae\267\200\370~\202\313o\004\017}\005\226\336\010C\364\026[x!s\350,\266\360\204\357\320Ym\3416\037\240\262\333\302}\237\240\262\333\314\205\377\240\262\333\314\272\377\320Ym\346\200\017\240\262\333\315\005\257\240\262\333\315\010?Ae\267\232\023\177Ae\267\234\020~\202\313o8\313\337Ae\267\234h_Ae\267\234}\377\240\262\333\317>\017\240\262\333\317\205\377\240\262\333\340\013\377Ae\267\300\313\337Ae\267\304\360\276\204\330\002&\335\372h\002e\240\277\364\320\004\373\301o\351\2402\020\201\377\246\200\310B\343_M\001\226\231d?M\001\226\236h\037M\001\227\237}\237\246\200\320\002\313\177M\001\246B\370>\232\003L\264\370\276\232\003M\274\320~\232\003N\270\360\276\232\003O2\323\277M\001\261:\340~\232\003m\262\343\177M\001\267\032p\037M\001\267\202\323\337M\001\267\336u\317\246\200\340\237y\277\246\200\343\254\270\327\323@q\346\337\027\323@q\360>\317\323@q\360>\337\323@q\367\031k\351\2408\373\214\271\364\320\034}\347\034\372h\016\204\016\203\351\240:\026\\\007\323@t.\274/\246\200\353o\005\317\246\200\353\217\274\347\323@u\360\t\357\246\200\353\342\003\237M\001\327\334m\317\246\200\353\3568\037\246\200\353\3568\377\323@x\014\211\257\246\200\360\033\013\337M\001\3406&\276\232\003\301\003/}4\007\204 
\277\364\320\036d-;\364\320\036e\366\301\364\320\036i\240\263\364\320\036i\2417\364\320\036i\320\273\364\320\036m\327\201\364\320\036q\367]\372h\017>\026\305\364\320\037\000\202/\246\200\370\031\010>\232\003\355\276\020}4\007\334\023\315\3754\007\334\023\316}4\007\334y\306~\232\003\356>\323\337M\001\367B\333?M\001\367Zq\377\246\200\373\257\210_M\001\367_\023\377M\001\367\202\320>\232\003\3574\340>\232\003\3578\373\377M\001\367\235\020~\232\003\357:\'\276\232\010\000!9\364\320@\021\004?M\004\014\205\366~\232\010\031i\340\3754\0204\363b\372h m\226\\\372h m\247\303\364\320@\343-;\364\320@\343\2179\364\320@\343\317\203\351\240\201\326\302\337\323A\003\255\262\017\246\202\007_\013_M\004\017<\006\376\232\010\036y\227~\232\010\037d.}4\020>\323\340\372h!\013\340k\351\240\204/\266\337\323A\010_p?M\004-4\343\277M\004-6\353_M\004-6\373\277M\004-\211\340\3754\020\266\320A\364\320B\333\217\273\364\320B\343 o\351\240\205\306@\357\323A\013\241p?M\004.\205\320\3754\020\272\330Y\372h!u\360\265\364\320B\360\037\027\323A\013\315\264\367\323A\013\340d_M\004/\201\226\276\232\010_\003.}4\020\276\006\201\364\320B\370\035k\351\240\205\366\331g\351\240\205\366\336k\351\240\210\014\262/\246\202 6\373?M\004@q\300}4\021\004.\263\364\320D\020\272\337\323A\020Zi\277\246\202 \264\373?M\004Al/}4\021\005\307\334\372h\"\020\236\027\323A\023.4\037\246\202&\204\323\177M\004M2\370~\232\010\232}\340\3754\0216\323\257}4\0216\363.\3754\0216\373\242\372h\"p@\007\323A\023\202\023\337M\004N:\333_M\004N\201\347>\232\010\235\023L\3754\021:\310\032\372h\"u\2207\364\320D\353A{\351\240\211\326\302\337\323A\023\301\013\277M\004O\010-}4\021< \271\364\320D\363-\265\364\320D\363@\027\323A\023\315\272\017\246\202\'\233y\317\246\202\'\333\020>\232\010\237q\226~\232\010\237u\367>\232\013 
\020\232\372h,\201\227\234\372h,\201\267\001\364\320Y\003n?\364\320Y\010B\327\323Ad.2/\246\202\310]e\337\246\202\310^e\257\246\202\310\200\320~\232\013\"\013\355\3754\026D\320^\372h,\211\246\205\364\320Y\023o9\364\320Y\023\217\275\364\320Y\023\317\275\364\320Yd.\013\351\240\262\313-\277\364\320Ye\240\275\364\320Ye\247\003\364\320Ye\260?\364\320Ye\306\\\372h,\262\343/}4\026Yy\266\276\232\013-2\343?M\005\226\232\023\337M\005\226\233q\237\246\202\313`\023\177M\005\226\300\363\337M\005\226\304\323\377M\005\226\332m\337\246\202\313m8\037\246\202\313m\272\347\323Ae\300>/\246\202\313\201e\277\246\202\313\201m\277\246\202\313\201|_M\005\227Z\013\177M\005\227Zq\257\246\202\313\255\205\237\246\202\313\255\276\357\323Ae\327D\357\323Ae\346\233o\351\240\262\363\341k\351\240\262\363\357\007\351\240\262\373L\271\364\320Z\000\002/\246\202\320\000&~\232\013@\003@\372h-\000M3\364\320Z\003\216\267\364\320Z\003\240k\351\240\264\007Yg\351\240\264\007\333\177\351\240\264\0204\377\323Ah,\262/\246\202\320Yi\317\246\202\320Ze\337\246\202\320[\020~\232\013Am\326\376\232\013Am\347\276\232\013Aq\346~\232\013At\016\3754\026\202\370[\372h-\005\366\331\372h-\t\221=\364\320Z\023\314\265\364\320Z\023\340w\351\240\264\310@\317\323Ai\220\205\237\246\202\323\"u\357\246\202\323,\274\337\323Ai\226\200\377\323Ai\227\033g\351\240\264\313\256\275\364\320Ze\327\205\364\320Ze\327\231\372h-2\370\005\364\320Ze\367\035\372h-4&Y\372h-4\310\233\372h-4\330\235\372h-4\333L\3754\026\232p\002\372h-4\340_\372h-4\353B\372h-6\020?\364\320Zm\247\003\364\320Zm\247\233\372h-6\330\005\364\320Zm\327E\364\320Zm\346\\\372h-8\007^\372h-8\007\201\364\320Zp-9\364\320Zp/3\364\320Zq\246\201\364\320Zq\246\205\364\320Zq\247]\372h-8\343!\372h-8\343\316\3754\026\234}\366\376\232\013N\201\267~\232\013N\264\373\337M\005\247_\000>\232\013N\276!}4\026\236\000]\372h-<\007\003\364\320Zx\016;\364\320ZxL\207\351\240\264\360\234{\351\240\264\363 
k\351\240\264\363\"s\351\240\264\363\"{\351\240\264\363\"\177\351\240\264\363,\213\351\240\264\363O\275\364\320Zy\261\003\351\240\264\363b\027\323Ai\346\335g\351\240\264\363\255=\364\320Zy\347Y\372h-<\373b\372h->\000\273\364\320Z|\r\275\364\320Z|L\277\364\320Z}\226\303\364\320Z}\266\331\372h->\333\340\372h->\333\341\372h->\333\355\3754\026\237m\367\276\232\013O\270\'\376\232\013O\270\363?M\005\247\336\003\377M\005\247\336\013?M\005\247\336d\037M\005\247\337\013?M\005\260\000.\3754\026\300\006C\364\320[\000\037\007\323Al\r\005\257\246\202\330\032l?M\005\260<\007>\232\013`x.}4\026\302\'\034\372h-\204O\267\364\320[\010\237\177\351\240\266\026_\017\323Al.<\357\323Al.\266/\246\202\330]m\357\246\202\330^m\317\246\202\330^q\277\246\202\330^u\277\246\202\330_\013\377M\005\261\001\247\376\232\013b\013N\3754\026\304\333`\372h-\211\327\201\364\320[\023\341o\351\240\266\'\302\357\323AlO\266\017\246\202\333 i\377\246\202\333 q\357\246\202\333\"\000~\232\013l\210B\372h-\262\'^\372h-\262\313B\372h-\262\320\205\364\320[e\2605\364\320[e\301=\364\320[e\306\332\372h-\262\350\\\372h-\262\373b\372h-\262\373n\3754\026\332\000\201\364\320[h\016\213\351\240\266\320\036\017\323Am\246@\367\323Am\246[k\351\240\266\323\355\277\364\320[l,\277\364\320[l/\265\364\320[m\260\267\364\320[m\267\035\372h-\266\333\217\3754\026\333q\366\376\232\013m\272!}4\026\333u\220}4\026\333u\320}4\026\334\003O}4\026\334\003\356\3754\026\334\010\032\372h-\270\0207\364\320[p 
w\351\240\266\340Yw\351\240\266\340^\017\323Am\3014\327\323Am\306\202\337\323Am\307\033s\351\240\266\343\240k\351\240\266\343\240s\351\240\266\343\257\003\351\240\266\343\3173\364\320[t\016\207\351\240\266\350Y\177\351\240\266\353-\013\351\240\266\353-\203\351\240\266\353/\007\351\240\266\353/3\364\320[u\2405\364\320[u\306\336\372h-\272\350Z\372h-\272\353\201\372h-\272\353\356}4\026\335u\367\376\232\013n\274\000\3754\026\335y\227~\232\013n\274\363\277M\005\267_\023\377M\005\267\200\313\177M\005\267\200\343_M\005\267\202\373?M\005\267\231\003\177M\005\267\231\003\377M\005\267\234m\337\246\202\333\316\211\257\246\202\333\3176\327\323Am\347\304\017\246\202\333\355\272\017\246\202\333\356\t\277\246\202\333\3564/\246\202\333\356>\317\323Am\367]\007\323Am\367]g\351\240\266\373\256\273\364\320[}\360\013\351\240\266\373\355\007\351\240\266\373\356\273\364\320\\\000[\177\351\240\270\001\010\037M\005\3002\027>\232\013\200h\017}4\027\000\320_\372h.\001\2413\364\320\\\003N3\364\320\\\003O\213\351\240\270\007\304\317\323Ap\017\211\277\246\202\340\037i\317\246\202\340\037y\357\246\202\340@\007~\232\013\201\013\316\3754\027\002\313o}4\027\002\320Y\372h.\005\247\203\364\320\\\013O7\364\320\\\013O?\364\320\\\013a\027\323Ap-\211\317\246\202\340[i\357\246\202\340[|_M\005\300\266\373\177M\005\300\272\373_M\005\300\274\353\237M\005\300\276\027\276\232\013\201}\240\3754\027\004\006\331\372h.\010\017\003\351\240\270 \270/\246\202\340\204\'>\232\013\202\020\236\372h.\t\226C\364\320\\\023A\007\323ApM6\317\323ApM\204_M\005\3018 \3754\027\004\343\201\372h.\t\307\234\372h.2\007\034\372h.2\007\035\372h.2\026\237\372h.2 
\013\351\240\270\313-9\364\320\\e\226\236\372h.2\313`\372h.2\313\357\3754\027\031l\017}4\027\031}\247\276\232\013\2152\313\337M\005\306\231q\377\246\202\343M:\017\246\202\343M<\037\246\202\343N\211\357\246\202\343N\270\327\323Aq\247\336\017\323Aq\247\337k\351\240\270\330\002\347\323Aq\2604\037\246\202\343`l?M\005\306\302\350~\232\013\215\210\r}4\027\033\023/}4\027\033\023`\372h.6\310\001\364\320\\m\220\263\364\320\\m\221?\364\320\\m\227\305\364\320\\m\247_\372h.6\330\037\372h.6\340\003\364\320\\m\3003\364\320\\m\300\277\364\320\\m\326\305\364\320\\m\327\205\364\320\\m\340?\364\320\\m\347Y\372h.6\363\255\3754\027\033y\327\276\232\013\215\276\006\376\232\013\215\276\007\276\232\013\216\000N\3754\027\034\020\034\372h.8\'\337\372h.8\333\315}4\027\034q\346\276\232\013\216:\373\337M\005\307\036d_M\005\307B\027\276\232\013\216\205\226~\232\013\216\205\240\3754\027\035\013L\3754\027\035\013M\3754\027\035\013\355}4\027\035\023-}4\027\035\023\357\3754\027\035e\326~\232\013\216\262\373?M\005\307Y}\277\246\202\343\255\005\257\246\202\343\255\205\257\246\202\343\256\266\367\323Aq\327\204\327\323Aq\327\231g\351\240\270\353\315\013\351\240\270\360\032w\351\240\270\360\032{\351\240\270\360\033\007\323Aq\3408\017\246\202\343\301y\277\246\202\343\301|?M\005\307\231}\237\246\202\343\315\205\277\246\202\343\315\266\037\246\202\343\315\266/\246\202\343\315\274/\246\202\343\316\201\257\246\202\343\342h\037M\005\307\304\353\277M\005\307\304\363_M\005\307\333}\357\246\202\343\356\001\337\246\202\343\356\270\367\323Aq\367\234o\351\240\270\373\354\273\364\320\\}\366^\372h.\200\r?\364\320]\000\034\177\351\240\272\000\264\017\246\202\350\002\333\277M\005\3204\323?M\005\320<\340~\232\013\240y\327\376\232\013\240|M\3754\027@\373\254\3758\330D\343\355}8\330D\360\001\364\343ad\r\267\364\343ae\267\301\364\343ae\301\007\351\346@\006\202\317\323\314\200\r<\327\323\314\200\r>\327\323\314\200\r>\357\323\314\200\r\270\347\320@ \000\0173\364\020\010\001}\267>\202\001\000@|\037A\000\200 \266\317\320@ 
\010M\013\350 \020\004\310\034\372\010\004\0012\310>\202\001\000M4\337\320@ \t\246\335\372\010\004\0016\313\237A\000\200&\332g\350 \020\004\333\217\375\004\002\000\233u\317\240\200@\023n\275\364\020\010\002p@\372\010\004\0018\'\276\202\001\000N2\357\320@ \t\306_\372\010\004\0018\333\177A\000\200\'\034w\350 \020\004\343\256\375\004\002\000\234u\357\240\262\330\000\000\273\364\026\336\003\256\270\327\320\\\020\000\026\\\372\013\202\000\002\353\337Ap@\000\204\357\320_\000\000\'\236\372\013\340\000\032\023?A|\000\003Bw\350/\200\001}\367\376\202\370\000&\332{\350/\200\002y\340}\005\360\001\226D\037\240\276\0002\310\231\372\013\340\003,\211\277\240\276\0002\310\234\372\013\340\003,\211\357\240\276\0002\310\237\372\013\340\003,\262\017\240\276\0002\320\035\372\013\340\003-\001\357\240\276\0002\320\037\372\013\340\003L\201\377\240\276\0004\330\\\372\013\340\003M\205\337\240\276\0004\330^\372\013\340\003M\205\377\240\276\0004\330\201\364\027\300\006\233\020~\202\370\000\323\317\213\350/\200\r>\000\375\005\360\001\261\001\277\240\276\0006 9\364\027\300\006\304\026\276\202\370\000\330\202\337\320_\000\033\020]\372\013\340\003b\013\337A|\000lA\177\350/\200\r\270\343\177A|\000m\307E\364\027\300\006\335e\317\240\276\0006\353M}4\026B\350[oa\226\337i~\224\000T\320?J\010\001yA\002\343A\270\313*b\321\277d\002-1X\215\256\303w\032K\364\245#\362\260\346,\000_\226I|\245\211\323M\037j\022q\330\202\246\014\233\265,\363\315\276\260\177@\230\362\264\307<\324\025i\245*\321\214\235KT\213X^\326\225\tX\325J\177\224)\244\202)/\237\225\203\361\203\261\223\026\301\372\232\274M_\361@\224\362\264\307<\324\025i\274!h\315P\354\364\267r\330\203\036\257\207\013\355\005\246\\m\357@\003p3p\257\275\256\017\347|\346B\206B\225\035*\rMl\353R\263\320bz\376\024\334R\2512\344;\025\263\\\345\242\265%=\212R{\n\241\252\224\353\377?@\236\362\264\307<\324\025i\245*\304\266\313\013RV\260\275\255*\022\261\016\204\255-\207\245i\274#\204\013K\264\017@\217\362\264\307<\324\025i\006\221\255\334\266 
\307\253\207\013\355>\333\302m\277v\204-]\317\353@\217\362\264\307<\324\025j\212\232OR\324\0162\321\240b:\220\307k\030\214\366L\307k\030\216\24417\204-]\207\221\211\274 +\240\266w+\016\274\017@\214\362\267\224!j\354:JD\230\365\177\212\017\332\224\236B\301\035\007\'_@\213\362\264\266\016\222\254z\322c\324\217\211\335\016\214\032\266\344\305\223O@\223\362\264\307<\324\025i\245*\326\027\265\245BVM\203!\177\303\031)\350\027\2564\323?]\254\242\240\267q\367\231k\351\210\352C\035\254b3\3313\035\254b:\220\304\336\020\265v\036F&\360\200\256\202\331\334\254:\360>\273YEAn\343\3572\327\320\311O@\275q\246\231@\217\362\264\307<\324\025i\221Dk \266w1\013\003web\017(\300\016\270\262\303\266\001\000/,\006\326\000V\020>\324/\232\315aQ\006\371\355\372Q\220\255\240~\226\020\002\362\202\005\306\203q\226T\305\243\177\332\225\2153\300\307\332\222\036\221\232\250\027\230\347\232\202\256C\323\017(\377\'\323\222\374\001\023\360\037\036L\272\274\305D\276\254\237/\237yd\333\370\224\003\036\275\035w\352\247N\276\306j\251\346\240\277\226\001\313\315\343[\266I\315E\355\274\031\330W,\246\350*\375\351\230\217\342\277\336\034ZC\2435\323BO\323\311\023fK\306\316\314m;\0179\331\313\223\006\216\267\253\353\217e\373\037\305\2012\343\272/\034)\333]\267r\375\250_5\232\302\242\016E\223\351FB\266\242%a\000/( \\h7\031eLZ7\375\251X\323<\014}\251!\351\031\252\201y\216y\250*\344=?jcJk\325U\036\277@\215\362\264\307<\324\025h\306N\245\252D\177\320b:\220\307k\030\214\366L\307k\030\216\24417\204-]\207\221\211\274 +\240\266w+\016\274\017\256\326QP[\270\373\314\265\364\304u!\216\3261\031\354\231\216\3261\035Hbo\010Z\273\017#\023x@W\232R\260\363\237C%=\002\365\306\232g@\222\362\264\307<\324\025i\016\205\220[;\230\205Y6\014\205\207\244f\252\344\347\244\277@\214\362\264\307<\324\025d\026\316\346!\177\207\361\343\307\324\347\244\277@\234\362\264\307<\324\025i\245*\326\025\025\236\244\025b\036B\255!R3P\205\223`\310_\250\260\250\254\365 
\261\020\362\026$\0251G\352(\306N\245\252O\253\n\212\317R\013\021\017!bAS\024~\242\214d\352Z\244@\236\362\264\307<\324\025i\245*\326\025\025\236\244\025h\317\'\245\223`\352D\247\262\221\244\307\251\037\215\232\332\275\232\272\313\'\321\'\266\256\245\223@\222\362\264\307<\324\025dNZ(\224\310\235d$i\265\037\207\275\010&\273\202\037_@\205\035\tY\035\311\354\237\264\037\315\306\232g\371\373R\221\300&\337\020\000\017\265;Zb@\330Y\003-2\317\332\235\2551 l,\201\226\231\027\332\235\2551 l,\201\226\231\017\332\235\2551 l,\201\226Y\177\355N\326\230\2206\026@\313,\267\365\332\323\022\017\346\343M3\374\375\251H\340\023o\210\000\007\332\235\340\376Zg\351\241}4\037\246_\372e\277\347@\223\362\264\307<\324\025i\245*\310-\235\314B\254\233\006B\377\207\361\343\307\324\347\244\277R\203\250\365\027{\213\204\204-i[\005D<\206\252o@\225\362\261j\356\177K[Z\023aGJ\310-\235\314B\254\223R_\202\010Z" + events { + timestamp { + seconds: 1525207293 + nanos: 230450657 + } + write { + data: { + as_bytes: "\000\000\006\004\000\000\000\000\000\000\004\020\000\000\000\000\000\000\004\001\000\000\000\000\000\000\004\010\000\000\000\000\000\017\377\000\001" + } + } } -} -events { - timestamp { - seconds: 1525207293 - nanos: 346744029 + events { + timestamp { + seconds: 1525207293 + nanos: 230558250 + } + read { + data: { + as_bytes: "\000\000\000\004\001\000\000\000\000" + } + } } - write { - data: "\000\035V\000\000\000\000\000\001Google

\"Google\"

 

\202\313b\003\301\372\013-\211\226\333\372\013-\211\226\335\372\013-\211\267\033\372\013-\211\366[\372\013-\211\367\001\364\026[d\017\271\364\026[d/=\364\026[dL\271\364\026[e\221;\364\026[e\267\031\372\013-\262\353N\375\005\226\331x\014\375\005\226\331x\r}\005\226\331x \372\013-\262\360^\372\013-\262\373\200\372\013-\262\373\354\375\005\226\332\003\356}\005\226\332\013\216\375\005\226\332\013\316\375\005\226\332\020\232\372\013-\264&\203\364\026[hN\273\364\026[i\220;\364\026[i\226\231\372\013-\264\350\003\364\026[i\326\335\372\013-\264\360[\372\013-\264\363m}\005\226\332|-\375\005\226\332}\347~\202\313m\201\227~\202\313m\210\"\372\013-\266\'\032\372\013-\266\'\303\364\026[m\246\335\372\013-\266\330]\372\013-\266\333\356\375\005\226\333m\367\376\202\313m\270\340\276\202\313m\272\340~\202\313m\272\350~\202\313m\272\363?Ae\266\335y\377\240\262\333o8\017\240\262\333o\211\237\240\262\333\200\020>\202\313n\000L\375\005\226\334\003N}\005\226\334\003\257\375\005\226\334\010^\372\013-\270\027\201\364\026[p/\013\350,\266\340\234o\350,\266\343 
o\350,\266\343L\271\364\026[q\246\201\364\026[q\300\207\350,\266\343\201o\350,\266\343\201w\350,\266\343\201\177\350,\266\343\216\003\350,\266\343\217\273\364\026[q\3215\364\026[q\326\235\372\013-\270\353\257\375\005\226\334u\367>\202\313n<\333?Ae\267\036u\377\240\262\333\217\200?Ae\267\037i\377\240\262\333\240e\317\240\262\333\240x_Ae\267B\313\337Ae\267B\320\276\202\313n\205\307\276\202\313n\205\347>\202\313n\210\017\375\005\226\335\020_\372\013-\272\313m\375\005\226\335h\017\375\005\226\335h!\372\013-\272\320\203\364\026[u\247\237\372\013-\272\343\301\372\013-\272\363-}\005\226\335|.\375\005\226\335}\227~\202\313n\276\343?Ae\267_}\237\240\262\333\300q\257\240\262\333\300t?Ae\267\200\353?Ae\267\200\370~\202\313o\004\017}\005\226\336\010C\364\026[x!s\350,\266\360\204\357\320Ym\3416\037\240\262\333\302}\237\240\262\333\314\205\377\240\262\333\314\272\377\320Ym\346\200\017\240\262\333\315\005\257\240\262\333\315\010?Ae\267\232\023\177Ae\267\234\020~\202\313o8\313\337Ae\267\234h_Ae\267\234}\377\240\262\333\317>\017\240\262\333\317\205\377\240\262\333\340\013\377Ae\267\300\313\337Ae\267\304\360\276\204\330\002&\335\372h\002e\240\277\364\320\004\373\301o\351\2402\020\201\377\246\200\310B\343_M\001\226\231d?M\001\226\236h\037M\001\227\237}\237\246\200\320\002\313\177M\001\246B\370>\232\003L\264\370\276\232\003M\274\320~\232\003N\270\360\276\232\003O2\323\277M\001\261:\340~\232\003m\262\343\177M\001\267\032p\037M\001\267\202\323\337M\001\267\336u\317\246\200\340\237y\277\246\200\343\254\270\327\323@q\346\337\027\323@q\360>\317\323@q\360>\337\323@q\367\031k\351\2408\373\214\271\364\320\034}\347\034\372h\016\204\016\203\351\240:\026\\\007\323@t.\274/\246\200\353o\005\317\246\200\353\217\274\347\323@u\360\t\357\246\200\353\342\003\237M\001\327\334m\317\246\200\353\3568\037\246\200\353\3568\377\323@x\014\211\257\246\200\360\033\013\337M\001\3406&\276\232\003\301\003/}4\007\204 
\277\364\320\036d-;\364\320\036e\366\301\364\320\036i\240\263\364\320\036i\2417\364\320\036i\320\273\364\320\036m\327\201\364\320\036q\367]\372h\017>\026\305\364\320\037\000\202/\246\200\370\031\010>\232\003\355\276\020}4\007\334\023\315\3754\007\334\023\316}4\007\334y\306~\232\003\356>\323\337M\001\367B\333?M\001\367Zq\377\246\200\373\257\210_M\001\367_\023\377M\001\367\202\320>\232\003\3574\340>\232\003\3578\373\377M\001\367\235\020~\232\003\357:\'\276\232\010\000!9\364\320@\021\004?M\004\014\205\366~\232\010\031i\340\3754\0204\363b\372h m\226\\\372h m\247\303\364\320@\343-;\364\320@\343\2179\364\320@\343\317\203\351\240\201\326\302\337\323A\003\255\262\017\246\202\007_\013_M\004\017<\006\376\232\010\036y\227~\232\010\037d.}4\020>\323\340\372h!\013\340k\351\240\204/\266\337\323A\010_p?M\004-4\343\277M\004-6\353_M\004-6\373\277M\004-\211\340\3754\020\266\320A\364\320B\333\217\273\364\320B\343 o\351\240\205\306@\357\323A\013\241p?M\004.\205\320\3754\020\272\330Y\372h!u\360\265\364\320B\360\037\027\323A\013\315\264\367\323A\013\340d_M\004/\201\226\276\232\010_\003.}4\020\276\006\201\364\320B\370\035k\351\240\205\366\331g\351\240\205\366\336k\351\240\210\014\262/\246\202 6\373?M\004@q\300}4\021\004.\263\364\320D\020\272\337\323A\020Zi\277\246\202 \264\373?M\004Al/}4\021\005\307\334\372h\"\020\236\027\323A\023.4\037\246\202&\204\323\177M\004M2\370~\232\010\232}\340\3754\0216\323\257}4\0216\363.\3754\0216\373\242\372h\"p@\007\323A\023\202\023\337M\004N:\333_M\004N\201\347>\232\010\235\023L\3754\021:\310\032\372h\"u\2207\364\320D\353A{\351\240\211\326\302\337\323A\023\301\013\277M\004O\010-}4\021< \271\364\320D\363-\265\364\320D\363@\027\323A\023\315\272\017\246\202\'\233y\317\246\202\'\333\020>\232\010\237q\226~\232\010\237u\367>\232\013 
\020\232\372h,\201\227\234\372h,\201\267\001\364\320Y\003n?\364\320Y\010B\327\323Ad.2/\246\202\310]e\337\246\202\310^e\257\246\202\310\200\320~\232\013\"\013\355\3754\026D\320^\372h,\211\246\205\364\320Y\023o9\364\320Y\023\217\275\364\320Y\023\317\275\364\320Yd.\013\351\240\262\313-\277\364\320Ye\240\275\364\320Ye\247\003\364\320Ye\260?\364\320Ye\306\\\372h,\262\343/}4\026Yy\266\276\232\013-2\343?M\005\226\232\023\337M\005\226\233q\237\246\202\313`\023\177M\005\226\300\363\337M\005\226\304\323\377M\005\226\332m\337\246\202\313m8\037\246\202\313m\272\347\323Ae\300>/\246\202\313\201e\277\246\202\313\201m\277\246\202\313\201|_M\005\227Z\013\177M\005\227Zq\257\246\202\313\255\205\237\246\202\313\255\276\357\323Ae\327D\357\323Ae\346\233o\351\240\262\363\341k\351\240\262\363\357\007\351\240\262\373L\271\364\320Z\000\002/\246\202\320\000&~\232\013@\003@\372h-\000M3\364\320Z\003\216\267\364\320Z\003\240k\351\240\264\007Yg\351\240\264\007\333\177\351\240\264\0204\377\323Ah,\262/\246\202\320Yi\317\246\202\320Ze\337\246\202\320[\020~\232\013Am\326\376\232\013Am\347\276\232\013Aq\346~\232\013At\016\3754\026\202\370[\372h-\005\366\331\372h-\t\221=\364\320Z\023\314\265\364\320Z\023\340w\351\240\264\310@\317\323Ai\220\205\237\246\202\323\"u\357\246\202\323,\274\337\323Ai\226\200\377\323Ai\227\033g\351\240\264\313\256\275\364\320Ze\327\205\364\320Ze\327\231\372h-2\370\005\364\320Ze\367\035\372h-4&Y\372h-4\310\233\372h-4\330\235\372h-4\333L\3754\026\232p\002\372h-4\340_\372h-4\353B\372h-6\020?\364\320Zm\247\003\364\320Zm\247\233\372h-6\330\005\364\320Zm\327E\364\320Zm\346\\\372h-8\007^\372h-8\007\201\364\320Zp-9\364\320Zp/3\364\320Zq\246\201\364\320Zq\246\205\364\320Zq\247]\372h-8\343!\372h-8\343\316\3754\026\234}\366\376\232\013N\201\267~\232\013N\264\373\337M\005\247_\000>\232\013N\276!}4\026\236\000]\372h-<\007\003\364\320Zx\016;\364\320ZxL\207\351\240\264\360\234{\351\240\264\363 
k\351\240\264\363\"s\351\240\264\363\"{\351\240\264\363\"\177\351\240\264\363,\213\351\240\264\363O\275\364\320Zy\261\003\351\240\264\363b\027\323Ai\346\335g\351\240\264\363\255=\364\320Zy\347Y\372h-<\373b\372h->\000\273\364\320Z|\r\275\364\320Z|L\277\364\320Z}\226\303\364\320Z}\266\331\372h->\333\340\372h->\333\341\372h->\333\355\3754\026\237m\367\276\232\013O\270\'\376\232\013O\270\363?M\005\247\336\003\377M\005\247\336\013?M\005\247\336d\037M\005\247\337\013?M\005\260\000.\3754\026\300\006C\364\320[\000\037\007\323Al\r\005\257\246\202\330\032l?M\005\260<\007>\232\013`x.}4\026\302\'\034\372h-\204O\267\364\320[\010\237\177\351\240\266\026_\017\323Al.<\357\323Al.\266/\246\202\330]m\357\246\202\330^m\317\246\202\330^q\277\246\202\330^u\277\246\202\330_\013\377M\005\261\001\247\376\232\013b\013N\3754\026\304\333`\372h-\211\327\201\364\320[\023\341o\351\240\266\'\302\357\323AlO\266\017\246\202\333 i\377\246\202\333 q\357\246\202\333\"\000~\232\013l\210B\372h-\262\'^\372h-\262\313B\372h-\262\320\205\364\320[e\2605\364\320[e\301=\364\320[e\306\332\372h-\262\350\\\372h-\262\373b\372h-\262\373n\3754\026\332\000\201\364\320[h\016\213\351\240\266\320\036\017\323Am\246@\367\323Am\246[k\351\240\266\323\355\277\364\320[l,\277\364\320[l/\265\364\320[m\260\267\364\320[m\267\035\372h-\266\333\217\3754\026\333q\366\376\232\013m\272!}4\026\333u\220}4\026\333u\320}4\026\334\003O}4\026\334\003\356\3754\026\334\010\032\372h-\270\0207\364\320[p 
w\351\240\266\340Yw\351\240\266\340^\017\323Am\3014\327\323Am\306\202\337\323Am\307\033s\351\240\266\343\240k\351\240\266\343\240s\351\240\266\343\257\003\351\240\266\343\3173\364\320[t\016\207\351\240\266\350Y\177\351\240\266\353-\013\351\240\266\353-\203\351\240\266\353/\007\351\240\266\353/3\364\320[u\2405\364\320[u\306\336\372h-\272\350Z\372h-\272\353\201\372h-\272\353\356}4\026\335u\367\376\232\013n\274\000\3754\026\335y\227~\232\013n\274\363\277M\005\267_\023\377M\005\267\200\313\177M\005\267\200\343_M\005\267\202\373?M\005\267\231\003\177M\005\267\231\003\377M\005\267\234m\337\246\202\333\316\211\257\246\202\333\3176\327\323Am\347\304\017\246\202\333\355\272\017\246\202\333\356\t\277\246\202\333\3564/\246\202\333\356>\317\323Am\367]\007\323Am\367]g\351\240\266\373\256\273\364\320[}\360\013\351\240\266\373\355\007\351\240\266\373\356\273\364\320\\\000[\177\351\240\270\001\010\037M\005\3002\027>\232\013\200h\017}4\027\000\320_\372h.\001\2413\364\320\\\003N3\364\320\\\003O\213\351\240\270\007\304\317\323Ap\017\211\277\246\202\340\037i\317\246\202\340\037y\357\246\202\340@\007~\232\013\201\013\316\3754\027\002\313o}4\027\002\320Y\372h.\005\247\203\364\320\\\013O7\364\320\\\013O?\364\320\\\013a\027\323Ap-\211\317\246\202\340[i\357\246\202\340[|_M\005\300\266\373\177M\005\300\272\373_M\005\300\274\353\237M\005\300\276\027\276\232\013\201}\240\3754\027\004\006\331\372h.\010\017\003\351\240\270 \270/\246\202\340\204\'>\232\013\202\020\236\372h.\t\226C\364\320\\\023A\007\323ApM6\317\323ApM\204_M\005\3018 \3754\027\004\343\201\372h.\t\307\234\372h.2\007\034\372h.2\007\035\372h.2\026\237\372h.2 
\013\351\240\270\313-9\364\320\\e\226\236\372h.2\313`\372h.2\313\357\3754\027\031l\017}4\027\031}\247\276\232\013\2152\313\337M\005\306\231q\377\246\202\343M:\017\246\202\343M<\037\246\202\343N\211\357\246\202\343N\270\327\323Aq\247\336\017\323Aq\247\337k\351\240\270\330\002\347\323Aq\2604\037\246\202\343`l?M\005\306\302\350~\232\013\215\210\r}4\027\033\023/}4\027\033\023`\372h.6\310\001\364\320\\m\220\263\364\320\\m\221?\364\320\\m\227\305\364\320\\m\247_\372h.6\330\037\372h.6\340\003\364\320\\m\3003\364\320\\m\300\277\364\320\\m\326\305\364\320\\m\327\205\364\320\\m\340?\364\320\\m\347Y\372h.6\363\255\3754\027\033y\327\276\232\013\215\276\006\376\232\013\215\276\007\276\232\013\216\000N\3754\027\034\020\034\372h.8\'\337\372h.8\333\315}4\027\034q\346\276\232\013\216:\373\337M\005\307\036d_M\005\307B\027\276\232\013\216\205\226~\232\013\216\205\240\3754\027\035\013L\3754\027\035\013M\3754\027\035\013\355}4\027\035\023-}4\027\035\023\357\3754\027\035e\326~\232\013\216\262\373?M\005\307Y}\277\246\202\343\255\005\257\246\202\343\255\205\257\246\202\343\256\266\367\323Aq\327\204\327\323Aq\327\231g\351\240\270\353\315\013\351\240\270\360\032w\351\240\270\360\032{\351\240\270\360\033\007\323Aq\3408\017\246\202\343\301y\277\246\202\343\301|?M\005\307\231}\237\246\202\343\315\205\277\246\202\343\315\266\037\246\202\343\315\266/\246\202\343\315\274/\246\202\343\316\201\257\246\202\343\342h\037M\005\307\304\353\277M\005\307\304\363_M\005\307\333}\357\246\202\343\356\001\337\246\202\343\356\270\367\323Aq\367\234o\351\240\270\373\354\273\364\320\\}\366^\372h.\200\r?\364\320]\000\034\177\351\240\272\000\264\017\246\202\350\002\333\277M\005\3204\323?M\005\320<\340~\232\013\240y\327\376\232\013\240|M\3754\027@\373\254\3758\330D\343\355}8\330D\360\001\364\343ad\r\267\364\343ae\267\301\364\343ae\301\007\351\346@\006\202\317\323\314\200\r<\327\323\314\200\r>\327\323\314\200\r>\357\323\314\200\r\270\347\320@ \000\0173\364\020\010\001}\267>\202\001\000@|\037A\000\200 \266\317\320@ 
\010M\013\350 \020\004\310\034\372\010\004\0012\310>\202\001\000M4\337\320@ \t\246\335\372\010\004\0016\313\237A\000\200&\332g\350 \020\004\333\217\375\004\002\000\233u\317\240\200@\023n\275\364\020\010\002p@\372\010\004\0018\'\276\202\001\000N2\357\320@ \t\306_\372\010\004\0018\333\177A\000\200\'\034w\350 \020\004\343\256\375\004\002\000\234u\357\240\262\330\000\000\273\364\026\336\003\256\270\327\320\\\020\000\026\\\372\013\202\000\002\353\337Ap@\000\204\357\320_\000\000\'\236\372\013\340\000\032\023?A|\000\003Bw\350/\200\001}\367\376\202\370\000&\332{\350/\200\002y\340}\005\360\001\226D\037\240\276\0002\310\231\372\013\340\003,\211\277\240\276\0002\310\234\372\013\340\003,\211\357\240\276\0002\310\237\372\013\340\003,\262\017\240\276\0002\320\035\372\013\340\003-\001\357\240\276\0002\320\037\372\013\340\003L\201\377\240\276\0004\330\\\372\013\340\003M\205\337\240\276\0004\330^\372\013\340\003M\205\377\240\276\0004\330\201\364\027\300\006\233\020~\202\370\000\323\317\213\350/\200\r>\000\375\005\360\001\261\001\277\240\276\0006 9\364\027\300\006\304\026\276\202\370\000\330\202\337\320_\000\033\020]\372\013\340\003b\013\337A|\000lA\177\350/\200\r\270\343\177A|\000m\307E\364\027\300\006\335e\317\240\276\0006\353M}4\026B\350[oa\226\337i~\224\000T\320?J\010\001yA\002\343A\270\313*b\321\277d\002-1X\215\256\303w\032K\364\245#\362\260\346,\000_\226I|\245\211\323M\037j\022q\330\202\246\014\233\265,\363\315\276\260\177@\230\362\264\307<\324\025i\245*\321\214\235KT\213X^\326\225\tX\325J\177\224)\244\202)/\237\225\203\361\203\261\223\026\301\372\232\274M_\361@\224\362\264\307<\324\025i\274!h\315P\354\364\267r\330\203\036\257\207\013\355\005\246\\m\357@\003p3p\257\275\256\017\347|\346B\206B\225\035*\rMl\353R\263\320bz\376\024\334R\2512\344;\025\263\\\345\242\265%=\212R{\n\241\252\224\353\377?@\236\362\264\307<\324\025i\245*\304\266\313\013RV\260\275\255*\022\261\016\204\255-\207\245i\274#\204\013K\264\017@\217\362\264\307<\324\025i\006\221\255\334\266 
\307\253\207\013\355>\333\302m\277v\204-]\317\353@\217\362\264\307<\324\025j\212\232OR\324\0162\321\240b:\220\307k\030\214\366L\307k\030\216\24417\204-]\207\221\211\274 +\240\266w+\016\274\017@\214\362\267\224!j\354:JD\230\365\177\212\017\332\224\236B\301\035\007\'_@\213\362\264\266\016\222\254z\322c\324\217\211\335\016\214\032\266\344\305\223O@\223\362\264\307<\324\025i\245*\326\027\265\245BVM\203!\177\303\031)\350\027\2564\323?]\254\242\240\267q\367\231k\351\210\352C\035\254b3\3313\035\254b:\220\304\336\020\265v\036F&\360\200\256\202\331\334\254:\360>\273YEAn\343\3572\327\320\311O@\275q\246\231@\217\362\264\307<\324\025i\221Dk \266w1\013\003web\017(\300\016\270\262\303\266\001\000/,\006\326\000V\020>\324/\232\315aQ\006\371\355\372Q\220\255\240~\226\020\002\362\202\005\306\203q\226T\305\243\177\332\225\2153\300\307\332\222\036\221\232\250\027\230\347\232\202\256C\323\017(\377\'\323\222\374\001\023\360\037\036L\272\274\305D\276\254\237/\237yd\333\370\224\003\036\275\035w\352\247N\276\306j\251\346\240\277\226\001\313\315\343[\266I\315E\355\274\031\330W,\246\350*\375\351\230\217\342\277\336\034ZC\2435\323BO\323\311\023fK\306\316\314m;\0179\331\313\223\006\216\267\253\353\217e\373\037\305\2012\343\272/\034)\333]\267r\375\250_5\232\302\242\016E\223\351FB\266\242%a\000/( \\h7\031eLZ7\375\251X\323<\014}\251!\351\031\252\201y\216y\250*\344=?jcJk\325U\036\277@\215\362\264\307<\324\025h\306N\245\252D\177\320b:\220\307k\030\214\366L\307k\030\216\24417\204-]\207\221\211\274 +\240\266w+\016\274\017\256\326QP[\270\373\314\265\364\304u!\216\3261\031\354\231\216\3261\035Hbo\010Z\273\017#\023x@W\232R\260\363\237C%=\002\365\306\232g@\222\362\264\307<\324\025i\016\205\220[;\230\205Y6\014\205\207\244f\252\344\347\244\277@\214\362\264\307<\324\025d\026\316\346!\177\207\361\343\307\324\347\244\277@\234\362\264\307<\324\025i\245*\326\025\025\236\244\025b\036B\255!R3P\205\223`\310_\250\260\250\254\365 
\261\020\362\026$\0251G\352(\306N\245\252O\253\n\212\317R\013\021\017!bAS\024~\242\214d\352Z\244@\236\362\264\307<\324\025i\245*\326\025\025\236\244\025h\317\'\245\223`\352D\247\262\221\244\307\251\037\215\232\332\275\232\272\313\'\321\'\266\256\245\223@\222\362\264\307<\324\025dNZ(\224\310\235d$i\265\037\207\275\010&\273\202\037_@\205\035\tY\035\311\354\237\264\037\315\306\232g\371\373R\221\300&\337\020\000\017\265;Zb@\330Y\003-2\317\332\235\2551 l,\201\226\231\027\332\235\2551 l,\201\226\231\017\332\235\2551 l,\201\226Y\177\355N\326\230\2206\026@\313,\267\365\332\323\022\017\346\343M3\374\375\251H\340\023o\210\000\007\332\235\340\376Zg\351\241}4\037\246_\372e\277\347@\223\362\264\307<\324\025i\245*\310-\235\314B\254\233\006B\377\207\361\343\307\324\347\244\277R\203\250\365\027{\213\204\204-i[\005D<\206\252o@\225\362\261j\356\177K[Z\023aGJ\310-\235\314B\254\223R_\202\010Z" + } + } } -} -events { - timestamp { - seconds: 1525207293 - nanos: 347782371 + events { + timestamp { + seconds: 1525207293 + nanos: 346744029 + } + write { + data: { + as_bytes: "\000\035V\000\000\000\000\000\001Google

\"Google\"

 

Advanced searchLanguage tools

© 2018 - Privacy - Terms

\000\000\000\000\001\000\000\000\001" + events { + timestamp { + seconds: 1525207293 + nanos: 347782371 + } + write { + data: { + as_bytes: "\000\014\010\000\000\000\000\000\001\"ds\">
Advanced searchLanguage tools

© 2018 - Privacy - Terms

\000\000\000\000\001\000\000\000\001" + } + } } } -} diff --git a/api/tools/tap2pcap.py b/api/tools/tap2pcap.py index 6db378056e12d..29eeeb8cefed6 100644 --- a/api/tools/tap2pcap.py +++ b/api/tools/tap2pcap.py @@ -44,7 +44,7 @@ def DumpEvent(direction, timestamp, data): def Tap2Pcap(tap_path, pcap_path): - wrapper = wrapper_pb2.BufferedTraceWrapper() + wrapper = wrapper_pb2.TraceWrapper() if tap_path.endswith('.pb_text'): with open(tap_path, 'r') as f: text_format.Merge(f.read(), wrapper) @@ -61,9 +61,9 @@ def Tap2Pcap(tap_path, pcap_path): dumps = [] for event in trace.events: if event.HasField('read'): - dumps.append(DumpEvent('I', event.timestamp, event.read.data)) + dumps.append(DumpEvent('I', event.timestamp, event.read.data.as_bytes)) elif event.HasField('write'): - dumps.append(DumpEvent('O', event.timestamp, event.write.data)) + dumps.append(DumpEvent('O', event.timestamp, event.write.data.as_bytes)) ipv6 = False try: diff --git a/api/udpa/data/orca/v1/BUILD b/api/udpa/data/orca/v1/BUILD new file mode 100644 index 0000000000000..096ca28bac3b3 --- /dev/null +++ b/api/udpa/data/orca/v1/BUILD @@ -0,0 +1,16 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "orca_load_report", + srcs = ["orca_load_report.proto"], + visibility = [ + "//visibility:public", + ], +) + +api_go_proto_library( + name = "orca_load_report", + proto = ":orca_load_report", +) diff --git a/api/udpa/data/orca/v1/orca_load_report.proto b/api/udpa/data/orca/v1/orca_load_report.proto new file mode 100644 index 0000000000000..f33f11dda950f --- /dev/null +++ b/api/udpa/data/orca/v1/orca_load_report.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; + +package udpa.data.orca.v1; + +option java_outer_classname = "OrcaLoadReportProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.udpa.data.orca.v1"; +option go_package = "v1"; + +import "validate/validate.proto"; + +// 
See section `ORCA load report format` of the design document in +// :ref:`https://github.com/envoyproxy/envoy/issues/6614`. + +message OrcaLoadReport { + // CPU utilization expressed as a fraction of available CPU resources. This + // should be derived from a sample or measurement taken during the request. + double cpu_utilization = 1 [(validate.rules).double.gte = 0, (validate.rules).double.lte = 1]; + + // Memory utilization expressed as a fraction of available memory + // resources. This should be derived from a sample or measurement taken + // during the request. + double mem_utilization = 2 [(validate.rules).double.gte = 0, (validate.rules).double.lte = 1]; + + // Total RPS being served by an endpoint. This should cover all services that an endpoint is + // responsible for. + uint64 rps = 3; + + // Application specific requests costs. Each value may be an absolute cost (e.g. + // 3487 bytes of storage) or utilization associated with the request, + // expressed as a fraction of total resources available. Utilization + // metrics should be derived from a sample or measurement taken + // during the request. 
+ map request_cost_or_utilization = 4; +} diff --git a/api/udpa/service/orca/v1/BUILD b/api/udpa/service/orca/v1/BUILD new file mode 100644 index 0000000000000..72543e8092216 --- /dev/null +++ b/api/udpa/service/orca/v1/BUILD @@ -0,0 +1,20 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "orca", + srcs = ["orca.proto"], + has_services = 1, + deps = [ + "//udpa/data/orca/v1:orca_load_report", + ], +) + +api_go_grpc_library( + name = "orca", + proto = ":orca", + deps = [ + "//udpa/data/orca/v1:orca_load_report_go_proto", + ], +) diff --git a/api/udpa/service/orca/v1/orca.proto b/api/udpa/service/orca/v1/orca.proto new file mode 100644 index 0000000000000..87871d209a4cf --- /dev/null +++ b/api/udpa/service/orca/v1/orca.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; + +package udpa.service.orca.v1; + +option java_outer_classname = "OrcaProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.udpa.service.orca.v1"; +option go_package = "v1"; + +import "udpa/data/orca/v1/orca_load_report.proto"; + +import "google/protobuf/duration.proto"; + +import "validate/validate.proto"; + +// See section `Out-of-band (OOB) reporting` of the design document in +// :ref:`https://github.com/envoyproxy/envoy/issues/6614`. + +// Out-of-band (OOB) load reporting service for the additional load reporting +// agent that does not sit in the request path. Reports are periodically sampled +// with sufficient frequency to provide temporal association with requests. +// OOB reporting compensates the limitation of in-band reporting in revealing +// costs for backends that do not provide a steady stream of telemetry such as +// long running stream operations and zero QPS services. This is a server +// streaming service, client needs to terminate current RPC and initiate +// a new call to change backend reporting frequency. 
+service OpenRcaService { + rpc StreamCoreMetrics(OrcaLoadReportRequest) returns (stream udpa.data.orca.v1.OrcaLoadReport); +} + +message OrcaLoadReportRequest { + // Interval for generating Open RCA core metric responses. + google.protobuf.Duration report_interval = 1; + // Request costs to collect. If this is empty, all known requests costs tracked by + // the load reporting agent will be returned. This provides an opportunity for + // the client to selectively obtain a subset of tracked costs. + repeated string request_cost_names = 2; +} diff --git a/api/wasm/cpp/BUILD b/api/wasm/cpp/BUILD new file mode 100644 index 0000000000000..3e49d8913994c --- /dev/null +++ b/api/wasm/cpp/BUILD @@ -0,0 +1,19 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "intrinsics_hdr", + hdrs = [ + "proxy_wasm_enums.h", + "proxy_wasm_externs.h", + "proxy_wasm_impl.h", + "proxy_wasm_intrinsics.h", + ], +) diff --git a/api/wasm/cpp/proxy_wasm_enums.h b/api/wasm/cpp/proxy_wasm_enums.h new file mode 100644 index 0000000000000..30ed55d27eb54 --- /dev/null +++ b/api/wasm/cpp/proxy_wasm_enums.h @@ -0,0 +1,60 @@ +/* + * Intrinsic enumerations available to WASM modules. 
+ */ +// NOLINT(namespace-envoy) + +#pragma once + +enum class LogLevel : EnumType { trace, debug, info, warn, error, critical }; +enum class FilterHeadersStatus : EnumType { Continue = 0, StopIteration = 1 }; +enum class FilterMetadataStatus : EnumType { Continue = 0 }; +enum class FilterTrailersStatus : EnumType { Continue = 0, StopIteration = 1 }; +enum class FilterDataStatus : EnumType { + Continue = 0, + StopIterationAndBuffer = 1, + StopIterationAndWatermark = 2, + StopIterationNoBuffer = 3 +}; +enum class StreamType : EnumType { Request = 0, Response = 1 }; +enum class MetadataType : EnumType { + Request = 0, + Response = 1, + RequestRoute = 2, // Immutable + ResponseRoute = 3, // Immutable + Log = 4, // Immutable + Node = 5 // Immutable +}; +enum class HeaderMapType : EnumType { + RequestHeaders = 0, // During the onLog callback these are immutable + RequestTrailers = 1, // During the onLog callback these are immutable + ResponseHeaders = 2, // During the onLog callback these are immutable + ResponseTrailers = 3, // During the onLog callback these are immutable + GrpcCreateInitialMetadata = 4, + GrpcReceiveInitialMetadata = 5, // Immutable + GrpcReceiveTrailingMetadata = 6, // Immutable +}; +enum GrpcStatus : EnumType { + OK = 0, + CANCELLED = 1, + UNKNOWN = 2, + INVALID_ARGUMENT = 3, + DEADLINE_EXCEEDED = 4, + NOT_FOUND = 5, + ALREADY_EXISTS = 6, + PERMISSION_DENIED = 7, + UNAUTHENTICATED = 16, + RESOURCE_EXHAUSTED = 8, + FAILED_PRECONDITION = 9, + ABORTED = 10, + OUT_OF_RANGE = 11, + UNIMPLEMENTED = 12, + INTERNAL = 13, + UNAVAILABLE = 14, + DATA_LOSS = 15, + DO_NOT_USE = 0xFFFFFFFF +}; +enum class MetricType : EnumType { + Counter = 0, + Gauge = 1, + Histogram = 2, +}; diff --git a/api/wasm/cpp/proxy_wasm_externs.h b/api/wasm/cpp/proxy_wasm_externs.h new file mode 100644 index 0000000000000..9f9efe7c0a09a --- /dev/null +++ b/api/wasm/cpp/proxy_wasm_externs.h @@ -0,0 +1,120 @@ +/* + * Intrinsic functions available to WASM modules. 
+ */ +// NOLINT(namespace-envoy) + +#pragma once + +#include "stddef.h" + +// clang-format off +/* + API Calls into the VM. + + extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onConfigure(char* configuration, int size); + extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onStart(); + extern "C" EMSCRIPTEN_KEEPALIVE int main(); // only called if proxy_onStart() is not available. + extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onTick(); + extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onCreate(uint32_t context_id); + extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onRequestHeaders(uint32_t context_id); + extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onRequestBody(uint32_t context_id, uint32_t body_buffer_length, uint32_t end_of_stream); + extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onRequestTrailers(uint32_t context_id); + extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onRequestMetadata(uint32_t context_id); + extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onResponseHeaders(uint32_t context_id); + extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onResponseBody(uint32_t context_id, uint32_t body_buffer_length, uint32_t end_of_stream); + extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onResponseTrailers(uint32_t context_id); + extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onResponseMetadata(uint32_t context_id); + extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onHttpCallResponse(uint32_t context_id uint32_t token, uint32_t header_pairs_ptr, + uint32_t header_pairs_size, uint32_t body_ptr, uint32_t body_size, uint32_t trailer_pairs_ptr, uint32_t trailer_pairs_size): + // The stream has completed. + extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onDone(uint32_t context_id); + // onLog occurs after onDone. + extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onLog(uint32_t context_id); + // The Context in the proxy has been destroyed and no further calls will be coming. 
+ extern "C" ENSCRIPTEN_KEEPALIVE void proxy_onDelete(uint32_t context_id); + extern "C" EMSCRIPTEN_KEEPALIVE proxy_onGrpcCreateInitialMetadata(uint32_t context_id, uint32_t token); + extern "C" EMSCRIPTEN_KEEPALIVE proxy_onGrpcReceiveInitialMetadata(uint32_t context_id, uint32_t token); + extern "C" EMSCRIPTEN_KEEPALIVE proxy_onGrpcTrailingMetadata(uint32_t context_id, uint32_t token); + extern "C" EMSCRIPTEN_KEEPALIVE proxy_onGrpcReceive(uint32_t context_id, uint32_t token, + uint32_t response_ptr, uint32_t response_size); + extern "C" EMSCRIPTEN_KEEPALIVE proxy_onGrpcClose(uint32_t context_id, uint32_t token, + uint32_t status_code, uint32_t status_message_ptr, uint32_t status_message_size); +*/ +// clang-format on + +// +// Low Level API. +// + +// Logging +extern "C" void proxy_log(LogLevel level, const char* logMessage, size_t messageSize); + +// Timer +extern "C" void proxy_setTickPeriodMilliseconds(uint32_t millisecond); +extern "C" uint64_t proxy_getCurrentTimeNanoseconds(); + +// Stream Info +extern "C" void proxy_getProtocol(StreamType type, const char** value_ptr, size_t* value_size); + +// Metadata +extern "C" void proxy_getMetadata(MetadataType type, const char* key_ptr, size_t key_size, + const char** value_ptr_ptr, size_t* value_size_ptr); +extern "C" void proxy_setMetadata(MetadataType type, const char* key_ptr, size_t key_size, + const char* value_ptr, size_t value_size); +extern "C" void proxy_getMetadataPairs(MetadataType type, const char** value_ptr, + size_t* value_size); +extern "C" void proxy_getMetadataStruct(MetadataType type, const char* name_ptr, size_t name_size, + const char** value_ptr_ptr, size_t* value_size_ptr); +extern "C" void proxy_setMetadataStruct(MetadataType type, const char* name_ptr, size_t name_size, + const char* value_ptr, size_t value_size); + +// Continue +extern "C" void proxy_continueRequest(); +extern "C" void proxy_continueResponse(); + +// SharedData +extern "C" void proxy_getSharedData(const char* key_ptr, 
size_t key_size, const char** value_ptr, + size_t* value_size, uint32_t* cas); +// If cas != 0 and cas != the current cas for 'key' return false, otherwise set the value and +// return true. +extern "C" bool proxy_setSharedData(const char* key_ptr, size_t key_size, const char* value_ptr, + size_t value_size, uint32_t cas); + +// Headers/Trailers/Metadata Maps +extern "C" void proxy_addHeaderMapValue(HeaderMapType type, const char* key_ptr, size_t key_size, const char* value_ptr, size_t value_size); +extern "C" void proxy_getHeaderMapValue(HeaderMapType type, const char* key_ptr, size_t key_size, const char** value_ptr, size_t* value_size); +extern "C" void proxy_getHeaderMapPairs(HeaderMapType type, const char** ptr, size_t* size); +extern "C" void proxy_setHeaderMapPairs(HeaderMapType type, const char* ptr, size_t size); +extern "C" void proxy_replaceHeaderMapValue(HeaderMapType type, const char* key_ptr, size_t key_size, const char* value_ptr, size_t value_size); +extern "C" void proxy_removeHeaderMapValue(HeaderMapType type, const char* key_ptr, size_t key_size); + +// Body +extern "C" void proxy_getRequestBodyBufferBytes(uint32_t start, uint32_t length, const char** ptr, + size_t* size); +extern "C" void proxy_getResponseBodyBufferBytes(uint32_t start, uint32_t length, const char** ptr, + size_t* size); + +// HTTP +// Returns token, used in callback onHttpCallResponse +extern "C" uint32_t proxy_httpCall(const char* uri_ptr, size_t uri_size, void* header_pairs_ptr, + size_t header_pairs_size, const char* body_ptr, size_t body_size, + void* trailer_pairs_ptr, size_t trailer_pairs_size, + uint32_t timeout_milliseconds); +// gRPC +// Returns token, used in gRPC callbacks (onGrpc...) 
+extern "C" uint32_t proxy_grpcCall(const char* service_ptr, size_t service_size, const char* service_name_ptr, + size_t service_name_size, const char* method_name_ptr, size_t method_name_size, + const char* request_ptr, size_t request_size, uint32_t timeout_milliseconds); +extern "C" uint32_t proxy_grpcStream(const char* service_ptr, size_t service_size, const char* service_name_ptr, + size_t service_name_size, const char* method_name_ptr, size_t method_name_size); +extern "C" void proxy_grpcCancel(uint32_t token); +extern "C" void proxy_grpcClose(uint32_t token); +extern "C" void proxy_grpcSend(uint32_t token, const char* message_ptr, size_t message_size, uint32_t end_stream); + +// Metrics +// Returns a metric_id which can be used to report a metric. On error returns 0. +extern "C" uint32_t proxy_defineMetric(MetricType type, const char* name_ptr, size_t name_size); +extern "C" void proxy_incrementMetric(uint32_t metric_id, int64_t offset); +extern "C" void proxy_recordMetric(uint32_t metric_id, uint64_t value); +extern "C" uint64_t proxy_getMetric(uint32_t metric_id); + diff --git a/api/wasm/cpp/proxy_wasm_impl.h b/api/wasm/cpp/proxy_wasm_impl.h new file mode 100644 index 0000000000000..d03c1c5b01efe --- /dev/null +++ b/api/wasm/cpp/proxy_wasm_impl.h @@ -0,0 +1,1298 @@ +/* + * Intrinsic high-level support functions available to WASM modules. + */ +// NOLINT(namespace-envoy) +#include +#include +#include +#include +#include + +// +// High Level C++ API. 
+// +class Context; + +class ProxyException : std::runtime_error { +public: + ProxyException(const std::string& message) : std::runtime_error(message) {} +}; + +inline void logTrace(const std::string& logMessage) { + proxy_log(LogLevel::trace, logMessage.c_str(), logMessage.size()); +} +inline void logDebug(const std::string& logMessage) { + proxy_log(LogLevel::debug, logMessage.c_str(), logMessage.size()); +} +inline void logInfo(const std::string& logMessage) { + proxy_log(LogLevel::info, logMessage.c_str(), logMessage.size()); +} +inline void logWarn(const std::string& logMessage) { + proxy_log(LogLevel::warn, logMessage.c_str(), logMessage.size()); +} +inline void logError(const std::string& logMessage) { + proxy_log(LogLevel::error, logMessage.c_str(), logMessage.size()); +} +inline void logCritical(const std::string& logMessage) { + proxy_log(LogLevel::critical, logMessage.c_str(), logMessage.size()); +} + +// Buffers coming into the WASM filter. +class WasmData { +public: + WasmData(const char* data, size_t size) : data_(data), size_(size) {} + ~WasmData() { ::free(const_cast(data_)); } + const char* data() { return data_; } + StringView view() { return {data_, size_}; } + std::string toString() { return std::string(view()); } + std::vector> pairs(); + template T proto() { + T p; + p.ParseFromArray(data_, size_); + return p; + } + + WasmData& operator=(const WasmData&) = delete; + WasmData(const WasmData&) = delete; + +private: + const char* data_; + size_t size_; +}; +typedef std::unique_ptr WasmDataPtr; + +inline std::vector> WasmData::pairs() { + std::vector> result; + if (!data()) + return result; + auto p = data(); + int n = *reinterpret_cast(p); + p += sizeof(int); + result.resize(n); + auto s = p + n * 8; + for (int i = 0; i < n; i++) { + int size = *reinterpret_cast(p); + p += sizeof(int); + result[i].first = StringView(s, size); + s += size + 1; + size = *reinterpret_cast(p); + p += sizeof(int); + result[i].second = StringView(s, size); + s += size 
+ 1; + } + return result; +} + +template size_t pairsSize(const Pairs& result) { + size_t size = 4; // number of headers + for (auto& p : result) { + size += 8; // size of key, size of value + size += p.first.size() + 1; // null terminated key + size += p.second.size() + 1; // null terminated value + } + return size; +} + +template void marshalPairs(const Pairs& result, char* buffer) { + char* b = buffer; + *reinterpret_cast(b) = result.size(); + b += sizeof(uint32_t); + for (auto& p : result) { + *reinterpret_cast(b) = p.first.size(); + b += sizeof(uint32_t); + *reinterpret_cast(b) = p.second.size(); + b += sizeof(uint32_t); + } + for (auto& p : result) { + memcpy(b, p.first.data(), p.first.size()); + b += p.first.size(); + *b++ = 0; + memcpy(b, p.second.data(), p.second.size()); + b += p.second.size(); + *b++ = 0; + } +} + +template void exportPairs(const Pairs& pairs, const char** ptr, size_t* size_ptr) { + if (pairs.empty()) { + *ptr = nullptr; + *size_ptr = 0; + return; + } + size_t size = pairsSize(pairs); + char* buffer = static_cast(::malloc(size)); + marshalPairs(pairs, buffer); + *size_ptr = size; +} + +struct PairHash { + template std::size_t operator()(const std::pair& x) const { + return std::hash()(x.first) + std::hash()(x.second); + } +}; + +struct Tuple3Hash { + template std::size_t operator()(const std::tuple& x) const { + return std::hash()(std::get<0>(x)) + std::hash()(std::get<1>(x)) + std::hash()(std::get<2>(x)); + } +}; + +using HeaderStringPairs = std::vector>; + +class GrpcCallHandlerBase { +public: + GrpcCallHandlerBase(Context* context) : context_(context) {} + virtual ~GrpcCallHandlerBase() {} + + void cancel(); + + virtual void onCreateInitialMetadata() = 0; + virtual void onSuccess(std::unique_ptr message) = 0; + virtual void onFailure(GrpcStatus status, std::unique_ptr error_message) = 0; + +private: + friend class Context; + + Context* const context_; + uint32_t token_; +}; + +template +class GrpcCallHandler : public 
GrpcCallHandlerBase { +public: + GrpcCallHandler(Context* context) : GrpcCallHandlerBase(context) {} + virtual ~GrpcCallHandler() {} + + virtual void onSuccess(Message&& response) = 0; + +private: + void onSuccess(std::unique_ptr message) override { + onSuccess(message->proto()); + } +}; + +class GrpcStreamHandlerBase { +public: + GrpcStreamHandlerBase(Context* context) : context_(context) {} + virtual ~GrpcStreamHandlerBase() {} + + // NB: with end_of_stream == true, callbacks can still occur: reset() to prevent further callbacks. + void send(StringView message, bool end_of_stream); + void close(); // NB: callbacks can still occur: reset() to prevent further callbacks. + void reset(); + + virtual void onCreateInitialMetadata() = 0; + virtual void onReceiveInitialMetadata() = 0; + virtual void onReceiveTrailingMetadata() = 0; + virtual void onReceive(std::unique_ptr message) = 0; + virtual void onRemoteClose(GrpcStatus status, std::unique_ptr error_message) = 0; + +protected: + friend class Context; + + void doRemoteClose(GrpcStatus status, std::unique_ptr error_message); + + bool local_close_ = false; + bool remote_close_ = false; + Context* const context_; + uint32_t token_; +}; + +template +class GrpcStreamHandler : public GrpcStreamHandlerBase { +public: + GrpcStreamHandler(Context* context) : GrpcStreamHandlerBase(context) {} + virtual ~GrpcStreamHandler() {} + + void send(const Request& message, bool end_of_stream) { + std::string output; + if (!message.SerializeToString(&output)) { + return; + } + GrpcStreamHandlerBase::send(output, end_of_stream); + local_close_ = local_close_ || end_of_stream; + } + + virtual void onReceive(Response&& message) = 0; + +private: + void onReceive(std::unique_ptr message) override { + onReceive(message->proto()); + } +}; + +// Context for a stream. The distinguished context id == 0 is used for non-stream calls. 
+class Context { +public: + explicit Context(uint32_t id) : id_(id) {} + virtual ~Context() {} + + uint32_t id() { return id_; } + + static std::unique_ptr New(uint32_t id); // For subclassing. + + // Called once when the filter loads and on configuration changes. + virtual void onConfigure(std::unique_ptr /* configuration */) {} + // Called once when the filter loads. + virtual void onStart() {} + + // Called on individual requests/response streams. + virtual void onCreate() {} + virtual FilterHeadersStatus onRequestHeaders() { return FilterHeadersStatus::Continue; } + virtual FilterMetadataStatus onRequestMetadata() { return FilterMetadataStatus::Continue; } + virtual FilterDataStatus onRequestBody(size_t /* body_buffer_length */, bool /* end_of_stream */) { + return FilterDataStatus::Continue; + } + virtual FilterTrailersStatus onRequestTrailers() { return FilterTrailersStatus::Continue; } + virtual FilterHeadersStatus onResponseHeaders() { return FilterHeadersStatus::Continue; } + virtual FilterMetadataStatus onResponseMetadata() { return FilterMetadataStatus::Continue; } + virtual FilterDataStatus onResponseBody(size_t /* body_buffer_length */, bool /* end_of_stream */) { + return FilterDataStatus::Continue; + } + virtual FilterTrailersStatus onResponseTrailers() { return FilterTrailersStatus::Continue; } + virtual void onDone() {} + virtual void onLog() {} + virtual void onDelete() {} + virtual void onTick() {} + + // Low level HTTP/gRPC interface. 
+ virtual void onHttpCallResponse(uint32_t token, std::unique_ptr header_pairs, + std::unique_ptr body, + std::unique_ptr trailer_pairs); + virtual void onGrpcCreateInitialMetadata(uint32_t token); + virtual void onGrpcReceiveInitialMetadata(uint32_t token); + virtual void onGrpcReceiveTrailingMetadata(uint32_t token); + virtual void onGrpcReceive(uint32_t token, std::unique_ptr message); + virtual void onGrpcClose(uint32_t token, GrpcStatus status, std::unique_ptr message); + + // Default high level HTTP/gRPC interface. NB: overriding the low level interface will disable this interface. + using HttpCallCallback = std::function header_pairs, + std::unique_ptr body, std::unique_ptr trailer_pairs)>; + using GrpcSimpleCallCallback = std::function message)>; + void httpCall(StringView uri, const HeaderStringPairs& request_headers, + StringView request_body, const HeaderStringPairs& request_trailers, + uint32_t timeout_milliseconds, HttpCallCallback callback); + // NB: the message is the response if status == OK and an error message otherwise. 
+ void grpcSimpleCall(StringView service, StringView service_name, StringView method_name, + const google::protobuf::MessageLite &request, uint32_t timeout_milliseconds, GrpcSimpleCallCallback callback); + template void grpcSimpleCall(StringView service, StringView service_name, + StringView method_name, const google::protobuf::MessageLite &request, uint32_t timeout_milliseconds, + std::function success_callback, + std::function failure_callback) { + auto callback = [success_callback, failure_callback](GrpcStatus status, std::unique_ptr message) { + if (status == GrpcStatus::OK) { + success_callback(message->proto()); + } else { + failure_callback(status, message->view()); + } + }; + grpcSimpleCall(service, service_name, method_name, request, timeout_milliseconds, callback); + } + void grpcCallHandler(StringView service, StringView service_name, + StringView method_name, const google::protobuf::MessageLite &request, uint32_t timeout_milliseconds, + std::unique_ptr handler); + void grpcStreamHandler(StringView service, StringView service_name, + StringView method_name, std::unique_ptr handler); + + // Metadata + bool isImmutable(MetadataType type); + virtual bool isProactivelyCachable(MetadataType type); // Cache all keys on any read. + // Caching Metadata calls. Note: "name" refers to the metadata namespace. 
+ google::protobuf::Value metadataValue(MetadataType type, StringView key); + google::protobuf::Value requestRouteMetadataValue(StringView key); + google::protobuf::Value responseRouteMetadataValue(StringView key); + google::protobuf::Value logMetadataValue(StringView key); + google::protobuf::Value requestMetadataValue(StringView key); + google::protobuf::Value responseMetadataValue(StringView key); + google::protobuf::Value nodeMetadataValue(StringView key); + google::protobuf::Value namedMetadataValue(MetadataType type, StringView name, StringView key); + google::protobuf::Value requestMetadataValue(StringView name, StringView key); + google::protobuf::Value responseMetadataValue(StringView name, StringView key); + google::protobuf::Struct metadataStruct(MetadataType type, StringView name = ""); + google::protobuf::Struct requestRouteMetadataStruct(); + google::protobuf::Struct responseRouteMetadataStruct(); + google::protobuf::Struct nodeMetadataStruct(); + google::protobuf::Struct logMetadataStruct(StringView name = ""); + google::protobuf::Struct requestMetadataStruct(StringView name = ""); + google::protobuf::Struct responseMetadataStruct(StringView name = ""); + // Uncached Metadata calls. 
+ google::protobuf::Value getRequestMetadataValue(StringView key); + google::protobuf::Value getResponseMetadataValue(StringView key); + google::protobuf::Struct getRequestMetadataStruct(StringView name); + google::protobuf::Struct getResponseMetadataStruct(StringView name); + +private: + friend class GrpcCallHandlerBase; + friend class GrpcStreamHandlerBase; + + uint32_t id_; + std::unordered_map, google::protobuf::Value, PairHash> value_cache_; + std::unordered_map, google::protobuf::Value, Tuple3Hash> name_value_cache_; + std::unordered_map, google::protobuf::Struct, PairHash> struct_cache_; + std::unordered_map http_calls_; + std::unordered_map simple_grpc_calls_; + std::unordered_map> grpc_calls_; + std::unordered_map> grpc_streams_; +}; + +inline bool Context::isImmutable(MetadataType type) { + switch (type) { + case MetadataType::Request: + case MetadataType::Response: + return false; + default: + return true; + } +} + +// Override in subclasses to proactively cache certain types of metadata. 
+// Returns true for metadata types whose values are immutable for the life of
+// the proxy (currently only Node), so that all pairs can be fetched and
+// cached in a single host call instead of one call per key.
+inline bool Context::isProactivelyCachable(MetadataType type) {
+  switch (type) {
+  case MetadataType::Node:
+    return true;
+  default:
+    return false;
+  }
+}
+
+// StreamInfo
+// Returns the protocol of the request or response stream as reported by the
+// host.
+inline WasmDataPtr getProtocol(StreamType type) {
+  const char* ptr = nullptr;
+  size_t size = 0;
+  proxy_getProtocol(type, &ptr, &size);
+  return std::make_unique<WasmData>(ptr, size);
+}
+
+// Metadata
+// Fetches the serialized metadata value for 'key'; the returned WasmData is
+// empty when the key is absent.
+inline WasmDataPtr getMetadata(MetadataType type, StringView key) {
+  const char* value_ptr = nullptr;
+  size_t value_size = 0;
+  proxy_getMetadata(type, key.data(), key.size(), &value_ptr, &value_size);
+  return std::make_unique<WasmData>(value_ptr, value_size);
+}
+
+// Fetches and deserializes the metadata value for 'key'. Returns a
+// default-constructed Value when the key is absent or fails to parse.
+inline google::protobuf::Value getMetadataValue(MetadataType type, StringView key) {
+  const char* value_ptr = nullptr;
+  size_t value_size = 0;
+  proxy_getMetadata(type, key.data(), key.size(), &value_ptr, &value_size);
+  if (!value_size) {
+    return {};
+  }
+  google::protobuf::Value value;
+  if (!value.ParseFromArray(value_ptr, value_size)) {
+    return {};
+  }
+  return value;
+}
+
+inline std::string getMetadataStringValue(MetadataType type, StringView key) {
+  return getMetadataValue(type, key).string_value();
+}
+
+inline void setMetadata(MetadataType type, StringView key, StringView value) {
+  proxy_setMetadata(type, key.data(), key.size(), value.data(), value.size());
+}
+
+// Serializes 'value' and stores it under 'key'. A value which fails to
+// serialize is silently dropped (best effort, matching the getters).
+inline void setMetadataValue(MetadataType type, StringView key,
+                             const google::protobuf::Value& value) {
+  std::string output;
+  if (!value.SerializeToString(&output)) {
+    return;
+  }
+  proxy_setMetadata(type, key.data(), key.size(), output.data(), output.size());
+}
+
+inline void setMetadataStringValue(MetadataType type, StringView key, StringView s) {
+  google::protobuf::Value value;
+  value.set_string_value(s.data(), s.size());
+  setMetadataValue(type, key, value);
+}
+
+// Fetches all key/value pairs for the given metadata type in one host call.
+inline WasmDataPtr getMetadataValuePairs(MetadataType type) {
+  const char* value_ptr = nullptr;
+  size_t value_size = 0;
+  proxy_getMetadataPairs(type, &value_ptr, &value_size);
+  return std::make_unique<WasmData>(value_ptr, value_size);
+}
+
+// Fetches and deserializes a named metadata Struct. Returns a
+// default-constructed Struct when absent or unparsable.
+inline google::protobuf::Struct getMetadataStruct(MetadataType type, StringView name) {
+  const char* value_ptr = nullptr;
+  size_t value_size = 0;
+  proxy_getMetadataStruct(type, name.data(), name.size(), &value_ptr, &value_size);
+  if (!value_size) {
+    return {};
+  }
+  google::protobuf::Struct s;
+  if (!s.ParseFromArray(value_ptr, value_size)) {
+    return {};
+  }
+  return s;
+}
+
+inline void setMetadataStruct(MetadataType type, StringView name,
+                              const google::protobuf::Struct& s) {
+  std::string output;
+  if (!s.SerializeToString(&output)) {
+    return;
+  }
+  proxy_setMetadataStruct(type, name.data(), name.size(), output.data(), output.size());
+}
+
+// Cached metadata lookup. Immutable (proactively cachable) types are fetched
+// wholesale and every pair is cached; other types are fetched and cached
+// per key.
+inline google::protobuf::Value Context::metadataValue(MetadataType type, StringView key) {
+  auto cache_key = std::make_pair(type, std::string(key));
+  auto it = value_cache_.find(cache_key);
+  if (it != value_cache_.end()) {
+    return it->second;
+  }
+  if (isProactivelyCachable(type)) {
+    auto values = getMetadataValuePairs(type);
+    for (auto& p : values->pairs()) {
+      google::protobuf::Value value;
+      if (value.ParseFromArray(p.second.data(), p.second.size())) {
+        // Bug fix: cache each pair under its own key 'k'. The original stored
+        // every pair under the requested key (cache_key), repeatedly
+        // overwriting that one entry and never populating the cache.
+        auto k = std::make_pair(type, std::string(p.first));
+        value_cache_[k] = value;
+      }
+    }
+    auto cached = value_cache_.find(cache_key);
+    if (cached != value_cache_.end()) {
+      return cached->second;
+    }
+    return {};
+  } else {
+    auto value = getMetadataValue(type, key);
+    value_cache_[cache_key] = value;
+    return value;
+  }
+}
+
+inline google::protobuf::Value Context::requestRouteMetadataValue(StringView key) {
+  return metadataValue(MetadataType::RequestRoute, key);
+}
+
+inline google::protobuf::Value Context::responseRouteMetadataValue(StringView key) {
+  return metadataValue(MetadataType::ResponseRoute, key);
+}
+
+inline google::protobuf::Value Context::logMetadataValue(StringView key) {
+  return metadataValue(MetadataType::Log, key);
+}
+
+inline google::protobuf::Value Context::requestMetadataValue(StringView key) {
+  return metadataValue(MetadataType::Request, key);
+}
+
+inline google::protobuf::Value Context::responseMetadataValue(StringView key) {
+  return metadataValue(MetadataType::Response, key);
+}
+
+inline google::protobuf::Value Context::nodeMetadataValue(StringView key) {
+  return metadataValue(MetadataType::Node, key);
+}
+
+// Looks up 'key' inside the named metadata Struct, filling the per-name value
+// cache (and the struct cache) on the first miss.
+inline google::protobuf::Value Context::namedMetadataValue(MetadataType type, StringView name,
+                                                           StringView key) {
+  auto n = std::string(name);
+  auto cache_key = std::make_tuple(type, n, std::string(key));
+  auto it = name_value_cache_.find(cache_key);
+  if (it != name_value_cache_.end()) {
+    return it->second;
+  }
+  auto s = metadataStruct(type, name);
+  for (auto& f : s.fields()) {
+    auto k = std::make_tuple(type, n, f.first);
+    name_value_cache_[k] = f.second;
+  }
+  struct_cache_[std::make_pair(type, n)] = std::move(s);
+  it = name_value_cache_.find(cache_key);
+  if (it != name_value_cache_.end()) {
+    return it->second;
+  }
+  return {};
+}
+
+inline google::protobuf::Value Context::requestMetadataValue(StringView name, StringView key) {
+  return namedMetadataValue(MetadataType::Request, name, key);
+}
+
+inline google::protobuf::Value Context::responseMetadataValue(StringView name, StringView key) {
+  return namedMetadataValue(MetadataType::Response, name, key);
+}
+
+// Cached fetch of a named metadata Struct.
+inline google::protobuf::Struct Context::metadataStruct(MetadataType type, StringView name) {
+  auto cache_key = std::make_pair(type, std::string(name));
+  auto it = struct_cache_.find(cache_key);
+  if (it != struct_cache_.end()) {
+    return it->second;
+  }
+  // Bug fix: fetch the requested metadata type. The original hard-coded
+  // MetadataType::Request here, so response/node/log struct lookups returned
+  // (and cached, under the correct key) request metadata.
+  auto s = getMetadataStruct(type, name);
+  struct_cache_[cache_key] = s;
+  return s;
+}
+
+inline google::protobuf::Struct Context::requestRouteMetadataStruct() {
+  return metadataStruct(MetadataType::RequestRoute);
+}
+
+inline google::protobuf::Struct Context::responseRouteMetadataStruct() {
+  return metadataStruct(MetadataType::ResponseRoute);
+}
+
+inline google::protobuf::Struct Context::nodeMetadataStruct() {
+  return metadataStruct(MetadataType::Node);
+}
+// Named metadata Struct accessors (cached via Context::metadataStruct).
+inline google::protobuf::Struct Context::logMetadataStruct(StringView name) {
+  return metadataStruct(MetadataType::Log, name);
+}
+
+inline google::protobuf::Struct Context::requestMetadataStruct(StringView name) {
+  return metadataStruct(MetadataType::Request, name);
+}
+
+inline google::protobuf::Struct Context::responseMetadataStruct(StringView name) {
+  return metadataStruct(MetadataType::Response, name);
+}
+
+// Uncached accessors: these always hit the host, bypassing the caches above.
+inline google::protobuf::Value Context::getRequestMetadataValue(StringView key) {
+  return getMetadataValue(MetadataType::Request, key);
+}
+
+inline google::protobuf::Value Context::getResponseMetadataValue(StringView key) {
+  return getMetadataValue(MetadataType::Response, key);
+}
+
+inline google::protobuf::Struct Context::getRequestMetadataStruct(StringView name) {
+  return getMetadataStruct(MetadataType::Request, name);
+}
+
+inline google::protobuf::Struct Context::getResponseMetadataStruct(StringView name) {
+  return getMetadataStruct(MetadataType::Response, name);
+}
+
+// Continue
+inline void continueRequest() { proxy_continueRequest(); }
+inline void continueResponse() { proxy_continueResponse(); }
+
+// Shared
+// Reads a shared key/value entry. When 'cas' is non-null it receives the
+// entry's compare-and-swap token for use with setSharedData().
+inline WasmDataPtr getSharedData(StringView key, uint32_t* cas = nullptr) {
+  uint32_t dummy_cas;
+  const char* value_ptr = nullptr;
+  size_t value_size = 0;
+  if (!cas)
+    cas = &dummy_cas;
+  proxy_getSharedData(key.data(), key.size(), &value_ptr, &value_size, cas);
+  return std::make_unique<WasmData>(value_ptr, value_size);
+}
+
+// Sets a shared key/value entry; fails (returns false) when 'cas' is non-zero
+// and does not match the entry's current token.
+inline bool setSharedData(StringView key, StringView value, uint32_t cas = 0) {
+  return proxy_setSharedData(key.data(), key.size(), value.data(), value.size(), cas);
+}
+
+// Headers/Trailers
+inline void addHeaderMapValue(HeaderMapType type, StringView key, StringView value) {
+  proxy_addHeaderMapValue(type, key.data(), key.size(), value.data(), value.size());
+}
+
+inline WasmDataPtr getHeaderMapValue(HeaderMapType type, StringView key) {
+  const char* value_ptr = nullptr;
+  size_t value_size = 0;
+  proxy_getHeaderMapValue(type, key.data(), key.size(), &value_ptr, &value_size);
+  return std::make_unique<WasmData>(value_ptr, value_size);
+}
+
+inline void replaceHeaderMapValue(HeaderMapType type, StringView key, StringView value) {
+  proxy_replaceHeaderMapValue(type, key.data(), key.size(), value.data(), value.size());
+}
+
+inline void removeHeaderMapValue(HeaderMapType type, StringView key) {
+  proxy_removeHeaderMapValue(type, key.data(), key.size());
+}
+
+inline WasmDataPtr getHeaderMapPairs(HeaderMapType type) {
+  const char* ptr = nullptr;
+  size_t size = 0;
+  proxy_getHeaderMapPairs(type, &ptr, &size);
+  return std::make_unique<WasmData>(ptr, size);
+}
+
+inline void setHeaderMapPairs(HeaderMapType type, const HeaderStringPairs& pairs) {
+  const char* ptr = nullptr;
+  size_t size = 0;
+  exportPairs(pairs, &ptr, &size);
+  proxy_setHeaderMapPairs(type, ptr, size);
+}
+
+// Typed convenience wrappers over the HeaderMapType operations above.
+inline void addRequestHeader(StringView key, StringView value) {
+  addHeaderMapValue(HeaderMapType::RequestHeaders, key, value);
+}
+inline WasmDataPtr getRequestHeader(StringView key) {
+  return getHeaderMapValue(HeaderMapType::RequestHeaders, key);
+}
+inline void replaceRequestHeader(StringView key, StringView value) {
+  replaceHeaderMapValue(HeaderMapType::RequestHeaders, key, value);
+}
+inline void removeRequestHeader(StringView key) {
+  removeHeaderMapValue(HeaderMapType::RequestHeaders, key);
+}
+inline WasmDataPtr getRequestHeaderPairs() {
+  return getHeaderMapPairs(HeaderMapType::RequestHeaders);
+}
+inline void setRequestHeaderPairs(const HeaderStringPairs& pairs) {
+  return setHeaderMapPairs(HeaderMapType::RequestHeaders, pairs);
+}
+
+inline void addRequestTrailer(StringView key, StringView value) {
+  addHeaderMapValue(HeaderMapType::RequestTrailers, key, value);
+}
+inline WasmDataPtr getRequestTrailer(StringView key) {
+  return getHeaderMapValue(HeaderMapType::RequestTrailers, key);
+}
+inline void replaceRequestTrailer(StringView key, StringView value) {
+  replaceHeaderMapValue(HeaderMapType::RequestTrailers, key, value);
+}
+inline void removeRequestTrailer(StringView key) {
+  removeHeaderMapValue(HeaderMapType::RequestTrailers, key);
+}
+inline WasmDataPtr getRequestTrailerPairs() {
+  return getHeaderMapPairs(HeaderMapType::RequestTrailers);
+}
+inline void setRequestTrailerPairs(const HeaderStringPairs& pairs) {
+  return setHeaderMapPairs(HeaderMapType::RequestTrailers, pairs);
+}
+
+inline void addResponseHeader(StringView key, StringView value) {
+  addHeaderMapValue(HeaderMapType::ResponseHeaders, key, value);
+}
+inline WasmDataPtr getResponseHeader(StringView key) {
+  return getHeaderMapValue(HeaderMapType::ResponseHeaders, key);
+}
+inline void replaceResponseHeader(StringView key, StringView value) {
+  replaceHeaderMapValue(HeaderMapType::ResponseHeaders, key, value);
+}
+inline void removeResponseHeader(StringView key) {
+  removeHeaderMapValue(HeaderMapType::ResponseHeaders, key);
+}
+inline WasmDataPtr getResponseHeaderPairs() {
+  return getHeaderMapPairs(HeaderMapType::ResponseHeaders);
+}
+inline void setResponseHeaderPairs(const HeaderStringPairs& pairs) {
+  return setHeaderMapPairs(HeaderMapType::ResponseHeaders, pairs);
+}
+
+inline void addResponseTrailer(StringView key, StringView value) {
+  addHeaderMapValue(HeaderMapType::ResponseTrailers, key, value);
+}
+inline WasmDataPtr getResponseTrailer(StringView key) {
+  return getHeaderMapValue(HeaderMapType::ResponseTrailers, key);
+}
+inline void replaceResponseTrailer(StringView key, StringView value) {
+  replaceHeaderMapValue(HeaderMapType::ResponseTrailers, key, value);
+}
+inline void removeResponseTrailer(StringView key) {
+  removeHeaderMapValue(HeaderMapType::ResponseTrailers, key);
+}
+inline WasmDataPtr getResponseTrailerPairs() {
+  return getHeaderMapPairs(HeaderMapType::ResponseTrailers);
+}
+inline void setResponseTrailerPairs(const HeaderStringPairs& pairs) {
+  return setHeaderMapPairs(HeaderMapType::ResponseTrailers, pairs);
+}
+// Body
+inline WasmDataPtr getRequestBodyBufferBytes(size_t start, size_t length) {
+  const char* ptr = nullptr;
+  size_t size = 0;
+  proxy_getRequestBodyBufferBytes(start, length, &ptr, &size);
+  return std::make_unique<WasmData>(ptr, size);
+}
+
+inline WasmDataPtr getResponseBodyBufferBytes(size_t start, size_t length) {
+  const char* ptr = nullptr;
+  size_t size = 0;
+  proxy_getResponseBodyBufferBytes(start, length, &ptr, &size);
+  return std::make_unique<WasmData>(ptr, size);
+}
+
+// HTTP
+
+// Serializes 'headers' into a single malloc'd buffer understood by the host:
+// [int32 count][int32 key_size, int32 value_size]*[key\0 value\0]*.
+// On return *buffer_ptr is owned by the caller (release with ::free); it is
+// nullptr when 'headers' is empty.
+inline void MakeHeaderStringPairsBuffer(const HeaderStringPairs& headers, void** buffer_ptr,
+                                        size_t* size_ptr) {
+  if (headers.empty()) {
+    *buffer_ptr = nullptr;
+    *size_ptr = 0;
+    return;
+  }
+  int size = 4; // number of headers
+  for (auto& p : headers) {
+    size += 8;                   // size of key, size of value
+    size += p.first.size() + 1;  // null terminated key
+    size += p.second.size() + 1; // null terminated value
+  }
+  char* buffer = static_cast<char*>(::malloc(size));
+  char* b = buffer;
+  *reinterpret_cast<int32_t*>(b) = headers.size();
+  b += sizeof(int32_t);
+  for (auto& p : headers) {
+    *reinterpret_cast<int32_t*>(b) = p.first.size();
+    b += sizeof(int32_t);
+    *reinterpret_cast<int32_t*>(b) = p.second.size();
+    b += sizeof(int32_t);
+  }
+  for (auto& p : headers) {
+    memcpy(b, p.first.data(), p.first.size());
+    b += p.first.size();
+    *b++ = 0;
+    memcpy(b, p.second.data(), p.second.size());
+    b += p.second.size();
+    *b++ = 0;
+  }
+  *buffer_ptr = buffer;
+  *size_ptr = size;
+}
+
+// Issues an asynchronous HTTP call. Returns a token used to correlate the
+// onHttpCallResponse callback; 0 indicates failure.
+inline uint32_t makeHttpCall(StringView uri, const HeaderStringPairs& request_headers,
+                             StringView request_body, const HeaderStringPairs& request_trailers,
+                             uint32_t timeout_milliseconds) {
+  void *headers_ptr = nullptr, *trailers_ptr = nullptr;
+  size_t headers_size = 0, trailers_size = 0;
+  MakeHeaderStringPairsBuffer(request_headers, &headers_ptr, &headers_size);
+  MakeHeaderStringPairsBuffer(request_trailers, &trailers_ptr, &trailers_size);
+  uint32_t result =
+      proxy_httpCall(uri.data(), uri.size(), headers_ptr, headers_size, request_body.data(),
+                     request_body.size(), trailers_ptr, trailers_size, timeout_milliseconds);
+  ::free(headers_ptr);
+  ::free(trailers_ptr);
+  return result;
+}
+
+// Low level metrics interface.
+
+inline uint32_t defineMetric(MetricType type, StringView name) {
+  return proxy_defineMetric(type, name.data(), name.size());
+}
+
+inline void incrementMetric(uint32_t metric_id, int64_t offset) {
+  proxy_incrementMetric(metric_id, offset);
+}
+
+inline void recordMetric(uint32_t metric_id, uint64_t value) {
+  proxy_recordMetric(metric_id, value);
+}
+
+inline uint64_t getMetric(uint32_t metric_id) { return proxy_getMetric(metric_id); }
+
+// Higher level metrics interface.
+
+struct MetricTag {
+  enum class TagType : uint32_t {
+    String = 0,
+    Int = 1,
+    Bool = 2,
+  };
+  std::string name;
+  TagType tagType;
+};
+
+// Shared state for tagged metrics: the fully-resolved-name -> metric_id cache
+// plus the (possibly partially resolved) name prefix and remaining tags.
+struct MetricBase {
+  MetricBase(MetricType t, const std::string& n) : type(t), name(n) {}
+  MetricBase(MetricType t, const std::string& n, std::vector<MetricTag> ts)
+      : type(t), name(n), tags(ts.begin(), ts.end()) {}
+
+  MetricType type;
+  std::string name;
+  std::string prefix;
+  std::vector<MetricTag> tags;
+  std::unordered_map<std::string, uint32_t> metric_ids;
+
+  std::string prefixWithFields(const std::vector<std::string>& fields);
+  uint32_t resolveFullName(const std::string& n);
+  uint32_t resolveWithFields(const std::vector<std::string>& fields);
+  void partiallyResolveWithFields(const std::vector<std::string>& fields);
+  std::string nameFromIdSlow(uint32_t id);
+};
+
+// Dynamically-tagged metric: tag values are supplied per call.
+struct Metric : public MetricBase {
+  Metric(MetricType t, const std::string& n) : MetricBase(t, n) {}
+  Metric(MetricType t, const std::string& n, std::vector<MetricTag> ts) : MetricBase(t, n, ts) {}
+
+  template <typename... Fields> void increment(int64_t offset, Fields... tags);
+  template <typename... Fields> void record(uint64_t value, Fields... tags);
+  template <typename... Fields> uint64_t get(Fields... tags);
+  template <typename... Fields> uint32_t resolve(Fields... tags);
+  template <typename... Fields> Metric partiallyResolve(Fields... tags);
+};
+// Builds "<prefix><tag1>.<field1>.<tag2>.<field2>..." for the supplied field
+// values; the metric name itself is appended by callers.
+inline std::string MetricBase::prefixWithFields(const std::vector<std::string>& fields) {
+  size_t s = prefix.size();
+  for (size_t i = 0; i < fields.size(); i++) {
+    s += tags[i].name.size() + 1; // 1 more for "."
+  }
+  for (auto& f : fields) {
+    s += f.size() + 1; // 1 more for "."
+  }
+  std::string n;
+  n.reserve(s);
+  n.append(prefix);
+  for (size_t i = 0; i < fields.size(); i++) {
+    n.append(tags[i].name);
+    n.append(".");
+    n.append(fields[i]);
+    n.append(".");
+  }
+  return n;
+}
+
+// Resolves a metric id for a full set of tag values; throws when the field
+// count does not match the declared tags.
+inline uint32_t MetricBase::resolveWithFields(const std::vector<std::string>& fields) {
+  if (fields.size() != tags.size()) {
+    throw ProxyException("metric fields.size() != tags.size()");
+  }
+  return resolveFullName(prefixWithFields(fields) + name);
+}
+
+// Fixes the leading tag values into 'prefix' and drops them from 'tags',
+// leaving a metric that only needs the remaining tag values.
+inline void MetricBase::partiallyResolveWithFields(const std::vector<std::string>& fields) {
+  if (fields.size() >= tags.size()) {
+    throw ProxyException("metric fields.size() >= tags.size()");
+  }
+  prefix = prefixWithFields(fields);
+  tags.erase(tags.begin(), tags.begin() + (fields.size()));
+}
+
+// Stringification of tag values used when building metric names.
+template <typename T> inline std::string ToString(T t) { return std::to_string(t); }
+
+template <> inline std::string ToString(const char* t) { return std::string(t); }
+
+template <> inline std::string ToString(std::string t) { return t; }
+
+template <> inline std::string ToString(bool t) { return t ? "true" : "false"; }
+
+// Returns the cached metric id for a fully resolved name, defining the metric
+// on first use.
+inline uint32_t MetricBase::resolveFullName(const std::string& n) {
+  auto it = metric_ids.find(n);
+  if (it == metric_ids.end()) {
+    auto metric_id = defineMetric(type, n);
+    metric_ids[n] = metric_id;
+    return metric_id;
+  }
+  return it->second;
+}
+
+// Reverse lookup (linear scan) of a metric id back to its resolved name.
+inline std::string MetricBase::nameFromIdSlow(uint32_t id) {
+  for (auto& p : metric_ids)
+    if (p.second == id)
+      return p.first;
+  return "";
+}
+
+template <typename... Fields> inline uint32_t Metric::resolve(Fields... f) {
+  std::vector<std::string> fields{ToString(f)...};
+  return resolveWithFields(fields);
+}
+
+// Returns a copy of this metric with the leading tag values baked in.
+template <typename... Fields> Metric Metric::partiallyResolve(Fields... f) {
+  std::vector<std::string> fields{ToString(f)...};
+  Metric partial_metric(*this);
+  partial_metric.partiallyResolveWithFields(fields);
+  return partial_metric;
+}
+
+template <typename... Fields> inline void Metric::increment(int64_t offset, Fields... f) {
+  std::vector<std::string> fields{ToString(f)...};
+  auto metric_id = resolveWithFields(fields);
+  incrementMetric(metric_id, offset);
+}
+
+template <typename... Fields> inline void Metric::record(uint64_t value, Fields... f) {
+  std::vector<std::string> fields{ToString(f)...};
+  auto metric_id = resolveWithFields(fields);
+  recordMetric(metric_id, value);
+}
+
+template <typename... Fields> inline uint64_t Metric::get(Fields... f) {
+  std::vector<std::string> fields{ToString(f)...};
+  auto metric_id = resolveWithFields(fields);
+  return getMetric(metric_id);
+}
+
+// Compile-time description of one tag: its name and value type.
+template <typename T> struct MetricTagDescriptor {
+  MetricTagDescriptor(StringView n) : name(n) {}
+  MetricTagDescriptor(const char* n) : name(n) {}
+  typedef T type;
+  StringView name;
+};
+
+template <typename T> inline MetricTag ToMetricTag(const MetricTagDescriptor<T>&) { return {}; }
+
+template <> inline MetricTag ToMetricTag(const MetricTagDescriptor<std::string>& d) {
+  return {std::string(d.name), MetricTag::TagType::String};
+}
+
+template <> inline MetricTag ToMetricTag(const MetricTagDescriptor<StringView>& d) {
+  return {std::string(d.name), MetricTag::TagType::String};
+}
+
+template <> inline MetricTag ToMetricTag(const MetricTagDescriptor<const char*>& d) {
+  return {std::string(d.name), MetricTag::TagType::String};
+}
+
+template <> inline MetricTag ToMetricTag(const MetricTagDescriptor<int>& d) {
+  return {std::string(d.name), MetricTag::TagType::Int};
+}
+
+template <> inline MetricTag ToMetricTag(const MetricTagDescriptor<bool>& d) {
+  return {std::string(d.name), MetricTag::TagType::Bool};
+}
+
+// Handle to a fully resolved counter.
+struct SimpleCounter {
+  SimpleCounter(uint32_t id) : metric_id(id) {}
+
+  // Bug fix: increment must add 'offset' via incrementMetric; the original
+  // called recordMetric, which overwrites the counter with 'offset' instead of
+  // adding to it (and is inconsistent with Counter<...>::increment below).
+  void increment(int64_t offset) { incrementMetric(metric_id, offset); }
+  void record(int64_t offset) { increment(offset); }
+  uint64_t get() { return getMetric(metric_id); }
+  void operator++() { increment(1); }
+  void operator++(int) { increment(1); }
+
+  uint32_t metric_id;
+};
+// Handle to a fully resolved gauge.
+struct SimpleGauge {
+  SimpleGauge(uint32_t id) : metric_id(id) {}
+
+  void record(uint64_t offset) { recordMetric(metric_id, offset); }
+  uint64_t get() { return getMetric(metric_id); }
+
+  uint32_t metric_id;
+};
+
+// Handle to a fully resolved histogram.
+struct SimpleHistogram {
+  SimpleHistogram(uint32_t id) : metric_id(id) {}
+
+  void record(int64_t offset) { recordMetric(metric_id, offset); }
+
+  uint32_t metric_id;
+};
+
+// Statically-tagged counter: tag value types are checked at compile time.
+template <typename... Tags> struct Counter : public MetricBase {
+  static Counter<Tags...>* New(StringView name, MetricTagDescriptor<Tags>... fieldnames);
+
+  SimpleCounter resolve(Tags... f) {
+    std::vector<std::string> fields{ToString(f)...};
+    return SimpleCounter(resolveWithFields(fields));
+  }
+
+  // Bakes this counter's tag values in and appends additional tags.
+  template <typename... T>
+  Counter<Tags..., T...>* resolveAndExtend(Tags... f, MetricTagDescriptor<T>... fieldnames) {
+    std::vector<std::string> fields{ToString(f)...};
+    auto new_counter = Counter<Tags..., T...>::New(name, fieldnames...);
+    new_counter->prefix = prefixWithFields(fields);
+    return new_counter;
+  }
+
+  void increment(int64_t offset, Tags... tags) {
+    std::vector<std::string> fields{ToString(tags)...};
+    auto metric_id = resolveWithFields(fields);
+    incrementMetric(metric_id, offset);
+  }
+
+  void record(int64_t offset, Tags... tags) { increment(offset, tags...); }
+
+  uint64_t get(Tags... tags) {
+    std::vector<std::string> fields{ToString(tags)...};
+    auto metric_id = resolveWithFields(fields);
+    return getMetric(metric_id);
+  }
+
+private:
+  Counter(const std::string& name, std::vector<MetricTag> tags)
+      : MetricBase(MetricType::Counter, name, tags) {}
+};
+
+template <typename... Tags>
+inline Counter<Tags...>* Counter<Tags...>::New(StringView name,
+                                               MetricTagDescriptor<Tags>... descriptors) {
+  return new Counter<Tags...>(std::string(name),
+                              std::vector<MetricTag>({ToMetricTag(descriptors)...}));
+}
+
+// Statically-tagged gauge.
+template <typename... Tags> struct Gauge : public MetricBase {
+  static Gauge<Tags...>* New(StringView name, MetricTagDescriptor<Tags>... fieldnames);
+
+  SimpleGauge resolve(Tags... f) {
+    std::vector<std::string> fields{ToString(f)...};
+    return SimpleGauge(resolveWithFields(fields));
+  }
+
+  template <typename... T>
+  Gauge<Tags..., T...>* resolveAndExtend(Tags... f, MetricTagDescriptor<T>... fieldnames) {
+    std::vector<std::string> fields{ToString(f)...};
+    auto new_gauge = Gauge<Tags..., T...>::New(name, fieldnames...);
+    new_gauge->prefix = prefixWithFields(fields);
+    return new_gauge;
+  }
+
+  void record(int64_t offset, Tags... tags) {
+    std::vector<std::string> fields{ToString(tags)...};
+    auto metric_id = resolveWithFields(fields);
+    recordMetric(metric_id, offset);
+  }
+
+  uint64_t get(Tags... tags) {
+    std::vector<std::string> fields{ToString(tags)...};
+    auto metric_id = resolveWithFields(fields);
+    return getMetric(metric_id);
+  }
+
+private:
+  Gauge(const std::string& name, std::vector<MetricTag> tags)
+      : MetricBase(MetricType::Gauge, name, tags) {}
+};
+
+template <typename... Tags>
+inline Gauge<Tags...>* Gauge<Tags...>::New(StringView name,
+                                           MetricTagDescriptor<Tags>... descriptors) {
+  return new Gauge<Tags...>(std::string(name),
+                            std::vector<MetricTag>({ToMetricTag(descriptors)...}));
+}
+
+// Statically-tagged histogram.
+template <typename... Tags> struct Histogram : public MetricBase {
+  static Histogram<Tags...>* New(StringView name, MetricTagDescriptor<Tags>... fieldnames);
+
+  SimpleHistogram resolve(Tags... f) {
+    std::vector<std::string> fields{ToString(f)...};
+    return SimpleHistogram(resolveWithFields(fields));
+  }
+
+  template <typename... T>
+  Histogram<Tags..., T...>* resolveAndExtend(Tags... f, MetricTagDescriptor<T>... fieldnames) {
+    std::vector<std::string> fields{ToString(f)...};
+    auto new_histogram = Histogram<Tags..., T...>::New(name, fieldnames...);
+    new_histogram->prefix = prefixWithFields(fields);
+    return new_histogram;
+  }
+
+  void record(int64_t offset, Tags... tags) {
+    std::vector<std::string> fields{ToString(tags)...};
+    auto metric_id = resolveWithFields(fields);
+    recordMetric(metric_id, offset);
+  }
+
+private:
+  Histogram(const std::string& name, std::vector<MetricTag> tags)
+      : MetricBase(MetricType::Histogram, name, tags) {}
+};
+
+template <typename... Tags>
+inline Histogram<Tags...>* Histogram<Tags...>::New(StringView name,
+                                                   MetricTagDescriptor<Tags>... descriptors) {
+  return new Histogram<Tags...>(std::string(name),
+                                std::vector<MetricTag>({ToMetricTag(descriptors)...}));
+}
+
+// Starts a unary gRPC call; returns a token correlating the onGrpc* callbacks
+// (0 on failure).
+inline uint32_t grpcCall(StringView service, StringView service_name, StringView method_name,
+                         const google::protobuf::MessageLite& request,
+                         uint32_t timeout_milliseconds) {
+  std::string serialized_request;
+  request.SerializeToString(&serialized_request);
+  return proxy_grpcCall(service.data(), service.size(), service_name.data(), service_name.size(),
+                        method_name.data(), method_name.size(), serialized_request.data(),
+                        serialized_request.size(), timeout_milliseconds);
+}
+
+// Opens a gRPC stream; returns a token correlating the onGrpc* callbacks
+// (0 on failure).
+inline uint32_t grpcStream(StringView service, StringView service_name, StringView method_name) {
+  return proxy_grpcStream(service.data(), service.size(), service_name.data(), service_name.size(),
+                          method_name.data(), method_name.size());
+}
+
+inline void grpcCancel(uint32_t token) { return proxy_grpcCancel(token); }
+
+inline void grpcClose(uint32_t token) { return proxy_grpcClose(token); }
+
+inline void grpcSend(uint32_t token, StringView message, bool end_stream) {
+  return proxy_grpcSend(token, message.data(), message.size(), end_stream ? 1 : 0);
+}
1 : 0); +} + +inline void Context::httpCall(StringView uri, const HeaderStringPairs& request_headers, + StringView request_body, const HeaderStringPairs& request_trailers, + uint32_t timeout_milliseconds, HttpCallCallback callback) { + auto token = makeHttpCall(uri, request_headers, request_body, request_trailers, timeout_milliseconds); + if (token) { + http_calls_[token] = std::move(callback); + } else { + throw ProxyException("httpCall failed"); + } +} + +inline void Context::onHttpCallResponse(uint32_t token, std::unique_ptr header_pairs, + std::unique_ptr body, std::unique_ptr trailer_pairs) { + auto it = http_calls_.find(token); + if (it != http_calls_.end()) { + it->second(std::move(header_pairs), std::move(body), std::move(trailer_pairs)); + http_calls_.erase(token); + } +} + +inline void Context::grpcSimpleCall(StringView service, StringView service_name, StringView method_name, + const google::protobuf::MessageLite &request, uint32_t timeout_milliseconds, Context::GrpcSimpleCallCallback callback) { + auto token = grpcCall(service, service_name, method_name, request, timeout_milliseconds); + if (token) { + simple_grpc_calls_[token] = std::move(callback); + } else { + throw ProxyException("grpcCall failed"); + } +} + +inline void GrpcCallHandlerBase::cancel() { + grpcCancel(token_); + context_->grpc_calls_.erase(token_); +} + +inline void GrpcStreamHandlerBase::reset() { + grpcCancel(token_); + context_->grpc_streams_.erase(token_); +} + +inline void GrpcStreamHandlerBase::close() { + grpcClose(token_); + local_close_ = true; + if (local_close_ && remote_close_) { + context_->grpc_streams_.erase(token_); + } + // NB: else callbacks can still occur: reset() to prevent further callbacks. +} + +inline void GrpcStreamHandlerBase::send(StringView message, bool end_of_stream) { + grpcSend(token_, message, end_of_stream); + if (end_of_stream) { + // NB: callbacks can still occur: reset() to prevent further callbacks. 
+ local_close_ = local_close_ || end_of_stream; + if (local_close_ && remote_close_) { + context_->grpc_streams_.erase(token_); + } + } +} + +inline void Context::onGrpcCreateInitialMetadata(uint32_t token) { + { + auto it = grpc_calls_.find(token); + if (it != grpc_calls_.end()) { + it->second->onCreateInitialMetadata(); + return; + } + } + { + auto it = grpc_streams_.find(token); + if (it != grpc_streams_.end()) { + it->second->onCreateInitialMetadata(); + return; + } + } +} + +inline void Context::onGrpcReceiveInitialMetadata(uint32_t token) { + { + auto it = grpc_streams_.find(token); + if (it != grpc_streams_.end()) { + it->second->onReceiveInitialMetadata(); + return; + } + } +} + +inline void Context::onGrpcReceiveTrailingMetadata(uint32_t token) { + { + auto it = grpc_streams_.find(token); + if (it != grpc_streams_.end()) { + it->second->onReceiveTrailingMetadata(); + return; + } + } +} + +inline void Context::onGrpcReceive(uint32_t token, std::unique_ptr message) { + { + auto it = simple_grpc_calls_.find(token); + if (it != simple_grpc_calls_.end()) { + it->second(GrpcStatus::OK, std::move(message)); + simple_grpc_calls_.erase(token); + return; + } + } + { + auto it = grpc_calls_.find(token); + if (it != grpc_calls_.end()) { + it->second->onSuccess(std::move(message)); + grpc_calls_.erase(token); + return; + } + } + { + auto it = grpc_streams_.find(token); + if (it != grpc_streams_.end()) { + it->second->onReceive(std::move(message)); + grpc_streams_.erase(token); + return; + } + } +} + +inline void GrpcStreamHandlerBase::doRemoteClose(GrpcStatus status, std::unique_ptr error_message) { + auto context = context_; + auto token = token_; + this->onRemoteClose(status, std::move(error_message)); + if (context->grpc_streams_.find(token) != context->grpc_streams_.end()) { + // We have not been deleted, e.g. by reset() in the onRemoteCall() virtual handler. 
+ remote_close_ = true; + if (local_close_ && remote_close_) { + context_->grpc_streams_.erase(token_); + } + // else do not erase the token since we can still send in this state. + } +} + +inline void Context::onGrpcClose(uint32_t token, GrpcStatus status, std::unique_ptr message) { + { + auto it = simple_grpc_calls_.find(token); + if (it != simple_grpc_calls_.end()) { + it->second(status, std::move(message)); + simple_grpc_calls_.erase(token); + return; + } + } + { + auto it = grpc_calls_.find(token); + if (it != grpc_calls_.end()) { + it->second->onFailure(status, std::move(message)); + grpc_calls_.erase(token); + return; + } + } + { + auto it = grpc_streams_.find(token); + if (it != grpc_streams_.end()) { + it->second->doRemoteClose(status, std::move(message)); + return; + } + } +} + +inline void Context::grpcCallHandler(StringView service, StringView service_name, + StringView method_name, const google::protobuf::MessageLite &request, uint32_t timeout_milliseconds, + std::unique_ptr handler) { + auto token = grpcCall(service, service_name, method_name, request, timeout_milliseconds); + if (token) { + handler->token_ = token; + grpc_calls_[token] = std::move(handler); + } else { + throw ProxyException("grpcCall failed"); + } +} + +inline void Context::grpcStreamHandler(StringView service, StringView service_name, + StringView method_name, std::unique_ptr handler) { + auto token = grpcStream(service, service_name, method_name); + if (token) { + handler->token_ = token; + grpc_streams_[token] = std::move(handler); + } else { + throw ProxyException("grpcStream failed"); + } +} diff --git a/api/wasm/cpp/proxy_wasm_intrinsics.cc b/api/wasm/cpp/proxy_wasm_intrinsics.cc index c8177f9993d4f..42626e5fc47de 100644 --- a/api/wasm/cpp/proxy_wasm_intrinsics.cc +++ b/api/wasm/cpp/proxy_wasm_intrinsics.cc @@ -1,12 +1,14 @@ // NOLINT(namespace-envoy) #include "proxy_wasm_intrinsics.h" +std::unique_ptr NewContext(uint32_t id); + static std::unordered_map> context_map; static 
Context* ensureContext(uint32_t context_id) { auto e = context_map.insert(std::make_pair(context_id, nullptr)); if (e.second) - e.first->second = Context::New(context_id); + e.first->second = NewContext(context_id); return e.first->second.get(); } @@ -36,6 +38,13 @@ extern "C" EMSCRIPTEN_KEEPALIVE FilterHeadersStatus proxy_onRequestHeaders(uint3 return c->onRequestHeaders(); } +extern "C" EMSCRIPTEN_KEEPALIVE FilterMetadataStatus proxy_onRequestMetadata(uint32_t context_id) { + auto c = getContext(context_id); + if (!c) + return FilterMetadataStatus::Continue; + return c->onRequestMetadata(); +} + extern "C" EMSCRIPTEN_KEEPALIVE FilterDataStatus proxy_onRequestBody(uint32_t context_id, uint32_t body_buffer_length, uint32_t end_of_stream) { @@ -59,6 +68,13 @@ extern "C" EMSCRIPTEN_KEEPALIVE FilterHeadersStatus proxy_onResponseHeaders(uint return c->onResponseHeaders(); } +extern "C" EMSCRIPTEN_KEEPALIVE FilterMetadataStatus proxy_onResponseMetadata(uint32_t context_id) { + auto c = getContext(context_id); + if (!c) + return FilterMetadataStatus::Continue; + return c->onResponseMetadata(); +} + extern "C" EMSCRIPTEN_KEEPALIVE FilterDataStatus proxy_onResponseBody(uint32_t context_id, uint32_t body_buffer_length, uint32_t end_of_stream) { diff --git a/api/wasm/cpp/proxy_wasm_intrinsics.h b/api/wasm/cpp/proxy_wasm_intrinsics.h index 934a8d0dddb76..5ad9b698c5c63 100644 --- a/api/wasm/cpp/proxy_wasm_intrinsics.h +++ b/api/wasm/cpp/proxy_wasm_intrinsics.h @@ -1,1425 +1,24 @@ /* - * Intrinsic functions available to WASM modules. + * API Available to WASM modules. */ // NOLINT(namespace-envoy) -#include -#include -#include -#include -#include -#include -#ifndef EMSCRIPTEN_PROTOBUF_LITE -#include "proxy_wasm_intrinsics.pb.h" -#else -#include "proxy_wasm_intrinsics_lite.pb.h" -#endif +#pragma once #ifndef EMSCRIPTEN_KEEPALIVE #define EMSCRIPTEN_KEEPALIVE __attribute__((used)) __attribute__((visibility("default"))) #endif -// clang-format off -/* - API Calls into the VM. 
- - extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onConfigure(char* configuration, int size); - extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onStart(); - extern "C" EMSCRIPTEN_KEEPALIVE int main(); // only called if proxy_onStart() is not available. - extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onTick(); - extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onCreate(uint32_t context_id); - extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onRequestHeaders(uint32_t context_id); - extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onRequestBody(uint32_t context_id, uint32_t body_buffer_length, uint32_t end_of_stream); - extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onRequestTrailers(uint32_t context_id); - extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onRequestMetadata(uint32_t context_id); - extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onResponseHeaders(uint32_t context_id); - extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onResponseBody(uint32_t context_id, uint32_t body_buffer_length, uint32_t end_of_stream); - extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onResponseTrailers(uint32_t context_id); - extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onResponseMetadata(uint32_t context_id); - extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onHttpCallResponse(uint32_t context_id uint32_t token, uint32_t header_pairs_ptr, - uint32_t header_pairs_size, uint32_t body_ptr, uint32_t body_size, uint32_t trailer_pairs_ptr, uint32_t trailer_pairs_size): - // The stream has completed. - extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onDone(uint32_t context_id); - // onLog occurs after onDone. - extern "C" EMSCRIPTEN_KEEPALIVE void proxy_onLog(uint32_t context_id); - // The Context in the proxy has been destroyed and no further calls will be coming. 
- extern "C" ENSCRIPTEN_KEEPALIVE void proxy_onDelete(uint32_t context_id); - extern "C" EMSCRIPTEN_KEEPALIVE proxy_onGrpcCreateInitialMetadata(uint32_t context_id, uint32_t token); - extern "C" EMSCRIPTEN_KEEPALIVE proxy_onGrpcReceiveInitialMetadata(uint32_t context_id, uint32_t token); - extern "C" EMSCRIPTEN_KEEPALIVE proxy_onGrpcTrailingMetadata(uint32_t context_id, uint32_t token); - extern "C" EMSCRIPTEN_KEEPALIVE proxy_onGrpcReceive(uint32_t context_id, uint32_t token, - uint32_t response_ptr, uint32_t response_size); - extern "C" EMSCRIPTEN_KEEPALIVE proxy_onGrpcClose(uint32_t context_id, uint32_t token, - uint32_t status_code, uint32_t status_message_ptr, uint32_t status_message_size); -*/ -// clang-format on - -enum class LogLevel : uint32_t { trace, debug, info, warn, error, critical }; -extern "C" void proxy_log(LogLevel level, const char* logMessage, size_t messageSize); - -extern "C" void proxy_setTickPeriodMilliseconds(uint32_t millisecond); - -extern "C" uint64_t proxy_getCurrentTimeNanoseconds(); - -// -// Low Level API. 
-// -enum class FilterHeadersStatus : uint32_t { Continue = 0, StopIteration = 1 }; -enum class FilterTrailersStatus : uint32_t { Continue = 0, StopIteration = 1 }; -enum class FilterDataStatus : uint32_t { - Continue = 0, - StopIterationAndBuffer = 1, - StopIterationAndWatermark = 2, - StopIterationNoBuffer = 3 -}; -enum class StreamType : uint32_t { Request = 0, Response = 1 }; -enum class MetadataType : uint32_t { - Request = 0, - Response = 1, - RequestRoute = 2, // Immutable - ResponseRoute = 3, // Immutable - Log = 4, // Immutable - Node = 5 // Immutable -}; -enum class HeaderMapType : uint32_t { - RequestHeaders = 0, // During the onLog callback these are immutable - RequestTrailers = 1, // During the onLog callback these are immutable - ResponseHeaders = 2, // During the onLog callback these are immutable - ResponseTrailers = 3, // During the onLog callback these are immutable - GrpcCreateInitialMetadata = 4, - GrpcReceiveInitialMetadata = 5, // Immutable - GrpcReceiveTrailingMetadata = 6, // Immutable -}; -enum GrpcStatus : int32_t { - OK = 0, - CANCELLED = 1, - UNKNOWN = 2, - INVALID_ARGUMENT = 3, - DEADLINE_EXCEEDED = 4, - NOT_FOUND = 5, - ALREADY_EXISTS = 6, - PERMISSION_DENIED = 7, - UNAUTHENTICATED = 16, - RESOURCE_EXHAUSTED = 8, - FAILED_PRECONDITION = 9, - ABORTED = 10, - OUT_OF_RANGE = 11, - UNIMPLEMENTED = 12, - INTERNAL = 13, - UNAVAILABLE = 14, - DATA_LOSS = 15, - DO_NOT_USE = -1 -}; - -// Stream Info - -extern "C" void proxy_getProtocol(StreamType type, const char** value_ptr, size_t* value_size); - -// Metadata -extern "C" void proxy_getMetadata(MetadataType type, const char* key_ptr, size_t key_size, - const char** value_ptr_ptr, size_t* value_size_ptr); -extern "C" void proxy_setMetadata(MetadataType type, const char* key_ptr, size_t key_size, - const char* value_ptr, size_t value_size); -extern "C" void proxy_getMetadataPairs(MetadataType type, const char** value_ptr, - size_t* value_size); -extern "C" void 
proxy_getMetadataStruct(MetadataType type, const char* name_ptr, size_t name_size, - const char** value_ptr_ptr, size_t* value_size_ptr); -extern "C" void proxy_setMetadataStruct(MetadataType type, const char* name_ptr, size_t name_size, - const char* value_ptr, size_t value_size); - -// Continue -extern "C" void proxy_continueRequest(); -extern "C" void proxy_continueResponse(); - -// SharedData -extern "C" void proxy_getSharedData(const char* key_ptr, size_t key_size, const char** value_ptr, - size_t* value_size, uint32_t* cas); -// If cas != 0 and cas != the current cas for 'key' return false, otherwise set the value and -// return true. -extern "C" bool proxy_setSharedData(const char* key_ptr, size_t key_size, const char* value_ptr, - size_t value_size, uint32_t cas); - -// Headers/Trailers/Metadata HeaderMaps -extern "C" void proxy_addHeaderMapValue(HeaderMapType type, const char* key_ptr, size_t key_size, const char* value_ptr, size_t value_size); -extern "C" void proxy_getHeaderMapValue(HeaderMapType type, const char* key_ptr, size_t key_size, const char** value_ptr, size_t* value_size); -extern "C" void proxy_getHeaderMapPairs(HeaderMapType type, const char** ptr, size_t* size); -extern "C" void proxy_setHeaderMapPairs(HeaderMapType type, const char* ptr, size_t size); -extern "C" void proxy_replaceHeaderMapValue(HeaderMapType type, const char* key_ptr, size_t key_size, const char* value_ptr, size_t value_size); -extern "C" void proxy_removeHeaderMapValue(HeaderMapType type, const char* key_ptr, size_t key_size); - -// Body -extern "C" void proxy_getRequestBodyBufferBytes(uint32_t start, uint32_t length, const char** ptr, - size_t* size); -extern "C" void proxy_getResponseBodyBufferBytes(uint32_t start, uint32_t length, const char** ptr, - size_t* size); - -// HTTP -// Returns token, used in callback onHttpCallResponse -extern "C" uint32_t proxy_httpCall(const char* uri_ptr, size_t uri_size, void* header_pairs_ptr, - size_t header_pairs_size, const char* 
body_ptr, size_t body_size, - void* trailer_pairs_ptr, size_t trailer_pairs_size, - uint32_t timeout_milliseconds); -// gRPC -// Returns token, used in gRPC callbacks (onGrpc...) -extern "C" uint32_t proxy_grpcCall(const char* service_ptr, size_t service_size, const char* service_name_ptr, - size_t service_name_size, const char* method_name_ptr, size_t method_name_size, - const char* request_ptr, size_t request_size, uint32_t timeout_milliseconds); -extern "C" uint32_t proxy_grpcStream(const char* service_ptr, size_t service_size, const char* service_name_ptr, - size_t service_name_size, const char* method_name_ptr, size_t method_name_size); -extern "C" void proxy_grpcCancel(uint32_t token); -extern "C" void proxy_grpcClose(uint32_t token); -extern "C" void proxy_grpcSend(uint32_t token, const char* message_ptr, size_t message_size, uint32_t end_stream); - -// Metrics - -enum class MetricType : uint32_t { - Counter = 0, - Gauge = 1, - Histogram = 2, -}; -// Returns a metric_id which can be used to report a metric. On error returns 0. -extern "C" uint32_t proxy_defineMetric(MetricType type, const char* name_ptr, size_t name_size); -extern "C" void proxy_incrementMetric(uint32_t metric_id, int64_t offset); -extern "C" void proxy_recordMetric(uint32_t metric_id, uint64_t value); -extern "C" uint64_t proxy_getMetric(uint32_t metric_id); - -// -// High Level C++ API. 
-// -class Context; - -class ProxyException : std::runtime_error { -public: - ProxyException(const std::string& message) : std::runtime_error(message) {} -}; - -inline void logTrace(const std::string& logMessage) { - proxy_log(LogLevel::trace, logMessage.c_str(), logMessage.size()); -} -inline void logDebug(const std::string& logMessage) { - proxy_log(LogLevel::debug, logMessage.c_str(), logMessage.size()); -} -inline void logInfo(const std::string& logMessage) { - proxy_log(LogLevel::info, logMessage.c_str(), logMessage.size()); -} -inline void logWarn(const std::string& logMessage) { - proxy_log(LogLevel::warn, logMessage.c_str(), logMessage.size()); -} -inline void logError(const std::string& logMessage) { - proxy_log(LogLevel::error, logMessage.c_str(), logMessage.size()); -} -inline void logCritical(const std::string& logMessage) { - proxy_log(LogLevel::critical, logMessage.c_str(), logMessage.size()); -} - -// Buffers coming into the WASM filter. -class WasmData { -public: - WasmData(const char* data, size_t size) : data_(data), size_(size) {} - ~WasmData() { ::free((void*)data_); } - const char* data() { return data_; } - std::string_view view() { return {data_, size_}; } - std::string toString() { return std::string(view()); } - std::vector> pairs(); - template T proto() { - T p; - p.ParseFromArray(data_, size_); - return p; - } - - WasmData& operator=(const WasmData&) = delete; - WasmData(const WasmData&) = delete; - -private: - const char* data_; - size_t size_; -}; -typedef std::unique_ptr WasmDataPtr; - -inline std::vector> WasmData::pairs() { - std::vector> result; - if (!data()) - return result; - auto p = data(); - int n = *reinterpret_cast(p); - p += sizeof(int); - result.resize(n); - auto s = p + n * 8; - for (int i = 0; i < n; i++) { - int size = *reinterpret_cast(p); - p += sizeof(int); - result[i].first = std::string_view(s, size); - s += size + 1; - size = *reinterpret_cast(p); - p += sizeof(int); - result[i].second = std::string_view(s, size); 
- s += size + 1; - } - return result; -} - -template size_t pairsSize(const Pairs& result) { - size_t size = 4; // number of headers - for (auto& p : result) { - size += 8; // size of key, size of value - size += p.first.size() + 1; // null terminated key - size += p.second.size() + 1; // null terminated value - } - return size; -} - -template void marshalPairs(const Pairs& result, char* buffer) { - char* b = buffer; - *reinterpret_cast(b) = result.size(); - b += sizeof(uint32_t); - for (auto& p : result) { - *reinterpret_cast(b) = p.first.size(); - b += sizeof(uint32_t); - *reinterpret_cast(b) = p.second.size(); - b += sizeof(uint32_t); - } - for (auto& p : result) { - memcpy(b, p.first.data(), p.first.size()); - b += p.first.size(); - *b++ = 0; - memcpy(b, p.second.data(), p.second.size()); - b += p.second.size(); - *b++ = 0; - } -} - -template void exportPairs(const Pairs& pairs, const char** ptr, size_t* size_ptr) { - if (pairs.empty()) { - *ptr = nullptr; - *size_ptr = 0; - return; - } - size_t size = pairsSize(pairs); - char* buffer = static_cast(::malloc(size)); - marshalPairs(pairs, buffer); - *size_ptr = size; -} - -struct PairHash { - template std::size_t operator()(const std::pair& x) const { - return std::hash()(x.first) + std::hash()(x.second); - } -}; - -struct Tuple3Hash { - template std::size_t operator()(const std::tuple& x) const { - return std::hash()(std::get<0>(x)) + std::hash()(std::get<1>(x)) + std::hash()(std::get<2>(x)); - } -}; - -using HeaderStringPairs = std::vector>; - -class GrpcCallHandlerBase { -public: - GrpcCallHandlerBase(Context* context) : context_(context) {} - virtual ~GrpcCallHandlerBase() {} - - void cancel(); - - virtual void onCreateInitialMetadata() = 0; - virtual void onSuccess(std::unique_ptr message) = 0; - virtual void onFailure(GrpcStatus status, std::unique_ptr error_message) = 0; - -private: - friend class Context; - - Context* const context_; - uint32_t token_; -}; - -template -class GrpcCallHandler : public 
GrpcCallHandlerBase { -public: - GrpcCallHandler(Context* context) : GrpcCallHandlerBase(context) {} - virtual ~GrpcCallHandler() {} - - virtual void onSuccess(Message&& response) = 0; - -private: - void onSuccess(std::unique_ptr message) override { - onSuccess(message->proto()); - } -}; - -class GrpcStreamHandlerBase { -public: - GrpcStreamHandlerBase(Context* context) : context_(context) {} - virtual ~GrpcStreamHandlerBase() {} - - // NB: with end_of_stream == true, callbacks can still occur: reset() to prevent further callbacks. - void send(std::string_view message, bool end_of_stream); - void close(); // NB: callbacks can still occur: reset() to prevent further callbacks. - void reset(); - - virtual void onCreateInitialMetadata() = 0; - virtual void onReceiveInitialMetadata() = 0; - virtual void onReceiveTrailingMetadata() = 0; - virtual void onReceive(std::unique_ptr message) = 0; - virtual void onRemoteClose(GrpcStatus status, std::unique_ptr error_message) = 0; - -protected: - friend class Context; - - void doRemoteClose(GrpcStatus status, std::unique_ptr error_message); - - bool local_close_ = false; - bool remote_close_ = false; - Context* const context_; - uint32_t token_; -}; - -template -class GrpcStreamHandler : public GrpcStreamHandlerBase { -public: - GrpcStreamHandler(Context* context) : GrpcStreamHandlerBase(context) {} - virtual ~GrpcStreamHandler() {} - - void send(const Request& message, bool end_of_stream) { - std::string output; - if (!message.SerializeToString(&output)) { - return; - } - GrpcStreamHandlerBase::send(output, end_of_stream); - local_close_ = local_close_ || end_of_stream; - } - - virtual void onReceive(Response&& message) = 0; - -private: - void onReceive(std::unique_ptr message) override { - onReceive(message->proto()); - } -}; - -// Context for a stream. The distinguished context id == 0 is used for non-stream calls. 
-class Context { -public: - explicit Context(uint32_t id) : id_(id) {} - virtual ~Context() {} - - uint32_t id() { return id_; } - - static std::unique_ptr New(uint32_t id); // For subclassing. - - // Called once when the filter loads and on configuration changes. - virtual void onConfigure(std::unique_ptr configuration) {} - // Called once when the filter loads. - virtual void onStart() {} - - // Called on individual requests/response streams. - virtual void onCreate() {} - virtual FilterHeadersStatus onRequestHeaders() { return FilterHeadersStatus::Continue; } - virtual FilterDataStatus onRequestBody(size_t body_buffer_length, bool end_of_stream) { - return FilterDataStatus::Continue; - } - virtual FilterTrailersStatus onRequestTrailers() { return FilterTrailersStatus::Continue; } - virtual FilterHeadersStatus onResponseHeaders() { return FilterHeadersStatus::Continue; } - virtual FilterDataStatus onResponseBody(size_t body_buffer_length, bool end_of_stream) { - return FilterDataStatus::Continue; - } - virtual FilterTrailersStatus onResponseTrailers() { return FilterTrailersStatus::Continue; } - virtual void onDone() {} - virtual void onLog() {} - virtual void onDelete() {} - virtual void onTick() {} - - // Low level HTTP/gRPC interface. - virtual void onHttpCallResponse(uint32_t token, std::unique_ptr header_pairs, - std::unique_ptr body, - std::unique_ptr trailer_pairs); - virtual void onGrpcCreateInitialMetadata(uint32_t token); - virtual void onGrpcReceiveInitialMetadata(uint32_t token); - virtual void onGrpcReceiveTrailingMetadata(uint32_t token); - virtual void onGrpcReceive(uint32_t token, std::unique_ptr message); - virtual void onGrpcClose(uint32_t token, GrpcStatus status, std::unique_ptr message); - - // Default high level HTTP/gRPC interface. NB: overriding the low level interface will disable this interface. 
- using HttpCallCallback = std::function header_pairs, - std::unique_ptr body, std::unique_ptr trailer_pairs)>; - using GrpcSimpleCallCallback = std::function message)>; - void httpCall(std::string_view uri, const HeaderStringPairs& request_headers, - std::string_view request_body, const HeaderStringPairs& request_trailers, - uint32_t timeout_milliseconds, HttpCallCallback callback); - // NB: the message is the response if status == OK and an error message otherwise. - void grpcSimpleCall(std::string_view service, std::string_view service_name, std::string_view method_name, - const google::protobuf::MessageLite &request, uint32_t timeout_milliseconds, GrpcSimpleCallCallback callback); - template void grpcSimpleCall(std::string_view service, std::string_view service_name, - std::string_view method_name, const google::protobuf::MessageLite &request, uint32_t timeout_milliseconds, - std::function success_callback, - std::function failure_callback) { - auto callback = [success_callback, failure_callback](GrpcStatus status, std::unique_ptr message) { - if (status == GrpcStatus::OK) { - success_callback(message->proto()); - } else { - failure_callback(status, message->view()); - } - }; - grpcSimpleCall(service, service_name, method_name, request, timeout_milliseconds, callback); - } - void grpcCallHandler(std::string_view service, std::string_view service_name, - std::string_view method_name, const google::protobuf::MessageLite &request, uint32_t timeout_milliseconds, - std::unique_ptr handler); - void grpcStreamHandler(std::string_view service, std::string_view service_name, - std::string_view method_name, std::unique_ptr handler); - - // Metadata - bool isImmutable(MetadataType type); - virtual bool isProactivelyCachable(MetadataType type); // Cache all keys on any read. - // Caching Metadata calls. Note: "name" refers to the metadata namespace. 
- google::protobuf::Value metadataValue(MetadataType type, std::string_view key); - google::protobuf::Value requestRouteMetadataValue(std::string_view key); - google::protobuf::Value responseRouteMetadataValue(std::string_view key); - google::protobuf::Value logMetadataValue(std::string_view key); - google::protobuf::Value requestMetadataValue(std::string_view key); - google::protobuf::Value responseMetadataValue(std::string_view key); - google::protobuf::Value nodeMetadataValue(std::string_view key); - google::protobuf::Value namedMetadataValue(MetadataType type, std::string_view name, std::string_view key); - google::protobuf::Value requestMetadataValue(std::string_view name, std::string_view key); - google::protobuf::Value responseMetadataValue(std::string_view name, std::string_view key); - google::protobuf::Struct metadataStruct(MetadataType type, std::string_view name = ""); - google::protobuf::Struct requestRouteMetadataStruct(); - google::protobuf::Struct responseRouteMetadataStruct(); - google::protobuf::Struct nodeMetadataStruct(); - google::protobuf::Struct logMetadataStruct(std::string_view name = ""); - google::protobuf::Struct requestMetadataStruct(std::string_view name = ""); - google::protobuf::Struct responseMetadataStruct(std::string_view name = ""); - // Uncached Metadata calls. 
- google::protobuf::Value getRequestMetadataValue(std::string_view key); - google::protobuf::Value getResponseMetadataValue(std::string_view key); - google::protobuf::Struct getRequestMetadataStruct(std::string_view name); - google::protobuf::Struct getResponseMetadataStruct(std::string_view name); - -private: - friend class GrpcCallHandlerBase; - friend class GrpcStreamHandlerBase; - - uint32_t id_; - std::unordered_map, google::protobuf::Value, PairHash> value_cache_; - std::unordered_map, google::protobuf::Value, Tuple3Hash> name_value_cache_; - std::unordered_map, google::protobuf::Struct, PairHash> struct_cache_; - std::unordered_map http_calls_; - std::unordered_map simple_grpc_calls_; - std::unordered_map> grpc_calls_; - std::unordered_map> grpc_streams_; -}; - -inline bool Context::isImmutable(MetadataType type) { - switch (type) { - case MetadataType::Request: - case MetadataType::Response: - return false; - default: - return true; - } -} - -// Override in subclasses to proactively cache certain types of metadata. 
-inline bool Context::isProactivelyCachable(MetadataType type) { - switch (type) { - case MetadataType::Node: - return true; - default: - return false; - } -} - -// StreamInfo -inline WasmDataPtr getProtocol(StreamType type) { - const char* ptr = nullptr; - size_t size = 0; - proxy_getProtocol(type, &ptr, &size); - return std::make_unique(ptr, size); -} - -// Metadata -inline WasmDataPtr getMetadata(MetadataType type, std::string_view key) { - const char* value_ptr = nullptr; - size_t value_size = 0; - proxy_getMetadata(type, key.data(), key.size(), &value_ptr, &value_size); - return std::make_unique(value_ptr, value_size); -} - -inline google::protobuf::Value getMetadataValue(MetadataType type, std::string_view key) { - const char* value_ptr = nullptr; - size_t value_size = 0; - proxy_getMetadata(type, key.data(), key.size(), &value_ptr, &value_size); - if (!value_size) { - return {}; - } - google::protobuf::Value value; - if (!value.ParseFromArray(value_ptr, value_size)) { - return {}; - } - return value; -} - -inline std::string getMetadataStringValue(MetadataType type, std::string_view key) { - return getMetadataValue(type, key).string_value(); -} - -inline void setMetadata(MetadataType type, std::string_view key, std::string_view value) { - proxy_setMetadata(type, key.data(), key.size(), value.data(), value.size()); -} - -inline void setMetadataValue(MetadataType type, std::string_view key, - const google::protobuf::Value& value) { - std::string output; - if (!value.SerializeToString(&output)) { - return; - } - proxy_setMetadata(type, key.data(), key.size(), output.data(), output.size()); -} - -inline void setMetadataStringValue(MetadataType type, std::string_view key, std::string_view s) { - google::protobuf::Value value; - value.set_string_value(s.data(), s.size()); - setMetadataValue(type, key, value); -} - -inline WasmDataPtr getMetadataValuePairs(MetadataType type) { - const char* value_ptr = nullptr; - size_t value_size = 0; - 
proxy_getMetadataPairs(type, &value_ptr, &value_size); - return std::make_unique(value_ptr, value_size); -} - -inline google::protobuf::Struct getMetadataStruct(MetadataType type, std::string_view name) { - const char* value_ptr = nullptr; - size_t value_size = 0; - proxy_getMetadataStruct(type, name.data(), name.size(), &value_ptr, &value_size); - if (!value_size) { - return {}; - } - google::protobuf::Struct s; - if (!s.ParseFromArray(value_ptr, value_size)) { - return {}; - } - return s; -} - -inline void setMetadataStruct(MetadataType type, std::string_view name, - const google::protobuf::Struct& s) { - std::string output; - if (!s.SerializeToString(&output)) { - return; - } - proxy_setMetadataStruct(type, name.data(), name.size(), output.data(), output.size()); -} - -inline google::protobuf::Value Context::metadataValue(MetadataType type, std::string_view key) { - auto cache_key = std::make_pair(type, std::string(key)); - auto it = value_cache_.find(cache_key); - if (it != value_cache_.end()) { - return it->second; - } - if (isProactivelyCachable(type)) { - auto values = getMetadataValuePairs(type); - for (auto &p : values->pairs()) { - google::protobuf::Value value; - if (value.ParseFromArray(p.second.data(), p.second.size())) { - auto k = std::make_pair(type, p.first); - value_cache_[cache_key] = value; - } - } - auto it = value_cache_.find(cache_key); - if (it != value_cache_.end()) { - return it->second; - } - return {}; - } else { - auto value = getMetadataValue(type, key); - value_cache_[cache_key] = value; - return value; - } -} - -inline google::protobuf::Value Context::requestRouteMetadataValue(std::string_view key) { - return metadataValue(MetadataType::RequestRoute, key); -} - -inline google::protobuf::Value Context::responseRouteMetadataValue(std::string_view key) { - return metadataValue(MetadataType::ResponseRoute, key); -} - -inline google::protobuf::Value Context::logMetadataValue(std::string_view key) { - return 
metadataValue(MetadataType::Log, key); -} - -inline google::protobuf::Value Context::requestMetadataValue(std::string_view key) { - return metadataValue(MetadataType::Request, key); -} - -inline google::protobuf::Value Context::responseMetadataValue(std::string_view key) { - return metadataValue(MetadataType::Response, key); -} - -inline google::protobuf::Value Context::nodeMetadataValue(std::string_view key) { - return metadataValue(MetadataType::Node, key); -} - -inline google::protobuf::Value Context::namedMetadataValue(MetadataType type, std::string_view name, std::string_view key) { - auto n = std::string(name); - auto cache_key = std::make_tuple(type, n, std::string(key)); - auto it = name_value_cache_.find(cache_key); - if (it != name_value_cache_.end()) { - return it->second; - } - auto s = metadataStruct(type, name); - for (auto &f : s.fields()) { - auto k = std::make_tuple(type, n, f.first); - name_value_cache_[k] = f.second; - } - struct_cache_[std::make_pair(type, n)] = std::move(s); - it = name_value_cache_.find(cache_key); - if (it != name_value_cache_.end()) { - return it->second; - } - return {}; -} - -inline google::protobuf::Value Context::requestMetadataValue(std::string_view name, std::string_view key) { - return namedMetadataValue(MetadataType::Request, name, key); -} - -inline google::protobuf::Value Context::responseMetadataValue(std::string_view name, std::string_view key) { - return namedMetadataValue(MetadataType::Response, name, key); -} - -inline google::protobuf::Struct Context::metadataStruct(MetadataType type, std::string_view name) { - auto cache_key = std::make_pair(type, std::string(name)); - auto it = struct_cache_.find(cache_key); - if (it != struct_cache_.end()) { - return it->second; - } - auto s = getMetadataStruct(MetadataType::Request, name); - struct_cache_[cache_key] = s; - return s; -} - -inline google::protobuf::Struct Context::requestRouteMetadataStruct() { - return metadataStruct(MetadataType::RequestRoute); -} - 
-inline google::protobuf::Struct Context::responseRouteMetadataStruct() { - return metadataStruct(MetadataType::ResponseRoute); -} - -inline google::protobuf::Struct Context::nodeMetadataStruct() { - return metadataStruct(MetadataType::Node); -} - -inline google::protobuf::Struct Context::logMetadataStruct(std::string_view name) { - return metadataStruct(MetadataType::Log, name); -} - -inline google::protobuf::Struct Context::requestMetadataStruct(std::string_view name) { - return metadataStruct(MetadataType::Request, name); -} - -inline google::protobuf::Struct Context::responseMetadataStruct(std::string_view name) { - return metadataStruct(MetadataType::Response, name); -} - -inline google::protobuf::Value Context::getRequestMetadataValue(std::string_view key) { - return getMetadataValue(MetadataType::Request, key); -} - -inline google::protobuf::Value Context::getResponseMetadataValue(std::string_view key) { - return getMetadataValue(MetadataType::Response, key); -} - -inline google::protobuf::Struct Context::getRequestMetadataStruct(std::string_view name) { - return getMetadataStruct(MetadataType::Request, name); -} - -inline google::protobuf::Struct Context::getResponseMetadataStruct(std::string_view name) { - return getMetadataStruct(MetadataType::Response, name); -} - -// Continue -inline void continueRequest() { proxy_continueRequest(); } -inline void continueResponse() { proxy_continueResponse(); } - -// Shared -inline WasmDataPtr getSharedData(std::string_view key, uint32_t* cas = nullptr) { - uint32_t dummy_cas; - const char* value_ptr = nullptr; - size_t value_size = 0; - if (!cas) - cas = &dummy_cas; - proxy_getSharedData(key.data(), key.size(), &value_ptr, &value_size, cas); - return std::make_unique(value_ptr, value_size); -} - -inline bool setSharedData(std::string_view key, std::string_view value, uint32_t cas = 0) { - return proxy_setSharedData(key.data(), key.size(), value.data(), value.size(), cas); -} - -// Headers/Trailers -inline void 
addHeaderMapValue(HeaderMapType type, std::string_view key, std::string_view value) { - proxy_addHeaderMapValue(type, key.data(), key.size(), value.data(), value.size()); -} - -inline WasmDataPtr getHeaderMapValue(HeaderMapType type, std::string_view key) { - const char* value_ptr = nullptr; - size_t value_size = 0; - proxy_getHeaderMapValue(type, key.data(), key.size(), &value_ptr, &value_size); - return std::make_unique(value_ptr, value_size); -} - -inline void replaceHeaderMapValue(HeaderMapType type, std::string_view key, std::string_view value) { - proxy_replaceHeaderMapValue(type, key.data(), key.size(), value.data(), value.size()); -} - -inline void removeHeaderMapValue(HeaderMapType type, std::string_view key) { - proxy_removeHeaderMapValue(type, key.data(), key.size()); -} - -inline WasmDataPtr getHeaderMapPairs(HeaderMapType type) { - const char* ptr = nullptr; - size_t size = 0; - proxy_getHeaderMapPairs(type, &ptr, &size); - return std::make_unique(ptr, size); -} - -inline void setHeaderMapPairs(HeaderMapType type, const HeaderStringPairs &pairs) { - const char* ptr = nullptr; - size_t size = 0; - exportPairs(pairs, &ptr, &size); - proxy_setHeaderMapPairs(type, ptr, size); -} - -inline void addRequestHeader(std::string_view key, std::string_view value) { - addHeaderMapValue(HeaderMapType::RequestHeaders, key, value); -} -inline WasmDataPtr getRequestHeader(std::string_view key) { - return getHeaderMapValue(HeaderMapType::RequestHeaders, key); -} -inline void replaceRequestHeader(std::string_view key, std::string_view value) { - replaceHeaderMapValue(HeaderMapType::RequestHeaders, key, value); -} -inline void removeRequestHeader(std::string_view key) { - removeHeaderMapValue(HeaderMapType::RequestHeaders, key); -} -inline WasmDataPtr getRequestHeaderPairs() { - return getHeaderMapPairs(HeaderMapType::RequestHeaders); -} -inline void setRequestHeaderPairs(const HeaderStringPairs &pairs) { - return setHeaderMapPairs(HeaderMapType::RequestHeaders, pairs); 
-} - -inline void addRequestTrailer(std::string_view key, std::string_view value) { - addHeaderMapValue(HeaderMapType::RequestTrailers, key, value); -} -inline WasmDataPtr getRequestTrailer(std::string_view key) { - return getHeaderMapValue(HeaderMapType::RequestTrailers, key); -} -inline void replaceRequestTrailer(std::string_view key, std::string_view value) { - replaceHeaderMapValue(HeaderMapType::RequestTrailers, key, value); -} -inline void removeRequestTrailer(std::string_view key) { - removeHeaderMapValue(HeaderMapType::RequestTrailers, key); -} -inline WasmDataPtr getRequestTrailerPairs() { - return getHeaderMapPairs(HeaderMapType::RequestTrailers); -} -inline void setRequestTrailerPairs(const HeaderStringPairs &pairs) { - return setHeaderMapPairs(HeaderMapType::RequestTrailers, pairs); -} - -inline void addResponseHeader(std::string_view key, std::string_view value) { - addHeaderMapValue(HeaderMapType::ResponseHeaders, key, value); -} -inline WasmDataPtr getResponseHeader(std::string_view key) { - return getHeaderMapValue(HeaderMapType::ResponseHeaders, key); -} -inline void replaceResponseHeader(std::string_view key, std::string_view value) { - replaceHeaderMapValue(HeaderMapType::ResponseHeaders, key, value); -} -inline void removeResponseHeader(std::string_view key) { - removeHeaderMapValue(HeaderMapType::ResponseHeaders, key); -} -inline WasmDataPtr getResponseHeaderPairs() { - return getHeaderMapPairs(HeaderMapType::ResponseHeaders); -} -inline void setResponseHeaderPairs(const HeaderStringPairs &pairs) { - return setHeaderMapPairs(HeaderMapType::ResponseHeaders, pairs); -} - -inline void addResponseTrailer(std::string_view key, std::string_view value) { - addHeaderMapValue(HeaderMapType::ResponseTrailers, key, value); -} -inline WasmDataPtr getResponseTrailer(std::string_view key) { - return getHeaderMapValue(HeaderMapType::ResponseTrailers, key); -} -inline void replaceResponseTrailer(std::string_view key, std::string_view value) { - 
replaceHeaderMapValue(HeaderMapType::ResponseTrailers, key, value); -} -inline void removeResponseTrailer(std::string_view key) { - removeHeaderMapValue(HeaderMapType::ResponseTrailers, key); -} -inline WasmDataPtr getResponseTrailerPairs() { - return getHeaderMapPairs(HeaderMapType::ResponseTrailers); -} -inline void setResponseTrailerPairs(const HeaderStringPairs &pairs) { - return setHeaderMapPairs(HeaderMapType::ResponseTrailers, pairs); -} - -// Body -inline WasmDataPtr getRequestBodyBufferBytes(size_t start, size_t length) { - const char* ptr = nullptr; - size_t size = 0; - proxy_getRequestBodyBufferBytes(start, length, &ptr, &size); - return std::make_unique(ptr, size); -} - -inline WasmDataPtr getResponseBodyBufferBytes(size_t start, size_t length) { - const char* ptr = nullptr; - size_t size = 0; - proxy_getResponseBodyBufferBytes(start, length, &ptr, &size); - return std::make_unique(ptr, size); -} - -// HTTP - -inline void MakeHeaderStringPairsBuffer(const HeaderStringPairs& headers, void** buffer_ptr, - size_t* size_ptr) { - if (headers.empty()) { - *buffer_ptr = nullptr; - *size_ptr = 0; - return; - } - int size = 4; // number of headers - for (auto& p : headers) { - size += 8; // size of key, size of value - size += p.first.size() + 1; // null terminated key - size += p.second.size() + 1; // null terminated value - } - char* buffer = static_cast(::malloc(size)); - char* b = buffer; - *reinterpret_cast(b) = headers.size(); - b += sizeof(int32_t); - for (auto& p : headers) { - *reinterpret_cast(b) = p.first.size(); - b += sizeof(int32_t); - *reinterpret_cast(b) = p.second.size(); - b += sizeof(int32_t); - } - for (auto& p : headers) { - memcpy(b, p.first.data(), p.first.size()); - b += p.first.size(); - *b++ = 0; - memcpy(b, p.second.data(), p.second.size()); - b += p.second.size(); - *b++ = 0; - } - *buffer_ptr = buffer; - *size_ptr = size; -} - -inline uint32_t httpCall(std::string_view uri, const HeaderStringPairs& request_headers, - std::string_view 
request_body, const HeaderStringPairs& request_trailers, - uint32_t timeout_milliseconds) { - void *headers_ptr = nullptr, *trailers_ptr = nullptr; - size_t headers_size = 0, trailers_size = 0; - MakeHeaderStringPairsBuffer(request_headers, &headers_ptr, &headers_size); - MakeHeaderStringPairsBuffer(request_trailers, &trailers_ptr, &trailers_size); - uint32_t result = - proxy_httpCall(uri.data(), uri.size(), headers_ptr, headers_size, request_body.data(), - request_body.size(), trailers_ptr, trailers_size, timeout_milliseconds); - ::free(headers_ptr); - ::free(trailers_ptr); - return result; -} - -// Low level metrics interface. - -inline uint32_t defineMetric(MetricType type, std::string_view name) { - return proxy_defineMetric(type, name.data(), name.size()); -} - -inline void incrementMetric(uint32_t metric_id, int64_t offset) { - proxy_incrementMetric(metric_id, offset); -} - -inline void recordMetric(uint32_t metric_id, uint64_t value) { - proxy_recordMetric(metric_id, value); -} - -inline uint64_t getMetric(uint32_t metric_id) { return proxy_getMetric(metric_id); } - -// Higher level metrics interface. - -struct MetricTag { - enum class TagType : uint32_t { - String = 0, - Int = 1, - Bool = 2, - }; - std::string name; - TagType tagType; -}; - -struct MetricBase { - MetricBase(MetricType t, const std::string& n) : type(t), name(n) {} - MetricBase(MetricType t, const std::string& n, std::vector ts) - : type(t), name(n), tags(ts.begin(), ts.end()) {} - - MetricType type; - std::string name; - std::vector tags; - std::unordered_map metric_ids; - - uint32_t resolveFullName(const std::string& n); - uint32_t resolveWithFields(const std::vector& fields); - std::string nameFromIdSlow(uint32_t id); -}; - -struct Metric : public MetricBase { - Metric(MetricType t, const std::string& n) : MetricBase(t, n) {} - Metric(MetricType t, const std::string& n, std::vector ts) : MetricBase(t, n, ts) {} - - template void increment(int64_t offset, Fields... 
tags); - template void record(uint64_t value, Fields... tags); - template uint64_t get(Fields... tags); - template uint32_t resolve(Fields... tags); -}; - -inline uint32_t MetricBase::resolveWithFields(const std::vector& fields) { - if (fields.size() != tags.size()) { - throw ProxyException("metric fields.size() != tags.size()"); - } - size_t s = 0; - for (auto& t : tags) { - s += t.name.size() + 1; // 1 more for "." - } - for (auto& f : fields) { - s += f.size() + 1; // 1 more for "." - } - s += name.size() + 2; // "." and "\0"; - std::string n; - n.reserve(s); - for (int i = 0; i < tags.size(); i++) { - n.append(tags[i].name); - n.append("."); - n.append(fields[i]); - n.append("."); - } - n.append(name); - return resolveFullName(n); -} - -template inline std::string ToString(T t) { return std::to_string(t); } - -template <> inline std::string ToString(const char* t) { return std::string(t); } - -template <> inline std::string ToString(std::string t) { return std::move(t); } - -template <> inline std::string ToString(bool t) { return t ? "true" : "false"; } - -inline uint32_t MetricBase::resolveFullName(const std::string& n) { - auto it = metric_ids.find(n); - if (it == metric_ids.end()) { - auto metric_id = defineMetric(type, n); - metric_ids[n] = metric_id; - return metric_id; - } - return it->second; -} - -inline std::string MetricBase::nameFromIdSlow(uint32_t id) { - for (auto& p : metric_ids) - if (p.second == id) - return p.first; - return ""; -} - -template inline uint32_t Metric::resolve(Fields... f) { - std::vector fields{ToString(f)...}; - return resolveWithFields(fields); -} - -template inline void Metric::increment(int64_t offset, Fields... f) { - std::vector fields{ToString(f)...}; - auto metric_id = resolveWithFields(fields); - incrementMetric(metric_id, offset); -} - -template inline void Metric::record(uint64_t value, Fields... 
f) { - std::vector fields{ToString(f)...}; - auto metric_id = resolveWithFields(fields); - recordMetric(metric_id, value); -} - -template inline uint64_t Metric::get(Fields... f) { - std::vector fields{ToString(f)...}; - auto metric_id = resolveWithFields(fields); - return getMetric(metric_id); -} - -template struct MetricTagDescriptor { - MetricTagDescriptor(std::string_view n) : name(n) {} - MetricTagDescriptor(const char* n) : name(n) {} - typedef T type; - std::string_view name; -}; - -template inline MetricTag ToMetricTag(const MetricTagDescriptor& d) { return {}; } - -template <> inline MetricTag ToMetricTag(const MetricTagDescriptor& d) { - return {std::string(d.name), MetricTag::TagType::String}; -} - -template <> inline MetricTag ToMetricTag(const MetricTagDescriptor& d) { - return {std::string(d.name), MetricTag::TagType::String}; -} - -template <> inline MetricTag ToMetricTag(const MetricTagDescriptor& d) { - return {std::string(d.name), MetricTag::TagType::String}; -} - -template <> inline MetricTag ToMetricTag(const MetricTagDescriptor& d) { - return {std::string(d.name), MetricTag::TagType::Int}; -} - -template <> inline MetricTag ToMetricTag(const MetricTagDescriptor& d) { - return {std::string(d.name), MetricTag::TagType::Bool}; -} - -struct SimpleCounter { - SimpleCounter(uint32_t id) : metric_id(id) {} - - void increment(int64_t offset) { recordMetric(metric_id, offset); } - void record(int64_t offset) { increment(offset); } - uint64_t get() { return getMetric(metric_id); } - void operator++() { increment(1); } - void operator++(int) { increment(1); } - - uint32_t metric_id; -}; - -struct SimpleGauge { - SimpleGauge(uint32_t id) : metric_id(id) {} - - void record(uint64_t offset) { recordMetric(metric_id, offset); } - uint64_t get() { return getMetric(metric_id); } - - uint32_t metric_id; -}; - -struct SimpleHistogram { - SimpleHistogram(uint32_t id) : metric_id(id) {} - - void record(int64_t offset) { recordMetric(metric_id, offset); } - - 
uint32_t metric_id; -}; - -template struct Counter : public MetricBase { - static Counter* New(std::string_view name, MetricTagDescriptor... fieldnames); - - SimpleCounter resolve(Tags... f) { - std::vector fields{ToString(f)...}; - return SimpleCounter(resolveWithFields(fields)); - } - - void increment(int64_t offset, Tags... tags) { - std::vector fields{ToString(tags)...}; - auto metric_id = resolveWithFields(fields); - incrementMetric(metric_id, offset); - } - - void record(int64_t offset, Tags... tags) { increment(offset, tags...); } - - uint64_t get(Tags... tags) { - std::vector fields{ToString(tags)...}; - auto metric_id = resolveWithFields(fields); - return getMetric(metric_id); - } - -private: - Counter(const std::string& name, std::vector tags) - : MetricBase(MetricType::Counter, name, tags) {} -}; - -template -inline Counter* Counter::New(std::string_view name, - MetricTagDescriptor... descriptors) { - return new Counter(std::string(name), - std::vector({ToMetricTag(descriptors)...})); -} - -template struct Gauge : public MetricBase { - static Gauge* New(std::string_view name, MetricTagDescriptor... fieldnames); - - SimpleGauge resolve(Tags... f) { - std::vector fields{ToString(f)...}; - return SimpleGauge(resolveWithFields(fields)); - } - - void record(int64_t offset, Tags... tags) { - std::vector fields{ToString(tags)...}; - auto metric_id = resolveWithFields(fields); - recordMetric(metric_id, offset); - } - - uint64_t get(Tags... tags) { - std::vector fields{ToString(tags)...}; - auto metric_id = resolveWithFields(fields); - return getMetric(metric_id); - } - -private: - Gauge(const std::string& name, std::vector tags) - : MetricBase(MetricType::Gauge, name, tags) {} -}; - -template -inline Gauge* Gauge::New(std::string_view name, - MetricTagDescriptor... 
descriptors) { - return new Gauge(std::string(name), - std::vector({ToMetricTag(descriptors)...})); -} - -template struct Histogram : public MetricBase { - static Histogram* New(std::string_view name, MetricTagDescriptor... fieldnames); - - SimpleHistogram resolve(Tags... f) { - std::vector fields{ToString(f)...}; - return SimpleHistogram(resolveWithFields(fields)); - } - - void record(int64_t offset, Tags... tags) { - std::vector fields{ToString(tags)...}; - auto metric_id = resolveWithFields(fields); - recordMetric(metric_id, offset); - } - -private: - Histogram(const std::string& name, std::vector tags) - : MetricBase(MetricType::Histogram, name, tags) {} -}; - -template -inline Histogram* Histogram::New(std::string_view name, - MetricTagDescriptor... descriptors) { - return new Histogram(std::string(name), - std::vector({ToMetricTag(descriptors)...})); -} - -inline uint32_t grpcCall(std::string_view service, std::string_view service_name, std::string_view method_name, - const google::protobuf::MessageLite &request, uint32_t timeout_milliseconds) { - std::string serialized_request; - request.SerializeToString(&serialized_request); - return proxy_grpcCall(service.data(), service.size(), service_name.data(), service_name.size(), method_name.data(), method_name.size(), - serialized_request.data(), serialized_request.size(), timeout_milliseconds); - -} - -inline uint32_t grpcStream(std::string_view service, std::string_view service_name, std::string_view method_name) { - return proxy_grpcStream(service.data(), service.size(), service_name.data(), service_name.size(), method_name.data(), method_name.size()); -} - -inline void grpcCancel(uint32_t token) { - return proxy_grpcCancel(token); -} - -inline void grpcClose(uint32_t token) { - return proxy_grpcClose(token); -} - -inline void grpcSend(uint32_t token, std::string_view message, bool end_stream) { - return proxy_grpcSend(token, message.data(), message.size(), end_stream ? 
1 : 0); -} - -inline void Context::httpCall(std::string_view uri, const HeaderStringPairs& request_headers, - std::string_view request_body, const HeaderStringPairs& request_trailers, - uint32_t timeout_milliseconds, HttpCallCallback callback) { - auto token = ::httpCall(uri, request_headers, request_body, request_trailers, timeout_milliseconds); - if (token) { - http_calls_[token] = std::move(callback); - } else { - throw ProxyException("httpCall failed"); - } -} - -inline void Context::onHttpCallResponse(uint32_t token, std::unique_ptr header_pairs, - std::unique_ptr body, std::unique_ptr trailer_pairs) { - auto it = http_calls_.find(token); - if (it != http_calls_.end()) { - it->second(std::move(header_pairs), std::move(body), std::move(trailer_pairs)); - http_calls_.erase(token); - } -} - -inline void Context::grpcSimpleCall(std::string_view service, std::string_view service_name, std::string_view method_name, - const google::protobuf::MessageLite &request, uint32_t timeout_milliseconds, Context::GrpcSimpleCallCallback callback) { - auto token = grpcCall(service, service_name, method_name, request, timeout_milliseconds); - if (token) { - simple_grpc_calls_[token] = std::move(callback); - } else { - throw ProxyException("grpcCall failed"); - } -} - -inline void GrpcCallHandlerBase::cancel() { - grpcCancel(token_); - context_->grpc_calls_.erase(token_); -} - -inline void GrpcStreamHandlerBase::reset() { - grpcCancel(token_); - context_->grpc_streams_.erase(token_); -} - -inline void GrpcStreamHandlerBase::close() { - grpcClose(token_); - local_close_ = true; - if (local_close_ && remote_close_) { - context_->grpc_streams_.erase(token_); - } - // NB: else callbacks can still occur: reset() to prevent further callbacks. -} - -inline void GrpcStreamHandlerBase::send(std::string_view message, bool end_of_stream) { - grpcSend(token_, message, end_of_stream); - if (end_of_stream) { - // NB: callbacks can still occur: reset() to prevent further callbacks. 
- local_close_ = local_close_ || end_of_stream; - if (local_close_ && remote_close_) { - context_->grpc_streams_.erase(token_); - } - } -} - -inline void Context::onGrpcCreateInitialMetadata(uint32_t token) { - { - auto it = grpc_calls_.find(token); - if (it != grpc_calls_.end()) { - it->second->onCreateInitialMetadata(); - return; - } - } - { - auto it = grpc_streams_.find(token); - if (it != grpc_streams_.end()) { - it->second->onCreateInitialMetadata(); - return; - } - } -} - -inline void Context::onGrpcReceiveInitialMetadata(uint32_t token) { - { - auto it = grpc_streams_.find(token); - if (it != grpc_streams_.end()) { - it->second->onReceiveInitialMetadata(); - return; - } - } -} - -inline void Context::onGrpcReceiveTrailingMetadata(uint32_t token) { - { - auto it = grpc_streams_.find(token); - if (it != grpc_streams_.end()) { - it->second->onReceiveTrailingMetadata(); - return; - } - } -} - -inline void Context::onGrpcReceive(uint32_t token, std::unique_ptr message) { - { - auto it = simple_grpc_calls_.find(token); - if (it != simple_grpc_calls_.end()) { - it->second(GrpcStatus::OK, std::move(message)); - simple_grpc_calls_.erase(token); - return; - } - } - { - auto it = grpc_calls_.find(token); - if (it != grpc_calls_.end()) { - it->second->onSuccess(std::move(message)); - grpc_calls_.erase(token); - return; - } - } - { - auto it = grpc_streams_.find(token); - if (it != grpc_streams_.end()) { - it->second->onReceive(std::move(message)); - grpc_streams_.erase(token); - return; - } - } -} - -inline void GrpcStreamHandlerBase::doRemoteClose(GrpcStatus status, std::unique_ptr error_message) { - auto context = context_; - auto token = token_; - this->onRemoteClose(status, std::move(error_message)); - if (context->grpc_streams_.find(token) != context->grpc_streams_.end()) { - // We have not been deleted, e.g. by reset() in the onRemoteCall() virtual handler. 
- remote_close_ = true; - if (local_close_ && remote_close_) { - context_->grpc_streams_.erase(token_); - } - // else do not erase the token since we can still send in this state. - } -} - -inline void Context::onGrpcClose(uint32_t token, GrpcStatus status, std::unique_ptr message) { - { - auto it = simple_grpc_calls_.find(token); - if (it != simple_grpc_calls_.end()) { - it->second(status, std::move(message)); - simple_grpc_calls_.erase(token); - return; - } - } - { - auto it = grpc_calls_.find(token); - if (it != grpc_calls_.end()) { - it->second->onFailure(status, std::move(message)); - grpc_calls_.erase(token); - return; - } - } - { - auto it = grpc_streams_.find(token); - if (it != grpc_streams_.end()) { - it->second->doRemoteClose(status, std::move(message)); - return; - } - } -} - -inline void Context::grpcCallHandler(std::string_view service, std::string_view service_name, - std::string_view method_name, const google::protobuf::MessageLite &request, uint32_t timeout_milliseconds, - std::unique_ptr handler) { - auto token = grpcCall(service, service_name, method_name, request, timeout_milliseconds); - if (token) { - handler->token_ = token; - grpc_calls_[token] = std::move(handler); - } else { - throw ProxyException("grpcCall failed"); - } -} +#include +using EnumType = uint32_t; +#include +using StringView = std::string_view; -inline void Context::grpcStreamHandler(std::string_view service, std::string_view service_name, - std::string_view method_name, std::unique_ptr handler) { - auto token = grpcStream(service, service_name, method_name); - if (token) { - handler->token_ = token; - grpc_streams_[token] = std::move(handler); - } else { - throw ProxyException("grpcStream failed"); - } -} +#include "proxy_wasm_enums.h" +#include "proxy_wasm_externs.h" +#ifndef EMSCRIPTEN_PROTOBUF_LITE +#include "proxy_wasm_intrinsics.pb.h" +#else +#include "proxy_wasm_intrinsics_lite.pb.h" +#endif +#include "proxy_wasm_impl.h" diff --git a/api/xds_protocol.rst 
b/api/xds_protocol.rst new file mode 100644 index 0000000000000..40f323c4bd0ad --- /dev/null +++ b/api/xds_protocol.rst @@ -0,0 +1,456 @@ +xDS REST and gRPC protocol +========================== + +Envoy discovers its various dynamic resources via the filesystem or by +querying one or more management servers. Collectively, these discovery +services and their corresponding APIs are referred to as *xDS*. +Resources are requested via *subscriptions*, by specifying a filesystem +path to watch, initiating gRPC streams or polling a REST-JSON URL. The +latter two methods involve sending requests with a :ref:`DiscoveryRequest ` +proto payload. Resources are delivered in a +:ref:`DiscoveryResponse ` +proto payload in all methods. We discuss each type of subscription +below. + +Filesystem subscriptions +------------------------ + +The simplest approach to delivering dynamic configuration is to place it +at a well known path specified in the :ref:`ConfigSource `. +Envoy will use `inotify` (`kqueue` on macOS) to monitor the file for +changes and parse the +:ref:`DiscoveryResponse ` proto in the file on update. +Binary protobufs, JSON, YAML and proto text are supported formats for +the +:ref:`DiscoveryResponse `. + +There is no mechanism available for filesystem subscriptions to ACK/NACK +updates beyond stats counters and logs. The last valid configuration for +an xDS API will continue to apply if a configuration update rejection +occurs. + +Streaming gRPC subscriptions +---------------------------- + +Singleton resource type discovery +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +A gRPC +:ref:`ApiConfigSource ` +can be specified independently for each xDS API, pointing at an upstream +cluster corresponding to a management server. This will initiate an +independent bidirectional gRPC stream for each xDS resource type, +potentially to distinct management servers. API delivery is eventually +consistent. 
See :ref:`Aggregated Discovery Service` below for +situations in which explicit control of sequencing is required. + +Type URLs +^^^^^^^^^ + +Each xDS API is concerned with resources of a given type. There is a 1:1 +correspondence between an xDS API and a resource type. That is: + +- LDS: :ref:`envoy.api.v2.Listener ` +- RDS: :ref:`envoy.api.v2.RouteConfiguration ` +- VHDS: :ref:`envoy.api.v2.Vhds ` +- CDS: :ref:`envoy.api.v2.Cluster ` +- EDS: :ref:`envoy.api.v2.ClusterLoadAssignment ` +- SDS: :ref:`envoy.api.v2.Auth.Secret ` + +The concept of `type URLs `_ appears below, and takes the form +`type.googleapis.com/`, e.g. +`type.googleapis.com/envoy.api.v2.Cluster` for CDS. In various +requests from Envoy and responses by the management server, the resource +type URL is stated. + +ACK/NACK and versioning +^^^^^^^^^^^^^^^^^^^^^^^ + +Each stream begins with a +:ref:`DiscoveryRequest ` from Envoy, specifying +the list of resources to subscribe to, the type URL corresponding to the +subscribed resources, the node identifier and an empty :ref:`version_info `. +An example EDS request might be: + +.. code:: yaml + + version_info: + node: { id: envoy } + resource_names: + - foo + - bar + type_url: type.googleapis.com/envoy.api.v2.ClusterLoadAssignment + response_nonce: + +The management server may reply either immediately or when the requested +resources are available with a :ref:`DiscoveryResponse `, e.g.: + +.. code:: yaml + + version_info: X + resources: + - foo ClusterLoadAssignment proto encoding + - bar ClusterLoadAssignment proto encoding + type_url: type.googleapis.com/envoy.api.v2.ClusterLoadAssignment + nonce: A + +After processing the :ref:`DiscoveryResponse `, Envoy will send a new +request on the stream, specifying the last version successfully applied +and the nonce provided by the management server. If the update was +successfully applied, the :ref:`version_info ` will be **X**, as indicated +in the sequence diagram: + +.. 
figure:: diagrams/simple-ack.svg + :alt: Version update after ACK + +In this sequence diagram, and below, the following format is used to abbreviate messages: + +- *DiscoveryRequest*: (V=version_info,R=resource_names,N=response_nonce,T=type_url) +- *DiscoveryResponse*: (V=version_info,R=resources,N=nonce,T=type_url) + +The version provides Envoy and the management server a shared notion of +the currently applied configuration, as well as a mechanism to ACK/NACK +configuration updates. If Envoy had instead rejected configuration +update **X**, it would reply with :ref:`error_detail ` +populated and its previous version, which in this case was the empty +initial version. The :ref:`error_detail ` has more details around the exact +error message populated in the message field: + +.. figure:: diagrams/simple-nack.svg + :alt: No version update after NACK + +Later, an API update may succeed at a new version **Y**: + + +.. figure:: diagrams/later-ack.svg + :alt: ACK after NACK + +Each stream has its own notion of versioning, there is no shared +versioning across resource types. When ADS is not used, even each +resource of a given resource type may have a distinct version, since the +Envoy API allows distinct EDS/RDS resources to point at different :ref:`ConfigSources `. + +.. _Resource Updates: + +When to send an update +^^^^^^^^^^^^^^^^^^^^^^ + +The management server should only send updates to the Envoy client when +the resources in the :ref:`DiscoveryResponse ` have changed. Envoy replies +to any :ref:`DiscoveryResponse ` with a :ref:`DiscoveryRequest ` containing the +ACK/NACK immediately after it has been either accepted or rejected. If +the management server provides the same set of resources rather than +waiting for a change to occur, it will cause Envoy and the management +server to spin and have a severe performance impact. + +Within a stream, new :ref:`DiscoveryRequests ` supersede any prior +:ref:`DiscoveryRequests ` having the same resource type. 
This means that +the management server only needs to respond to the latest +:ref:`DiscoveryRequest ` on each stream for any given resource type. + +Resource hints +^^^^^^^^^^^^^^ + +The :ref:`resource_names ` specified in the :ref:`DiscoveryRequest ` are a hint. +Some resource types, e.g. `Clusters` and `Listeners` will +specify an empty :ref:`resource_names ` list, since Envoy is interested in +learning about all the :ref:`Clusters (CDS) ` and :ref:`Listeners (LDS) ` +that the management server(s) know about corresponding to its node +identification. Other resource types, e.g. :ref:`RouteConfiguration (RDS) ` +and :ref:`ClusterLoadAssignment (EDS) `, follow from earlier +CDS/LDS updates and Envoy is able to explicitly enumerate these +resources. + +LDS/CDS resource hints will always be empty and it is expected that the +management server will provide the complete state of the LDS/CDS +resources in each response. An absent `Listener` or `Cluster` will +be deleted. + +For EDS/RDS, the management server does not need to supply every +requested resource and may also supply additional, unrequested +resources. :ref:`resource_names ` is only a hint. Envoy will silently ignore +any superfluous resources. When a requested resource is missing in a RDS +or EDS update, Envoy will retain the last known value for this resource +except in the case where the `Cluster` or `Listener` is being +warmed. See :ref:`Resource warming` section below on +the expectations during warming. The management server may be able to +infer all the required EDS/RDS resources from the :ref:`node ` +identification in the :ref:`DiscoveryRequest `, in which case this hint may +be discarded. An empty EDS/RDS :ref:`DiscoveryResponse ` is effectively a +nop from the perspective of the respective resources in the Envoy. + +When a `Listener` or `Cluster` is deleted, its corresponding EDS and +RDS resources are also deleted inside the Envoy instance. 
In order for +EDS resources to be known or tracked by Envoy, there must exist an +applied `Cluster` definition (e.g. sourced via CDS). A similar +relationship exists between RDS and `Listeners` (e.g. sourced via +LDS). + +For EDS/RDS, Envoy may either generate a distinct stream for each +resource of a given type (e.g. if each :ref:`ConfigSource ` has its own +distinct upstream cluster for a management server), or may combine +together multiple resource requests for a given resource type when they +are destined for the same management server. While this is left to +implementation specifics, management servers should be capable of +handling one or more :ref:`resource_names ` for a given resource type in +each request. Both sequence diagrams below are valid for fetching two +EDS resources `{foo, bar}`: + +|Multiple EDS requests on the same stream| |Multiple EDS requests on +distinct streams| + +Resource updates +^^^^^^^^^^^^^^^^ + +As discussed above, Envoy may update the list of :ref:`resource_names ` it +presents to the management server in each :ref:`DiscoveryRequest ` that +ACK/NACKs a specific :ref:`DiscoveryResponse `. In addition, Envoy may later +issue additional :ref:`DiscoveryRequests ` at a given :ref:`version_info ` to +update the management server with new resource hints. For example, if +Envoy is at EDS version **X** and knows only about cluster ``foo``, but +then receives a CDS update and learns about ``bar`` in addition, it may +issue an additional :ref:`DiscoveryRequest ` for **X** with `{foo,bar}` as +`resource_names`. + +.. figure:: diagrams/cds-eds-resources.svg + :alt: CDS response leads to EDS resource hint update + +There is a race condition that may arise here; if after a resource hint +update is issued by Envoy at **X**, but before the management server +processes the update it replies with a new version **Y**, the resource +hint update may be interpreted as a rejection of **Y** by presenting an +**X** :ref:`version_info `. 
To avoid this, the management server provides a +``nonce`` that Envoy uses to indicate the specific :ref:`DiscoveryResponse ` +each :ref:`DiscoveryRequest ` corresponds to: + +.. figure:: diagrams/update-race.svg + :alt: EDS update race motivates nonces + +The management server should not send a :ref:`DiscoveryResponse ` for any +:ref:`DiscoveryRequest ` that has a stale nonce. A nonce becomes stale +following a newer nonce being presented to Envoy in a +:ref:`DiscoveryResponse `. A management server does not need to send an +update until it determines a new version is available. Earlier requests +at a version then also become stale. It may process multiple +:ref:`DiscoveryRequests ` at a version until a new version is ready. + +.. figure:: diagrams/stale-requests.svg + :alt: Requests become stale + +An implication of the above resource update sequencing is that Envoy +does not expect a :ref:`DiscoveryResponse ` for every :ref:`DiscoveryRequests ` +it issues. + +.. _Resource Warming: + +Resource warming +~~~~~~~~~~~~~~~~ + +:ref:`Clusters ` and +:ref:`Listeners ` +go through warming before they can serve requests. This process +happens both during :ref:`Envoy initialization ` +and when the `Cluster` or `Listener` is updated. Warming of +`Cluster` is completed only when a `ClusterLoadAssignment` response +is supplied by management server. Similarly, warming of `Listener` is +completed only when a `RouteConfiguration` is supplied by management +server if the listener refers to an RDS configuration. Management server +is expected to provide the EDS/RDS updates during warming. If management +server does not provide EDS/RDS responses, Envoy will not initialize +itself during the initialization phase and the updates sent via CDS/LDS +will not take effect until EDS/RDS responses are supplied. + +Eventual consistency considerations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Since Envoy's xDS APIs are eventually consistent, traffic may drop +briefly during updates. 
For example, if only cluster **X** is known via +CDS/EDS, a `RouteConfiguration` references cluster **X** and is then +adjusted to cluster **Y** just before the CDS/EDS update providing +**Y**, traffic will be blackholed until **Y** is known about by the +Envoy instance. + +For some applications, a temporary drop of traffic is acceptable, +retries at the client or by other Envoy sidecars will hide this drop. +For other scenarios where drop can't be tolerated, traffic drop could +have been avoided by providing a CDS/EDS update with both **X** and +**Y**, then the RDS update repointing from **X** to **Y** and then a +CDS/EDS update dropping **X**. + +In general, to avoid traffic drop, sequencing of updates should follow a +make before break model, wherein: + +- CDS updates (if any) must always be pushed first. +- EDS updates (if any) must arrive after CDS updates for the respective clusters. +- LDS updates must arrive after corresponding CDS/EDS updates. +- RDS updates related to the newly added listeners must arrive after CDS/EDS/LDS updates. +- VHDS updates (if any) related to the newly added RouteConfigurations must arrive after RDS updates. +- Stale CDS clusters and related EDS endpoints (ones no longer being referenced) can then be removed. + +xDS updates can be pushed independently if no new +clusters/routes/listeners are added or if it's acceptable to temporarily +drop traffic during updates. Note that in case of LDS updates, the +listeners will be warmed before they receive traffic, i.e. the dependent +routes are fetched through RDS if configured. Clusters are warmed when +adding/removing/updating clusters. On the other hand, routes are not +warmed, i.e., the management plane must ensure that clusters referenced +by a route are in place, before pushing the updates for a route. + +.. 
_Aggregated Discovery Service: + +Aggregated Discovery Service (ADS) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +It's challenging to provide the above guarantees on sequencing to avoid +traffic drop when management servers are distributed. ADS allows a single +management server, via a single gRPC stream, to deliver all API updates. +This provides the ability to carefully sequence updates to avoid traffic +drop. With ADS, a single stream is used with multiple independent +:ref:`DiscoveryRequest `/:ref:`DiscoveryResponse ` sequences multiplexed via the +type URL. For any given type URL, the above sequencing of +:ref:`DiscoveryRequest ` and :ref:`DiscoveryResponse ` messages applies. An +example update sequence might look like: + +.. figure:: diagrams/ads.svg + :alt: EDS/CDS multiplexed on an ADS stream + +A single ADS stream is available per Envoy instance. + +An example minimal ``bootstrap.yaml`` fragment for ADS configuration is: + +.. code:: yaml + + node: + id: + dynamic_resources: + cds_config: {ads: {}} + lds_config: {ads: {}} + ads_config: + api_type: GRPC + grpc_services: + envoy_grpc: + cluster_name: ads_cluster + static_resources: + clusters: + - name: ads_cluster + connect_timeout: { seconds: 5 } + type: STATIC + hosts: + - socket_address: + address: + port_value: + lb_policy: ROUND_ROBIN + http2_protocol_options: {} + upstream_connection_options: + # configure a TCP keep-alive to detect and reconnect to the admin + # server in the event of a TCP socket disconnection + tcp_keepalive: + ... + admin: + ... + +Incremental xDS +~~~~~~~~~~~~~~~ + +Incremental xDS is a separate xDS endpoint that: + +- Allows the protocol to communicate on the wire in terms of + resource/resource name deltas ("Delta xDS"). This supports the goal + of scalability of xDS resources. Rather than deliver all 100k + clusters when a single cluster is modified, the management server + only needs to deliver the single cluster that changed. 
+- Allows the Envoy to on-demand / lazily request additional resources. + For example, requesting a cluster only when a request for that + cluster arrives. + +An Incremental xDS session is always in the context of a gRPC +bidirectional stream. This allows the xDS server to keep track of the +state of xDS clients connected to it. There is no REST version of +Incremental xDS yet. + +In the delta xDS wire protocol, the nonce field is required and used to +pair a :ref:`DeltaDiscoveryResponse ` +to a :ref:`DeltaDiscoveryRequest ` +ACK or NACK. Optionally, a response message level :ref:`system_version_info ` +is present for debugging purposes only. + +:ref:`DeltaDiscoveryRequest ` can be sent in the following situations: + +- Initial message in a xDS bidirectional gRPC stream. +- As an ACK or NACK response to a previous :ref:`DeltaDiscoveryResponse `. In this case the :ref:`response_nonce ` is set to the nonce value in the Response. ACK or NACK is determined by the absence or presence of :ref:`error_detail `. +- Spontaneous :ref:`DeltaDiscoveryRequests ` from the client. This can be done to dynamically add or remove elements from the tracked :ref:`resource_names ` set. In this case :ref:`response_nonce ` must be omitted. + +In this first example the client connects and receives a first update +that it ACKs. The second update fails and the client NACKs the update. +Later the xDS client spontaneously requests the "wc" resource. + +.. figure:: diagrams/incremental.svg + :alt: Incremental session example + +On reconnect the Incremental xDS client may tell the server of its known +resources to avoid resending them over the network. Because no state is +assumed to be preserved from the previous stream, the reconnecting +client must provide the server with all resource names it is interested +in. + +.. figure:: diagrams/incremental-reconnect.svg + :alt: Incremental reconnect example + +Resource names +^^^^^^^^^^^^^^ + +Resources are identified by a resource name or an alias. 
Aliases of a +resource, if present, can be identified by the alias field in the +resource of a :ref:`DeltaDiscoveryResponse `. The resource name will be +returned in the name field in the resource of a +:ref:`DeltaDiscoveryResponse `. + +Subscribing to Resources +^^^^^^^^^^^^^^^^^^^^^^^^ + +The client can send either an alias or the name of a resource in the +:ref:`resource_names_subscribe ` field of a :ref:`DeltaDiscoveryRequest ` in +order to subscribe to a resource. Both the names and aliases of +resources should be checked in order to determine whether the entity in +question has been subscribed to. + +A :ref:`resource_names_subscribe ` field may contain resource names that the +server believes the client is already subscribed to, and furthermore has +the most recent versions of. However, the server *must* still provide +those resources in the response; due to implementation details hidden +from the server, the client may have "forgotten" those resources despite +apparently remaining subscribed. + +Unsubscribing from Resources +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +When a client loses interest in some resources, it will indicate that +with the :ref:`resource_names_unsubscribe ` field of a +:ref:`DeltaDiscoveryRequest `. As with :ref:`resource_names_subscribe `, these +may be resource names or aliases. + +A :ref:`resource_names_unsubscribe ` field may contain superfluous resource +names, which the server thought the client was already not subscribed +to. The server must cleanly process such a request; it can simply ignore +these phantom unsubscriptions. + +REST-JSON polling subscriptions +------------------------------- + +Synchronous (long) polling via REST endpoints is also available for the +xDS singleton APIs. The above sequencing of messages is similar, except +no persistent stream is maintained to the management server. 
It is +expected that there is only a single outstanding request at any point in +time, and as a result the response nonce is optional in REST-JSON. The +`JSON canonical transform of +proto3 `__ +is used to encode :ref:`DiscoveryRequest ` and :ref:`DiscoveryResponse ` +messages. ADS is not available for REST-JSON polling. + +When the poll period is set to a small value, with the intention of long +polling, then there is also a requirement to avoid sending a +:ref:`DiscoveryResponse ` :ref:`unless a change to the underlying resources has +occurred `. + +.. |Multiple EDS requests on the same stream| image:: diagrams/eds-same-stream.svg +.. |Multiple EDS requests on distinct streams| image:: diagrams/eds-distinct-stream.svg \ No newline at end of file diff --git a/bazel/BUILD b/bazel/BUILD index 34315c825bab8..8be69e60045a1 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -36,16 +36,6 @@ genrule( stamp = 1, ) -config_setting( - name = "windows_x86_64", - values = {"cpu": "x64_windows"}, -) - -config_setting( - name = "linux_ppc", - values = {"cpu": "ppc"}, -) - config_setting( name = "windows_opt_build", values = { @@ -85,6 +75,11 @@ config_setting( values = {"compilation_mode": "dbg"}, ) +config_setting( + name = "asan_build", + values = {"define": "ENVOY_CONFIG_ASAN=1"}, +) + config_setting( name = "coverage_build", values = {"define": "ENVOY_CONFIG_COVERAGE=1"}, @@ -115,6 +110,11 @@ config_setting( values = {"define": "google_grpc=disabled"}, ) +config_setting( + name = "enable_path_normalization_by_default", + values = {"define": "path_normalization_by_default=true"}, +) + cc_proto_library( name = "grpc_health_proto", deps = ["@com_github_grpc_grpc//src/proto/grpc/health/v1:_health_proto_only"], @@ -164,3 +164,89 @@ alias( "//conditions:default": "@boringssl//:ssl", }), ) + +config_setting( + name = "linux_x86_64", + values = {"cpu": "k8"}, +) + +config_setting( + name = "linux_aarch64", + values = {"cpu": "aarch64"}, +) + +config_setting( + name = "linux_ppc", + 
values = {"cpu": "ppc"}, +) + +config_setting( + name = "windows_x86_64", + values = {"cpu": "x64_windows"}, +) + +# Configuration settings to make doing selects for Apple vs non-Apple platforms +# easier. More details: https://docs.bazel.build/versions/master/configurable-attributes.html#config_settingaliasing +config_setting( + name = "darwin", + values = {"cpu": "darwin"}, +) + +config_setting( + name = "darwin_x86_64", + values = {"cpu": "darwin_x86_64"}, +) + +config_setting( + name = "ios_i386", + values = {"cpu": "ios_i386"}, +) + +config_setting( + name = "ios_x86_64", + values = {"cpu": "ios_x86_64"}, +) + +config_setting( + name = "ios_armv7", + values = {"cpu": "ios_armv7"}, +) + +config_setting( + name = "ios_armv7s", + values = {"cpu": "ios_armv7s"}, +) + +config_setting( + name = "ios_arm64", + values = {"cpu": "ios_arm64"}, +) + +config_setting( + name = "ios_arm64e", + values = {"cpu": "ios_arm64e"}, +) + +config_setting( + name = "manual_stamp", + values = {"define": "manual_stamp=manual_stamp"}, +) + +alias( + name = "apple", + actual = select( + { + ":darwin": ":darwin", + ":darwin_x86_64": ":darwin_x86_64", + ":ios_arm64": ":ios_arm64", + ":ios_arm64e": ":ios_arm64e", + ":ios_armv7": ":ios_armv7", + ":ios_armv7s": ":ios_armv7s", + ":ios_i386": ":ios_i386", + ":ios_x86_64": ":ios_x86_64", + # If we're not on an apple platform return a value that will never match in the select() statement calling this + # since it would have already been matched above + "//conditions:default": ":darwin", + }, + ), +) diff --git a/bazel/DEVELOPER.md b/bazel/DEVELOPER.md index 40bd6f7641663..e54d30ea9e626 100644 --- a/bazel/DEVELOPER.md +++ b/bazel/DEVELOPER.md @@ -59,7 +59,7 @@ envoy_cc_library( This declares a new target `bar_interface`, where the convention is that pure virtual interfaces have their targets suffixed with `_interface`. The header `bar.h` is exported to other targets that depend on -`//incude/envoy/foo:bar_interface`. 
The interface target itself depends on +`//include/envoy/foo:bar_interface`. The interface target itself depends on `baz_interface` (in the same directory, hence the relative Bazel label) and `buffer_interface`. diff --git a/bazel/EXTERNAL_DEPS.md b/bazel/EXTERNAL_DEPS.md index e6d2c67545e6c..a1b3a9fd69993 100644 --- a/bazel/EXTERNAL_DEPS.md +++ b/bazel/EXTERNAL_DEPS.md @@ -54,28 +54,11 @@ Dependencies between external libraries can use the standard Bazel dependency resolution logic, using the `$(location)` shell extension to resolve paths to binaries, libraries, headers, etc. -# Adding external dependencies to Envoy (build recipe) - -This is the older style of adding dependencies. It uses shell scripts to build and install -dependencies into a shared directory prefix. This should no longer be used unless there are -extenuating circumstances. - -1. Add a build recipe X in [`ci/build_container/build_recipes`](../ci/build_container/build_recipes) - for developer-local and CI external dependency build flows. -2. Add a build target Y in [`ci/prebuilt/BUILD`](../ci/prebuilt/BUILD) to consume the headers and - libraries produced by the build recipe X. -3. Add a map from target Y to build recipe X in [`target_recipes.bzl`](target_recipes.bzl). -4. Reference your new external dependency in some `envoy_cc_library` via Y in the `external_deps` - attribute. -5. `bazel test //test/...` - # Updating an external dependency version -1. If the dependency is a build recipe, update the build recipe in -[`ci/build_container/build_recipes`](../ci/build_container/build_recipes). -2. If not, update the corresponding entry in +1. Update the corresponding entry in [the repository locations file.](https://github.com/envoyproxy/envoy/blob/master/bazel/repository_locations.bzl) -3. `bazel test //test/...` +2. 
`bazel test //test/...` # Overriding an external dependency temporarily diff --git a/bazel/PPROF.md b/bazel/PPROF.md index edaf791af07ae..8345eedb9fce2 100644 --- a/bazel/PPROF.md +++ b/bazel/PPROF.md @@ -92,6 +92,26 @@ Build the binary using bazel, and run the binary without any environment variabl This will dump your profiler output to the working directory. +## Memory Profiling in Tests +To support memory leaks detection, tests are built with gperftools dependencies enabled by default. + +### Enabling Memory Profiling in Tests +Use `HeapProfilerStart()`, `HeapProfilerStop()`, and `HeapProfilerDump()` to start, stop, and persist +memory dumps, respectively. Please see [above](#adding-tcmalloc_dep-to-envoy) for more details. + +### Bazel Configuration +By default, bazel executes tests in a sandbox, which will be deleted together with memory dumps +after the test run. To preserve memory dumps, bazel can be forced to run tests without +sandboxing, by setting the ```TestRunner``` parameter to ```standalone```: +``` +bazel test --strategy=TestRunner=standalone ... +``` + +An alternative is to set ```HEAPPROFILE``` environment variable for the test runner: +``` +bazel test --test_env=HEAPPROFILE=/tmp/testprofile ... +``` + # Methodology For consistent testing, it makes sense to run Envoy for a constant amount of diff --git a/bazel/README.md b/bazel/README.md index 276075061551b..6c429f913d029 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -8,12 +8,7 @@ independently sourced, the following steps should be followed: 1. Install the latest version of [Bazel](https://bazel.build/versions/master/docs/install.html) in your environment. 2. Configure, build and/or install the [Envoy dependencies](https://www.envoyproxy.io/docs/envoy/latest/install/building.html#requirements). -3. Configure a Bazel [WORKSPACE](https://bazel.build/versions/master/docs/be/workspace.html) - to point Bazel at the Envoy dependencies. 
An example is provided in the CI Docker image - [WORKSPACE](https://github.com/envoyproxy/envoy/blob/master/ci/WORKSPACE) and corresponding - [BUILD](https://github.com/envoyproxy/envoy/blob/master/ci/prebuilt/BUILD) files. -4. `bazel build --package_path %workspace%: //source/exe:envoy-static` - from the directory containing your WORKSPACE. +3. `bazel build //source/exe:envoy-static` from the repository root. ## Quick start Bazel build for developers @@ -33,8 +28,9 @@ for how to update or override dependencies. sudo apt-get install \ libtool \ cmake \ - clang-format-7 \ + clang-format-8 \ automake \ + autoconf \ make \ ninja-build \ curl \ @@ -49,9 +45,9 @@ for how to update or override dependencies. On macOS, you'll need to install several dependencies. This can be accomplished via [Homebrew](https://brew.sh/): ``` - brew install coreutils wget cmake libtool go bazel automake ninja llvm@7 + brew install coreutils wget cmake libtool go bazel automake ninja clang-format autoconf aspell ``` - _notes_: `coreutils` is used for `realpath`, `gmd5sum` and `gsha256sum`; `llvm@7` is used for `clang-format` + _notes_: `coreutils` is used for `realpath`, `gmd5sum` and `gsha256sum` Envoy compiles and passes tests with the version of clang installed by XCode 9.3.0: Apple LLVM version 9.1.0 (clang-902.0.30). @@ -67,6 +63,10 @@ for how to update or override dependencies. Alternatively, you can pass `--action_env` on the command line when running `bazel build`/`bazel test`. + Having the binutils keg installed in Brew is known to cause issues due to putting an incompatible + version of `ar` on the PATH, so if you run into issues building third party code like luajit + consider uninstalling binutils. + 1. Install Golang on your machine. This is required as part of building [BoringSSL](https://boringssl.googlesource.com/boringssl/+/HEAD/BUILDING.md) and also for [Buildifer](https://github.com/bazelbuild/buildtools) which is used for formatting bazel BUILD files. 1. 
`go get -u github.com/bazelbuild/buildtools/buildifier` to install buildifier. You may need to set `BUILDIFIER_BIN` to `$GOPATH/bin/buildifier` @@ -93,8 +93,8 @@ export CXX=clang++ bazel build --config=libc++ //source/exe:envoy-static ``` Note: this assumes that both: clang compiler and libc++ library are installed in the system, -and that `clang` and `clang++` are available in `$PATH`. On some systems, exports might need -to be changed to versioned binaries, e.g. `CC=clang-7` and `CXX=clang++-7`. +and that `clang` and `clang++` are available in `$PATH`. On some systems, you might need to +include them in the search path, e.g. `export PATH=/usr/lib/llvm-7/bin:$PATH`. You might also need to ensure libc++ is installed correctly on your system, e.g. on Ubuntu this might look like `sudo apt-get install libc++abi-7-dev libc++-7-dev`. @@ -263,8 +263,8 @@ Use `RUN_REMOTE=yes` when you don't want to run against your local docker instan will need to override a few environment variables to set up the remote docker. The list of variables can be found in the [Documentation](https://docs.docker.com/engine/reference/commandline/cli/). -Use `LOCAL_MOUNT=yes` when you are not building with the envoy build container. This will ensure -that the libraries against which the tests dynmically link will be available and of the correct +Use `LOCAL_MOUNT=yes` when you are not building with the Envoy build container. This will ensure +that the libraries against which the tests dynamically link will be available and of the correct version. ## Examples @@ -326,6 +326,12 @@ Similarly, for [thread sanitizer (TSAN)](https://github.com/google/sanitizers/wi bazel test -c dbg --config=clang-tsan //test/... ``` +To run the sanitizers on OS X, prefix `macos-` to the config option, e.g.: + +``` +bazel test -c dbg --config=macos-asan //test/... +``` + ## Log Verbosity Log verbosity is controlled at runtime in all builds. 
@@ -354,7 +360,13 @@ The following optional features can be enabled on the Bazel build command-line: `--define log_debug_assert_in_release=enabled`. The default behavior is to compile debug assertions out of release builds so that the condition is not evaluated. This option has no effect in debug builds. * memory-debugging (scribbling over memory after allocation and before freeing) with - `--define tcmalloc=debug`. + `--define tcmalloc=debug`. Note this option cannot be used with FIPS-compliant mode BoringSSL. +* Default [path normalization](https://github.com/envoyproxy/envoy/issues/6435) with + `--define path_normalization_by_default=true`. Note this can still be disabled by explicit xDS config. +* Manual stamping via VersionInfo with `--define manual_stamp=manual_stamp`. + This is needed if the `version_info_lib` is compiled via a non-binary bazel rule, e.g. `envoy_cc_library`. + Otherwise, the linker will fail to resolve symbols that are included via the `linkstamp` rule, which is only available to binary targets. + This is being tracked as a feature in: https://github.com/envoyproxy/envoy/issues/6859. ## Disabling extensions @@ -398,18 +410,6 @@ local_repository( ... ``` -## Stats Tunables - -The default maximum number of stats in shared memory, and the default -maximum length of a cluster/route config/listener name, can be -overridden at compile-time by defining `ENVOY_DEFAULT_MAX_STATS` and -`ENVOY_DEFAULT_MAX_OBJ_NAME_LENGTH`, respectively, to the desired -value. For example: - -``` -bazel build --copt=-DENVOY_DEFAULT_MAX_STATS=32768 --copt=-DENVOY_DEFAULT_MAX_OBJ_NAME_LENGTH=150 //source/exe:envoy-static -``` - # Release builds Release builds should be built in `opt` mode, processed with `strip` and have a @@ -524,7 +524,7 @@ to run clang-format scripts on your workstation directly: * Type-ahead doesn't always work when waiting running a command through docker To run the tools directly, you must install the correct version of clang.
This may change over time but as of January 2019, -[clang+llvm-7.0.0](http://releases.llvm.org/download.html) works well. You must +[clang+llvm-7.0.0](https://releases.llvm.org/download.html) works well. You must also have 'buildifier' installed from the bazel distribution. Edit the paths shown here to reflect the installation locations on your system: @@ -548,28 +548,6 @@ Once this is set up, you can run clang-tidy without docker: Setting up an HTTP cache for Bazel output helps optimize Bazel performance and resource usage when using multiple compilation modes or multiple trees. -## Setup common `envoy_deps` - -This step sets up the common `envoy_deps` allowing HTTP or disk cache (described below) to work -across working trees in different paths. Also it allows new working trees to skip dependency -compilation. The drawback is that the cached dependencies won't be updated automatically, so make -sure all your working trees have same (or compatible) dependencies, and run this step periodically -to update them. - -Make sure you don't have `--override_repository` in your `.bazelrc` when you run this step. - -``` -bazel fetch //test/... -cp -LR $(bazel info output_base)/external/envoy_deps ${HOME}/envoy_deps_cache -``` - -Adding the following parameter to Bazel everytime or persist them in `.bazelrc`, note you will need to expand -the environment variables for `.bazelrc`. 
- -``` ---override_repository=envoy_deps=${HOME}/envoy_deps_cache -``` - ## Setup local cache You may use any [Remote Caching](https://docs.bazel.build/versions/master/remote-caching.html) backend diff --git a/bazel/api_repositories.bzl b/bazel/api_repositories.bzl new file mode 100644 index 0000000000000..016fb16c8a2ee --- /dev/null +++ b/bazel/api_repositories.bzl @@ -0,0 +1,35 @@ +def _default_envoy_api_impl(ctx): + ctx.file("WORKSPACE", "") + ctx.file("BUILD.bazel", "") + api_dirs = [ + "bazel", + "docs", + "envoy", + "examples", + "test", + "tools", + ] + for d in api_dirs: + ctx.symlink(ctx.path(ctx.attr.api).dirname.get_child(d), d) + +_default_envoy_api = repository_rule( + implementation = _default_envoy_api_impl, + attrs = { + "api": attr.label(default = "@envoy//api:BUILD"), + }, +) + +def envoy_api_dependencies(): + # Treat the data plane API as an external repo, this simplifies exporting the API to + # https://github.com/envoyproxy/data-plane-api. + if "envoy_api" not in native.existing_rules().keys(): + _default_envoy_api(name = "envoy_api") + + native.bind( + name = "api_httpbody_protos", + actual = "@googleapis//:api_httpbody_protos", + ) + native.bind( + name = "http_api_protos", + actual = "@googleapis//:http_api_protos", + ) diff --git a/bazel/cc_configure.bzl b/bazel/cc_configure.bzl index 436ef8c79e28d..9122735d9a2da 100644 --- a/bazel/cc_configure.bzl +++ b/bazel/cc_configure.bzl @@ -63,6 +63,7 @@ def _build_envoy_cc_wrapper(repository_ctx): # and escaping will be handled correctly. 
repository_ctx.template("extra_tools/envoy_cc_wrapper", repository_ctx.attr._envoy_cc_wrapper, { "{ENVOY_REAL_CC}": repr(str(real_cc)), + "{ENVOY_CFLAGS}": repr(str(repository_ctx.os.environ.get("CFLAGS", ""))), "{ENVOY_REAL_CXX}": repr(str(real_cxx)), "{ENVOY_CXXFLAGS}": repr(str(repository_ctx.os.environ.get("CXXFLAGS", ""))), }) @@ -106,6 +107,7 @@ cc_autoconf = repository_rule( "BAZEL_LLVM", "USE_CLANG_CL", "CC", + "CFLAGS", "CXX", "CXXFLAGS", "CC_CONFIGURE_DEBUG", diff --git a/bazel/cc_wrapper.py b/bazel/cc_wrapper.py index bd7098950dad2..d31847904d2f3 100755 --- a/bazel/cc_wrapper.py +++ b/bazel/cc_wrapper.py @@ -7,6 +7,7 @@ envoy_real_cc = {ENVOY_REAL_CC} envoy_real_cxx = {ENVOY_REAL_CXX} +envoy_cflags = {ENVOY_CFLAGS} envoy_cxxflags = {ENVOY_CXXFLAGS} @@ -27,33 +28,31 @@ def sanitize_flagfile(in_path, out_fd): os.write(out_fd, "-lc++\n") +# Is the arg a flag indicating that we're building for C++ (rather than C)? +def is_cpp_flag(arg): + return arg in ["-static-libstdc++", "-stdlib=libc++", "-lstdc++", "-lc++" + ] or arg.startswith("-std=c++") or arg.startswith("-std=gnu++") + + def main(): # Append CXXFLAGS to correctly detect include paths for either libstdc++ or libc++. if sys.argv[1:5] == ["-E", "-xc++", "-", "-v"]: os.execv(envoy_real_cxx, [envoy_real_cxx] + sys.argv[1:] + shlex.split(envoy_cxxflags)) - # `g++` and `gcc -lstdc++` have similar behavior and Bazel treats them as - # interchangeable, but `gcc` will ignore the `-static-libstdc++` flag. - # This check lets Envoy statically link against libstdc++ to be more - # portable between installed glibc versions. - # - # Similar behavior exists for Clang's `-stdlib=libc++` flag, so we handle - # it in the same test. - if ("-static-libstdc++" in sys.argv[1:] or "-stdlib=libc++" in sys.argv[1:] or - "-std=c++0x" in sys.argv[1:]): + # Detect if we're building for C++ or vanilla C. 
+ if any(map(is_cpp_flag, sys.argv[1:])): compiler = envoy_real_cxx + # Append CXXFLAGS to all C++ targets (this is mostly for dependencies). + argv = shlex.split(envoy_cxxflags) else: compiler = envoy_real_cc + # Append CFLAGS to all C targets (this is mostly for dependencies). + argv = shlex.split(envoy_cflags) # Either: # a) remove all occurrences of -lstdc++ (when statically linking against libstdc++), # b) replace all occurrences of -lstdc++ with -lc++ (when linking against libc++). if "-static-libstdc++" in sys.argv[1:] or "-stdlib=libc++" in envoy_cxxflags: - # Append CXXFLAGS to all C++ targets (this is mostly for dependencies). - if envoy_cxxflags and "-std=c++" in str(sys.argv[1:]): - argv = shlex.split(envoy_cxxflags) - else: - argv = [] for arg in sys.argv[1:]: if arg == "-lstdc++": if "-stdlib=libc++" in envoy_cxxflags: @@ -70,7 +69,12 @@ def main(): else: argv.append(arg) else: - argv = sys.argv[1:] + argv += sys.argv[1:] + + # Bazel will add -fuse-ld=gold in some cases, gcc/clang will take the last -fuse-ld argument, + # so whenever we see lld once, add it to the end. 
+ if "-fuse-ld=lld" in argv: + argv.append("-fuse-ld=lld") # Add compiler-specific options if "clang" in compiler: diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl index 1300856e28d41..a353f9eab6af2 100644 --- a/bazel/envoy_build_system.bzl +++ b/bazel/envoy_build_system.bzl @@ -1,6 +1,7 @@ load("@com_google_protobuf//:protobuf.bzl", "cc_proto_library", "py_proto_library") load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library") load("@rules_foreign_cc//tools/build_defs:cmake.bzl", "cmake_external") +load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar") def envoy_package(): native.package(default_visibility = ["//visibility:public"]) @@ -79,11 +80,18 @@ def envoy_copts(repository, test = False): "//conditions:default": [], }) + select({ # TCLAP command line parser needs this to support int64_t/uint64_t - "@bazel_tools//tools/osx:darwin": ["-DHAVE_LONG_LONG"], + repository + "//bazel:apple": ["-DHAVE_LONG_LONG"], "//conditions:default": [], }) + envoy_select_hot_restart(["-DENVOY_HOT_RESTART"], repository) + \ envoy_select_perf_annotation(["-DENVOY_PERF_ANNOTATION"]) + \ - envoy_select_google_grpc(["-DENVOY_GOOGLE_GRPC"], repository) + envoy_select_google_grpc(["-DENVOY_GOOGLE_GRPC"], repository) + \ + envoy_select_path_normalization_by_default(["-DENVOY_NORMALIZE_PATH_BY_DEFAULT"], repository) + +def envoy_linkstatic(): + return select({ + "@envoy//bazel:asan_build": 0, + "//conditions:default": 1, + }) def envoy_static_link_libstdcpp_linkopts(): return envoy_select_force_libcpp( @@ -97,8 +105,8 @@ def envoy_static_link_libstdcpp_linkopts(): def envoy_linkopts(): return select({ # The macOS system library transitively links common libraries (e.g., pthread). 
- "@bazel_tools//tools/osx:darwin": [ - # See note here: http://luajit.org/install.html + "@envoy//bazel:apple": [ + # See note here: https://luajit.org/install.html "-pagezero_size 10000", "-image_base 100000000", ], @@ -126,7 +134,7 @@ def _envoy_stamped_linkopts(): # macOS doesn't have an official equivalent to the `.note.gnu.build-id` # ELF section, so just stuff the raw ID into a new text section. - "@bazel_tools//tools/osx:darwin": [ + "@envoy//bazel:apple": [ "-sectcreate __TEXT __build_id", "$(location @envoy//bazel:raw_build_id.ldscript)", ], @@ -139,7 +147,7 @@ def _envoy_stamped_linkopts(): def _envoy_stamped_deps(): return select({ - "@bazel_tools//tools/osx:darwin": [ + "@envoy//bazel:apple": [ "@envoy//bazel:raw_build_id.ldscript", ], "//conditions:default": [ @@ -150,8 +158,8 @@ def _envoy_stamped_deps(): # Compute the test linkopts based on various options. def envoy_test_linkopts(): return select({ - "@bazel_tools//tools/osx:darwin": [ - # See note here: http://luajit.org/install.html + "@envoy//bazel:apple": [ + # See note here: https://luajit.org/install.html "-pagezero_size 10000", "-image_base 100000000", ], @@ -174,8 +182,7 @@ def envoy_external_dep_path(dep): def tcmalloc_external_dep(repository): return select({ repository + "//bazel:disable_tcmalloc": None, - repository + "//bazel:debug_tcmalloc": envoy_external_dep_path("tcmalloc_debug"), - "//conditions:default": envoy_external_dep_path("tcmalloc_and_profiler"), + "//conditions:default": envoy_external_dep_path("gperftools"), }) # As above, but wrapped in list form for adding to dep lists. 
This smell seems needed as @@ -184,8 +191,7 @@ def tcmalloc_external_dep(repository): def tcmalloc_external_deps(repository): return select({ repository + "//bazel:disable_tcmalloc": [], - repository + "//bazel:debug_tcmalloc": [envoy_external_dep_path("tcmalloc_debug")], - "//conditions:default": [envoy_external_dep_path("tcmalloc_and_profiler")], + "//conditions:default": [envoy_external_dep_path("gperftools")], }) # Transform the package path (e.g. include/envoy/common) into a path for @@ -196,11 +202,17 @@ def envoy_include_prefix(path): return "/".join(path.split("/")[1:]) return None +def filter_windows_keys(cache_entries = {}): + # On Windows, we don't want to explicitly set CMAKE_BUILD_TYPE, + # rules_foreign_cc will figure it out for us + return {key: cache_entries[key] for key in cache_entries.keys() if key != "CMAKE_BUILD_TYPE"} + # External CMake C++ library targets should be specified with this function. This defaults # to building the dependencies with ninja def envoy_cmake_external( name, cache_entries = {}, + debug_cache_entries = {}, cmake_options = ["-GNinja"], make_commands = ["ninja", "ninja install"], lib_source = "", @@ -208,10 +220,10 @@ def envoy_cmake_external( static_libraries = [], copy_pdb = False, pdb_name = "", - cmake_files_dir = "$BUILD_TMPDIR/CMakeFiles"): - # On Windows, we don't want to explicitly set CMAKE_BUILD_TYPE, - # rules_foreign_cc will figure it out for us - cache_entries_no_build_type = {key: cache_entries[key] for key in cache_entries.keys() if key != "CMAKE_BUILD_TYPE"} + cmake_files_dir = "$BUILD_TMPDIR/CMakeFiles", + **kwargs): + cache_entries_debug = dict(cache_entries) + cache_entries_debug.update(debug_cache_entries) pf = "" if copy_pdb: @@ -232,8 +244,10 @@ def envoy_cmake_external( cmake_external( name = name, cache_entries = select({ - "@envoy//bazel:windows_x86_64": cache_entries_no_build_type, - "//conditions:default": cache_entries, + "@envoy//bazel:windows_opt_build": filter_windows_keys(cache_entries), + 
"@envoy//bazel:windows_x86_64": filter_windows_keys(cache_entries_debug), + "@envoy//bazel:opt_build": cache_entries, + "//conditions:default": cache_entries_debug, }), cmake_options = cmake_options, generate_crosstool_file = select({ @@ -244,6 +258,7 @@ def envoy_cmake_external( make_commands = make_commands, postfix_script = pf, static_libraries = static_libraries, + **kwargs ) # Envoy C++ library targets that need no transformations or additional dependencies before being @@ -327,7 +342,7 @@ def envoy_cc_library( ], include_prefix = envoy_include_prefix(native.package_name()), alwayslink = 1, - linkstatic = 1, + linkstatic = envoy_linkstatic(), linkstamp = select({ repository + "//bazel:windows_x86_64": None, "//conditions:default": linkstamp, @@ -367,8 +382,6 @@ def envoy_cc_binary( deps = deps, ) -load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar") - # Envoy C++ fuzz test targes. These are not included in coverage runs. def envoy_cc_fuzz_test(name, corpus, deps = [], tags = [], **kwargs): if not (corpus.startswith("//") or corpus.startswith(":")): @@ -400,7 +413,7 @@ def envoy_cc_fuzz_test(name, corpus, deps = [], tags = [], **kwargs): data = [corpus_name], # No fuzzing on macOS. 
deps = select({ - "@bazel_tools//tools/osx:darwin": ["//test:dummy_main"], + "@envoy//bazel:apple": ["//test:dummy_main"], "//conditions:default": [ ":" + test_lib_name, "//test/fuzz:main", @@ -434,29 +447,34 @@ def envoy_cc_test( deps = [], tags = [], args = [], + copts = [], shard_count = None, coverage = True, - local = False): + local = False, + size = "medium"): test_lib_tags = [] if coverage: test_lib_tags.append("coverage_test_lib") - envoy_cc_test_library( - name = name + "_lib", + _envoy_cc_test_infrastructure_library( + name = name + "_lib_internal_only", srcs = srcs, data = data, external_deps = external_deps, - deps = deps, + deps = deps + [repository + "//test/test_common:printers_includes"], repository = repository, tags = test_lib_tags, + copts = copts, + # Restrict only to the code coverage tools. + visibility = ["@envoy//test/coverage:__pkg__"], ) native.cc_test( name = name, - copts = envoy_copts(repository, test = True), + copts = envoy_copts(repository, test = True) + copts, linkopts = envoy_test_linkopts(), - linkstatic = 1, + linkstatic = envoy_linkstatic(), malloc = tcmalloc_external_dep(repository), deps = [ - ":" + name + "_lib", + ":" + name + "_lib_internal_only", repository + "//test:main", ], # from https://github.com/google/googletest/blob/6e1970e2376c14bf658eb88f655a054030353f9f/googlemock/src/gmock.cc#L51 @@ -465,11 +483,12 @@ def envoy_cc_test( tags = tags + ["coverage_test"], local = local, shard_count = shard_count, + size = size, ) # Envoy C++ related test infrastructure (that want gtest, gmock, but may be # relied on by envoy_cc_test_library) should use this function. 
-def envoy_cc_test_infrastructure_library( +def _envoy_cc_test_infrastructure_library( name, srcs = [], hdrs = [], @@ -477,20 +496,25 @@ def envoy_cc_test_infrastructure_library( external_deps = [], deps = [], repository = "", - tags = []): + tags = [], + include_prefix = None, + copts = [], + **kargs): native.cc_library( name = name, srcs = srcs, hdrs = hdrs, data = data, - copts = envoy_copts(repository, test = True), + copts = envoy_copts(repository, test = True) + copts, testonly = 1, deps = deps + [envoy_external_dep_path(dep) for dep in external_deps] + [ envoy_external_dep_path("googletest"), ], tags = tags, + include_prefix = include_prefix, alwayslink = 1, - linkstatic = 1, + linkstatic = envoy_linkstatic(), + **kargs ) # Envoy C++ test related libraries (that want gtest, gmock) should be specified @@ -503,12 +527,14 @@ def envoy_cc_test_library( external_deps = [], deps = [], repository = "", - tags = []): + tags = [], + include_prefix = None, + copts = [], + **kargs): deps = deps + [ repository + "//test/test_common:printers_includes", - repository + "//test/test_common:test_base", ] - envoy_cc_test_infrastructure_library( + _envoy_cc_test_infrastructure_library( name, srcs, hdrs, @@ -517,6 +543,10 @@ def envoy_cc_test_library( deps, repository, tags, + include_prefix, + copts, + visibility = ["//visibility:public"], + **kargs ) # Envoy test binaries should be specified with this function. 
@@ -589,7 +619,7 @@ def envoy_proto_library(name, external_deps = [], **kwargs): if "api_httpbody_protos" in external_deps: external_cc_proto_deps.append("@googleapis//:api_httpbody_protos") external_proto_deps.append("@googleapis//:api_httpbody_protos_proto") - return api_proto_library( + api_proto_library( name, external_cc_proto_deps = external_cc_proto_deps, external_proto_deps = external_proto_deps, @@ -635,10 +665,17 @@ def envoy_proto_descriptor(name, out, srcs = [], external_deps = []): def envoy_select_hot_restart(xs, repository = ""): return select({ repository + "//bazel:disable_hot_restart": [], - "@bazel_tools//tools/osx:darwin": [], + repository + "//bazel:apple": [], "//conditions:default": xs, }) +# Select the given values if default path normalization is on in the current build. +def envoy_select_path_normalization_by_default(xs, repository = ""): + return select({ + repository + "//bazel:enable_path_normalization_by_default": xs, + "//conditions:default": [], + }) + def envoy_select_perf_annotation(xs): return select({ "@envoy//bazel:enable_perf_annotation": xs, @@ -652,6 +689,10 @@ def envoy_select_google_grpc(xs, repository = ""): "//conditions:default": xs, }) +# Dependencies on Google grpc should be wrapped with this function. +def envoy_google_grpc_external_deps(): + return envoy_select_google_grpc([envoy_external_dep_path("grpc")]) + # Select the given values if exporting is enabled in the current build. 
def envoy_select_exported_symbols(xs): return select({ @@ -662,7 +703,7 @@ def envoy_select_exported_symbols(xs): def envoy_select_force_libcpp(if_libcpp, default = None): return select({ "@envoy//bazel:force_libcpp": if_libcpp, - "@bazel_tools//tools/osx:darwin": [], + "@envoy//bazel:apple": [], "@envoy//bazel:windows_x86_64": [], "//conditions:default": default or [], }) @@ -672,9 +713,3 @@ def envoy_select_boringssl(if_fips, default = None): return select({ "@envoy//bazel:boringssl_fips": if_fips, "//conditions:default": default or [], }) - -def envoy_select_quiche(xs, repository = ""): - return select({ - repository + "//bazel:enable_quiche": xs, - "//conditions:default": [], - }) diff --git a/bazel/external/boringssl_fips.genrule_cmd b/bazel/external/boringssl_fips.genrule_cmd index afa0744ca122c..d498d2ffd3db5 100644 --- a/bazel/external/boringssl_fips.genrule_cmd +++ b/bazel/external/boringssl_fips.genrule_cmd @@ -16,7 +16,7 @@ ROOT=$$(dirname $(rootpath boringssl/BUILDING.md))/.. pushd $$ROOT # Build tools requirements: -# - Clang compiler version 6.0.1 (http://releases.llvm.org/download.html) +# - Clang compiler version 6.0.1 (https://releases.llvm.org/download.html) # - Go programming language version 1.10.3 (https://golang.org/dl/) # - Ninja build system version 1.8.2 (https://github.com/ninja-build/ninja/releases) diff --git a/bazel/external/http-parser.BUILD b/bazel/external/http-parser.BUILD index 523d94fbf4316..303950d7c00b6 100644 --- a/bazel/external/http-parser.BUILD +++ b/bazel/external/http-parser.BUILD @@ -7,6 +7,10 @@ cc_library( "http_parser.h", ], hdrs = ["http_parser.h"], + # This compiler flag is set to an arbitrarily high number so + # as to effectively disable the http_parser header limit, as + # we do our own checks in the conn manager and codec.
+ copts = ["-DHTTP_MAX_HEADER_SIZE=0x2000000"], includes = ["."], visibility = ["//visibility:public"], ) diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index 9a3a6d9a46465..26fed2be71278 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -28,7 +28,9 @@ licenses(["notice"]) # Apache 2 load(":genrule_cmd.bzl", "genrule_cmd") load( "@envoy//bazel:envoy_build_system.bzl", + "envoy_cc_library", "envoy_cc_test", + "envoy_cc_test_library", ) src_files = glob([ @@ -47,61 +49,112 @@ genrule( visibility = ["//visibility:private"], ) -cc_library( +quiche_copt = ["-Wno-unused-parameter"] + +envoy_cc_test_library( + name = "http2_platform_reconstruct_object", + hdrs = ["quiche/http2/platform/api/http2_reconstruct_object.h"], + repository = "@envoy", + deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:http2_platform_reconstruct_object_impl_lib"], +) + +envoy_cc_test_library( + name = "http2_test_tools_random", + srcs = ["quiche/http2/test_tools/http2_random.cc"], + hdrs = ["quiche/http2/test_tools/http2_random.h"], + external_deps = ["ssl"], + repository = "@envoy", + deps = [":http2_platform"], +) + +envoy_cc_library( name = "http2_platform", hdrs = [ "quiche/http2/platform/api/http2_arraysize.h", + "quiche/http2/platform/api/http2_bug_tracker.h", "quiche/http2/platform/api/http2_containers.h", "quiche/http2/platform/api/http2_estimate_memory_usage.h", "quiche/http2/platform/api/http2_export.h", "quiche/http2/platform/api/http2_flag_utils.h", + "quiche/http2/platform/api/http2_flags.h", + "quiche/http2/platform/api/http2_logging.h", "quiche/http2/platform/api/http2_macros.h", "quiche/http2/platform/api/http2_optional.h", "quiche/http2/platform/api/http2_ptr_util.h", "quiche/http2/platform/api/http2_string.h", "quiche/http2/platform/api/http2_string_piece.h", + "quiche/http2/platform/api/http2_string_utils.h", # TODO: uncomment the following files as implementations are added. 
- # "quiche/http2/platform/api/http2_bug_tracker.h", - # "quiche/http2/platform/api/http2_flags.h", - # "quiche/http2/platform/api/http2_mock_log.h", - # "quiche/http2/platform/api/http2_reconstruct_object.h", - # "quiche/http2/platform/api/http2_string_utils.h", # "quiche/http2/platform/api/http2_test_helpers.h", ], + repository = "@envoy", visibility = ["//visibility:public"], - deps = [ - "@envoy//source/extensions/quic_listeners/quiche/platform:http2_platform_impl_lib", - ], + deps = ["@envoy//source/extensions/quic_listeners/quiche/platform:http2_platform_impl_lib"], ) -cc_library( +envoy_cc_library( name = "spdy_platform", hdrs = [ "quiche/spdy/platform/api/spdy_arraysize.h", + "quiche/spdy/platform/api/spdy_bug_tracker.h", "quiche/spdy/platform/api/spdy_containers.h", "quiche/spdy/platform/api/spdy_endianness_util.h", "quiche/spdy/platform/api/spdy_estimate_memory_usage.h", "quiche/spdy/platform/api/spdy_export.h", + "quiche/spdy/platform/api/spdy_flags.h", + "quiche/spdy/platform/api/spdy_logging.h", + "quiche/spdy/platform/api/spdy_mem_slice.h", "quiche/spdy/platform/api/spdy_ptr_util.h", "quiche/spdy/platform/api/spdy_string.h", "quiche/spdy/platform/api/spdy_string_piece.h", - # TODO: uncomment the following files as implementations are added. 
- # "quiche/spdy/platform/api/spdy_flags.h", - # "quiche/spdy/platform/api/spdy_mem_slice.h", - # "quiche/spdy/platform/api/spdy_string_utils.h", + "quiche/spdy/platform/api/spdy_string_utils.h", ], + repository = "@envoy", visibility = ["//visibility:public"], - deps = [ - "@envoy//source/extensions/quic_listeners/quiche/platform:spdy_platform_impl_lib", - ], + deps = ["@envoy//source/extensions/quic_listeners/quiche/platform:spdy_platform_impl_lib"], ) -cc_library( +envoy_cc_library( + name = "spdy_simple_arena_lib", + srcs = ["quiche/spdy/core/spdy_simple_arena.cc"], + hdrs = ["quiche/spdy/core/spdy_simple_arena.h"], + repository = "@envoy", + visibility = ["//visibility:public"], + deps = [":spdy_platform"], +) + +envoy_cc_test_library( + name = "spdy_platform_test_helpers", + hdrs = ["quiche/spdy/platform/api/spdy_test_helpers.h"], + repository = "@envoy", + deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:spdy_platform_test_helpers_impl_lib"], +) + +envoy_cc_library( + name = "spdy_platform_unsafe_arena_lib", + hdrs = ["quiche/spdy/platform/api/spdy_unsafe_arena.h"], + repository = "@envoy", + visibility = ["//visibility:public"], + deps = ["@envoy//source/extensions/quic_listeners/quiche/platform:spdy_platform_unsafe_arena_impl_lib"], +) + +envoy_cc_library( name = "quic_platform", - srcs = ["quiche/quic/platform/api/quic_mutex.cc"], + srcs = [ + "quiche/quic/platform/api/quic_clock.cc", + "quiche/quic/platform/api/quic_file_utils.cc", + "quiche/quic/platform/api/quic_hostname_utils.cc", + "quiche/quic/platform/api/quic_mutex.cc", + ], hdrs = [ + "quiche/quic/platform/api/quic_cert_utils.h", + "quiche/quic/platform/api/quic_clock.h", + "quiche/quic/platform/api/quic_file_utils.h", + "quiche/quic/platform/api/quic_hostname_utils.h", "quiche/quic/platform/api/quic_mutex.h", + "quiche/quic/platform/api/quic_pcc_sender.h", ], + repository = "@envoy", visibility = ["//visibility:public"], deps = [ ":quic_platform_base", @@ -109,71 +162,250 @@ 
cc_library( ], ) -cc_library( +envoy_cc_test_library( + name = "quic_platform_epoll_lib", + hdrs = ["quiche/quic/platform/api/quic_epoll.h"], + repository = "@envoy", + deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_epoll_impl_lib"], +) + +envoy_cc_test_library( + name = "quic_platform_expect_bug", + hdrs = ["quiche/quic/platform/api/quic_expect_bug.h"], + repository = "@envoy", + deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_expect_bug_impl_lib"], +) + +envoy_cc_library( + name = "quic_platform_export", + hdrs = ["quiche/quic/platform/api/quic_export.h"], + repository = "@envoy", + visibility = ["//visibility:public"], + deps = ["@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_export_impl_lib"], +) + +envoy_cc_test_library( + name = "quic_platform_mock_log", + hdrs = ["quiche/quic/platform/api/quic_mock_log.h"], + repository = "@envoy", + deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_mock_log_impl_lib"], +) + +envoy_cc_test_library( + name = "quic_platform_port_utils", + hdrs = ["quiche/quic/platform/api/quic_port_utils.h"], + repository = "@envoy", + deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_port_utils_impl_lib"], +) + +envoy_cc_test_library( + name = "quic_platform_sleep", + hdrs = ["quiche/quic/platform/api/quic_sleep.h"], + repository = "@envoy", + deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_sleep_impl_lib"], +) + +envoy_cc_test_library( + name = "quic_platform_test", + hdrs = ["quiche/quic/platform/api/quic_test.h"], + repository = "@envoy", + deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_test_impl_lib"], +) + +envoy_cc_test_library( + name = "quic_platform_test_output", + hdrs = ["quiche/quic/platform/api/quic_test_output.h"], + repository = "@envoy", + deps = 
["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_test_output_impl_lib"], +) + +envoy_cc_test_library( + name = "quic_platform_system_event_loop", + hdrs = ["quiche/quic/platform/api/quic_system_event_loop.h"], + repository = "@envoy", + deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_system_event_loop_impl_lib"], +) + +envoy_cc_test_library( + name = "quic_platform_thread", + hdrs = ["quiche/quic/platform/api/quic_thread.h"], + repository = "@envoy", + deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_thread_impl_lib"], +) + +envoy_cc_library( name = "quic_platform_base", hdrs = [ "quiche/quic/platform/api/quic_aligned.h", "quiche/quic/platform/api/quic_arraysize.h", + "quiche/quic/platform/api/quic_bug_tracker.h", "quiche/quic/platform/api/quic_client_stats.h", "quiche/quic/platform/api/quic_containers.h", "quiche/quic/platform/api/quic_endian.h", "quiche/quic/platform/api/quic_estimate_memory_usage.h", - "quiche/quic/platform/api/quic_export.h", + "quiche/quic/platform/api/quic_exported_stats.h", "quiche/quic/platform/api/quic_fallthrough.h", "quiche/quic/platform/api/quic_flag_utils.h", + "quiche/quic/platform/api/quic_flags.h", "quiche/quic/platform/api/quic_iovec.h", "quiche/quic/platform/api/quic_logging.h", + "quiche/quic/platform/api/quic_map_util.h", "quiche/quic/platform/api/quic_prefetch.h", "quiche/quic/platform/api/quic_ptr_util.h", + "quiche/quic/platform/api/quic_reference_counted.h", + "quiche/quic/platform/api/quic_server_stats.h", + "quiche/quic/platform/api/quic_stack_trace.h", "quiche/quic/platform/api/quic_str_cat.h", - "quiche/quic/platform/api/quic_string.h", + "quiche/quic/platform/api/quic_stream_buffer_allocator.h", "quiche/quic/platform/api/quic_string_piece.h", "quiche/quic/platform/api/quic_string_utils.h", "quiche/quic/platform/api/quic_uint128.h", + "quiche/quic/platform/api/quic_text_utils.h", # TODO: uncomment the following files as implementations are 
added. - # "quiche/quic/platform/api/quic_bug_tracker.h", - # "quiche/quic/platform/api/quic_clock.h", - # "quiche/quic/platform/api/quic_expect_bug.h", - # "quiche/quic/platform/api/quic_exported_stats.h", - # "quiche/quic/platform/api/quic_file_utils.h", - # "quiche/quic/platform/api/quic_flags.h", # "quiche/quic/platform/api/quic_fuzzed_data_provider.h", - # "quiche/quic/platform/api/quic_goog_cc_sender.h", - # "quiche/quic/platform/api/quic_hostname_utils.h", - # "quiche/quic/platform/api/quic_interval.h", # "quiche/quic/platform/api/quic_ip_address_family.h", # "quiche/quic/platform/api/quic_ip_address.h", - # "quiche/quic/platform/api/quic_lru_cache.h", - # "quiche/quic/platform/api/quic_map_util.h", # "quiche/quic/platform/api/quic_mem_slice.h", # "quiche/quic/platform/api/quic_mem_slice_span.h", # "quiche/quic/platform/api/quic_mem_slice_storage.h", - # "quiche/quic/platform/api/quic_mock_log.h", - # "quiche/quic/platform/api/quic_pcc_sender.h", - # "quiche/quic/platform/api/quic_reference_counted.h", - # "quiche/quic/platform/api/quic_server_stats.h", - # "quiche/quic/platform/api/quic_singleton.h", - # "quiche/quic/platform/api/quic_sleep.h", # "quiche/quic/platform/api/quic_socket_address.h", - # "quiche/quic/platform/api/quic_stack_trace.h", - # "quiche/quic/platform/api/quic_test.h", # "quiche/quic/platform/api/quic_test_loopback.h", # "quiche/quic/platform/api/quic_test_mem_slice_vector.h", - # "quiche/quic/platform/api/quic_test_output.h", - # "quiche/quic/platform/api/quic_text_utils.h", - # "quiche/quic/platform/api/quic_thread.h", ], + repository = "@envoy", visibility = ["//visibility:public"], deps = [ + ":quic_platform_export", "@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_base_impl_lib", ], ) +envoy_cc_library( + name = "quic_core_buffer_allocator_lib", + srcs = [ + "quiche/quic/core/quic_buffer_allocator.cc", + "quiche/quic/core/quic_simple_buffer_allocator.cc", + ], + hdrs = [ + 
"quiche/quic/core/quic_buffer_allocator.h", + "quiche/quic/core/quic_simple_buffer_allocator.h", + ], + repository = "@envoy", + visibility = ["//visibility:public"], + deps = [":quic_platform_export"], +) + +envoy_cc_library( + name = "quic_core_error_codes_lib", + srcs = ["quiche/quic/core/quic_error_codes.cc"], + hdrs = ["quiche/quic/core/quic_error_codes.h"], + copts = quiche_copt, + repository = "@envoy", + deps = [":quic_platform_export"], +) + +envoy_cc_library( + name = "quic_core_time_lib", + srcs = ["quiche/quic/core/quic_time.cc"], + hdrs = ["quiche/quic/core/quic_time.h"], + repository = "@envoy", + visibility = ["//visibility:public"], + deps = [":quic_platform_base"], +) + +envoy_cc_library( + name = "quic_core_types_lib", + srcs = [ + "quiche/quic/core/quic_connection_id.cc", + "quiche/quic/core/quic_packet_number.cc", + "quiche/quic/core/quic_types.cc", + ], + hdrs = [ + "quiche/quic/core/quic_connection_id.h", + "quiche/quic/core/quic_packet_number.h", + "quiche/quic/core/quic_types.h", + ], + copts = quiche_copt, + repository = "@envoy", + visibility = ["//visibility:public"], + deps = [ + ":quic_core_error_codes_lib", + ":quic_core_time_lib", + ":quic_platform_base", + ], +) + +envoy_cc_test_library( + name = "epoll_server_platform", + hdrs = [ + "quiche/epoll_server/platform/api/epoll_address_test_utils.h", + "quiche/epoll_server/platform/api/epoll_bug.h", + "quiche/epoll_server/platform/api/epoll_expect_bug.h", + "quiche/epoll_server/platform/api/epoll_export.h", + "quiche/epoll_server/platform/api/epoll_logging.h", + "quiche/epoll_server/platform/api/epoll_ptr_util.h", + "quiche/epoll_server/platform/api/epoll_test.h", + "quiche/epoll_server/platform/api/epoll_thread.h", + "quiche/epoll_server/platform/api/epoll_time.h", + ], + repository = "@envoy", + deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:epoll_server_platform_impl_lib"], +) + +envoy_cc_test_library( + name = "epoll_server_lib", + srcs = [ + 
"quiche/epoll_server/fake_simple_epoll_server.cc", + "quiche/epoll_server/simple_epoll_server.cc", + ], + hdrs = [ + "quiche/epoll_server/fake_simple_epoll_server.h", + "quiche/epoll_server/simple_epoll_server.h", + ], + copts = quiche_copt, + repository = "@envoy", + deps = [":epoll_server_platform"], +) + envoy_cc_test( - name = "quic_platform_test", - srcs = ["quiche/quic/platform/api/quic_string_utils_test.cc"], + name = "epoll_server_test", + srcs = ["quiche/epoll_server/simple_epoll_server_test.cc"], + copts = quiche_copt, + repository = "@envoy", + deps = [":epoll_server_lib"], +) + +envoy_cc_test( + name = "http2_platform_api_test", + srcs = [ + "quiche/http2/platform/api/http2_string_utils_test.cc", + "quiche/http2/test_tools/http2_random_test.cc", + ], + repository = "@envoy", + deps = [ + ":http2_platform", + ":http2_test_tools_random", + ], +) + +envoy_cc_test( + name = "spdy_platform_api_test", + srcs = ["quiche/spdy/platform/api/spdy_string_utils_test.cc"], + repository = "@envoy", + deps = [":spdy_platform"], +) + +envoy_cc_test( + name = "quic_platform_api_test", + srcs = [ + "quiche/quic/platform/api/quic_endian_test.cc", + "quiche/quic/platform/api/quic_reference_counted_test.cc", + "quiche/quic/platform/api/quic_string_utils_test.cc", + "quiche/quic/platform/api/quic_text_utils_test.cc", + ], repository = "@envoy", deps = [ ":quic_platform", + ":quic_platform_test", ], ) diff --git a/bazel/external/quiche.genrule_cmd b/bazel/external/quiche.genrule_cmd index 5295ff9ed8408..3a9a40f12362e 100644 --- a/bazel/external/quiche.genrule_cmd +++ b/bazel/external/quiche.genrule_cmd @@ -18,14 +18,34 @@ src_base_dir=$$(dirname $$(dirname $$(dirname $(rootpath quic/core/quic_constant # sed commands to apply to each source file. cat <sed_commands +# Rewrite include directives for testonly platform impl files. +/^#include/ s!net/http2/platform/impl/http2_reconstruct_object_impl.h!test/extensions/quic_listeners/quiche/platform/http2_reconstruct_object_impl.h! 
+/^#include/ s!net/quic/platform/impl/quic_expect_bug_impl.h!test/extensions/quic_listeners/quiche/platform/quic_expect_bug_impl.h! +/^#include/ s!net/quic/platform/impl/quic_mock_log_impl.h!test/extensions/quic_listeners/quiche/platform/quic_mock_log_impl.h! +/^#include/ s!net/quic/platform/impl/quic_port_utils_impl.h!test/extensions/quic_listeners/quiche/platform/quic_port_utils_impl.h! +/^#include/ s!net/quic/platform/impl/quic_sleep_impl.h!test/extensions/quic_listeners/quiche/platform/quic_sleep_impl.h! +/^#include/ s!net/quic/platform/impl/quic_system_event_loop_impl.h!test/extensions/quic_listeners/quiche/platform/quic_system_event_loop_impl.h! +/^#include/ s!net/quic/platform/impl/quic_test_impl.h!test/extensions/quic_listeners/quiche/platform/quic_test_impl.h! +/^#include/ s!net/quic/platform/impl/quic_test_output_impl.h!test/extensions/quic_listeners/quiche/platform/quic_test_output_impl.h! +/^#include/ s!net/quic/platform/impl/quic_thread_impl.h!test/extensions/quic_listeners/quiche/platform/quic_thread_impl.h! +/^#include/ s!net/spdy/platform/impl/spdy_test_helpers_impl.h!test/extensions/quic_listeners/quiche/platform/spdy_test_helpers_impl.h! + # Rewrite include directives for platform impl files. /^#include/ s!net/(http2|spdy|quic)/platform/impl/!extensions/quic_listeners/quiche/platform/! +# Rewrite include directives for epoll_server platform impl files. +/^#include/ s!net/tools/epoll_server/platform/impl!test/extensions/quic_listeners/quiche/platform/! + # Strip "net/third_party" from include directives to other QUICHE files. /^#include/ s!net/third_party/quiche/src/!quiche/! -# Rewrite gtest includes. +# Rewrite gmock & gtest includes. +/^#include/ s!testing/gmock/include/gmock/!gmock/! /^#include/ s!testing/gtest/include/gtest/!gtest/! + +# Rewrite third_party includes. +/^#include/ s!third_party/boringssl/src/include/!! 
+ EOF for src_file in $(SRCS); do diff --git a/bazel/external/sqlparser.BUILD b/bazel/external/sqlparser.BUILD index b45e9f7f88952..8e14f45e53605 100644 --- a/bazel/external/sqlparser.BUILD +++ b/bazel/external/sqlparser.BUILD @@ -7,5 +7,9 @@ cc_library( "include/**/*.h", "src/**/*.h", ]), + defines = select({ + "@envoy//bazel:windows_x86_64": ["YY_NO_UNISTD_H"], + "//conditions:default": [], + }), visibility = ["//visibility:public"], ) diff --git a/bazel/external/xxhash.BUILD b/bazel/external/xxhash.BUILD index 732f87a7b9374..5f8120dfee0f0 100644 --- a/bazel/external/xxhash.BUILD +++ b/bazel/external/xxhash.BUILD @@ -3,6 +3,9 @@ licenses(["notice"]) # Apache 2 cc_library( name = "xxhash", srcs = ["xxhash.c"], - hdrs = ["xxhash.h"], + hdrs = [ + "xxh3.h", + "xxhash.h", + ], visibility = ["//visibility:public"], ) diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index 03dfedcd42077..77f1fc9e86a26 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -1,9 +1,51 @@ licenses(["notice"]) # Apache 2 load("//bazel:envoy_build_system.bzl", "envoy_cmake_external", "envoy_package") +load("@rules_foreign_cc//tools/build_defs:configure.bzl", "configure_make") envoy_package() +configure_make( + name = "gperftools_build", + configure_options = [ + "--enable-shared=no", + "--enable-frame-pointers", + "--disable-libunwind", + ], + lib_source = "@com_github_gperftools_gperftools//:all", + linkopts = ["-lpthread"], + make_commands = ["make install-libLTLIBRARIES install-perftoolsincludeHEADERS"], + static_libraries = select({ + "//bazel:debug_tcmalloc": ["libtcmalloc_debug.a"], + "//conditions:default": ["libtcmalloc_and_profiler.a"], + }), +) + +# Workaround for https://github.com/bazelbuild/rules_foreign_cc/issues/227 +cc_library( + name = "gperftools", + deps = [ + "gperftools_build", + ], +) + +configure_make( + name = "luajit", + configure_command = "build.py", + configure_env_vars = select({ + # This shouldn't be needed! 
See + # https://github.com/envoyproxy/envoy/issues/6084 + # TODO(htuch): Remove when #6084 is fixed + "//bazel:asan_build": {"ENVOY_CONFIG_ASAN": "1"}, + "//conditions:default": {}, + }), + lib_source = "@com_github_luajit_luajit//:all", + make_commands = [], + static_libraries = [ + "libluajit-5.1.a", + ], +) + envoy_cmake_external( name = "ares", cache_entries = { @@ -20,33 +62,23 @@ envoy_cmake_external( }), ) -envoy_cmake_external( - name = "benchmark", - cache_entries = { - "BENCHMARK_ENABLE_GTEST_TESTS": "OFF", - "BENCHMARK_ENABLE_TESTING": "OFF", - }, - copy_pdb = True, - lib_source = "@com_github_google_benchmark//:all", - postfix_script = "mkdir -p $INSTALLDIR/include/testing/base/public && cp $BUILD_TMPDIR/$INSTALL_PREFIX/include/benchmark/benchmark.h $INSTALLDIR/include/testing/base/public/benchmark.h", - static_libraries = select({ - "//bazel:windows_x86_64": ["benchmark.lib"], - "//conditions:default": ["libbenchmark.a"], - }), -) - envoy_cmake_external( name = "event", cache_entries = { "EVENT__DISABLE_OPENSSL": "on", "EVENT__DISABLE_REGRESS": "on", + "EVENT__DISABLE_TESTS": "on", + "EVENT__LIBRARY_TYPE": "STATIC", "CMAKE_BUILD_TYPE": "Release", }, copy_pdb = True, lib_source = "@com_github_libevent_libevent//:all", static_libraries = select({ "//bazel:windows_x86_64": ["event.lib"], - "//conditions:default": ["libevent.a"], + "//conditions:default": [ + "libevent.a", + "libevent_pthreads.a", + ], }), ) @@ -60,6 +92,7 @@ envoy_cmake_external( }, cmake_files_dir = "$BUILD_TMPDIR/lib/CMakeFiles", copy_pdb = True, + debug_cache_entries = {"ENABLE_DEBUG": "on"}, lib_source = "@com_github_nghttp2_nghttp2//:all", pdb_name = "nghttp2_static", static_libraries = select({ diff --git a/bazel/foreign_cc/luajit.patch b/bazel/foreign_cc/luajit.patch new file mode 100644 index 0000000000000..15c65c3672fb4 --- /dev/null +++ b/bazel/foreign_cc/luajit.patch @@ -0,0 +1,86 @@ +diff --git a/src/Makefile b/src/Makefile +index f56465d..3f4f2fa 100644 +--- a/src/Makefile 
++++ b/src/Makefile +@@ -27,7 +27,7 @@ NODOTABIVER= 51 + DEFAULT_CC = gcc + # + # LuaJIT builds as a native 32 or 64 bit binary by default. +-CC= $(DEFAULT_CC) ++CC ?= $(DEFAULT_CC) + # + # Use this if you want to force a 32 bit build on a 64 bit multilib OS. + #CC= $(DEFAULT_CC) -m32 +@@ -71,10 +71,10 @@ CCWARN= -Wall + # as dynamic mode. + # + # Mixed mode creates a static + dynamic library and a statically linked luajit. +-BUILDMODE= mixed ++#BUILDMODE= mixed + # + # Static mode creates a static library and a statically linked luajit. +-#BUILDMODE= static ++BUILDMODE= static + # + # Dynamic mode creates a dynamic library and a dynamically linked luajit. + # Note: this executable will only run when the library is installed! +@@ -99,7 +99,7 @@ XCFLAGS= + # enabled by default. Some other features that *might* break some existing + # code (e.g. __pairs or os.execute() return values) can be enabled here. + # Note: this does not provide full compatibility with Lua 5.2 at this time. +-#XCFLAGS+= -DLUAJIT_ENABLE_LUA52COMPAT ++XCFLAGS+= -DLUAJIT_ENABLE_LUA52COMPAT + # + # Disable the JIT compiler, i.e. turn LuaJIT into a pure interpreter. 
+ #XCFLAGS+= -DLUAJIT_DISABLE_JIT +@@ -587,7 +587,7 @@ endif + + Q= @ + E= @echo +-#Q= ++Q= + #E= @: + + ############################################################################## +EOF +diff --git a/build.py b/build.py +new file mode 100755 +index 0000000..9c71271 +--- /dev/null ++++ b/build.py +@@ -0,0 +1,35 @@ ++#!/usr/bin/env python ++ ++import argparse ++import os ++import shutil ++ ++def main(): ++ parser = argparse.ArgumentParser() ++ parser.add_argument("--prefix") ++ args = parser.parse_args() ++ src_dir = os.path.dirname(os.path.realpath(__file__)) ++ shutil.copytree(src_dir, os.path.basename(src_dir)) ++ os.chdir(os.path.basename(src_dir)) ++ ++ os.environ["MACOSX_DEPLOYMENT_TARGET"] = "10.6" ++ os.environ["DEFAULT_CC"] = os.environ.get("CC", "") ++ os.environ["TARGET_CFLAGS"] = os.environ.get("CFLAGS", "") ++ os.environ["TARGET_LDFLAGS"] = os.environ.get("CFLAGS", "") ++ os.environ["CFLAGS"] = "" ++ # LuaJIT compile process build a tool `buildvm` and use it, building `buildvm` with ASAN ++ # will cause LSAN detect its leak and fail the build, set exitcode to 0 to make LSAN doesn't ++ # fail on it. ++ os.environ["LSAN_OPTIONS"] = "exitcode=0" ++ ++ # Blacklist LuaJIT from ASAN for now. ++ # TODO(htuch): Remove this when https://github.com/envoyproxy/envoy/issues/6084 is resolved. 
++ if "ENVOY_CONFIG_ASAN" in os.environ: ++ os.environ["TARGET_CFLAGS"] += " -fsanitize-blacklist=%s/com_github_luajit_luajit/clang-asan-blacklist.txt" % os.environ["PWD"] ++ with open("clang-asan-blacklist.txt", "w") as f: ++ f.write("fun:*\n") ++ ++ os.system('make V=1 PREFIX="{}" install'.format(args.prefix)) ++ ++main() ++ diff --git a/bazel/foreign_cc/zlib.patch b/bazel/foreign_cc/zlib.patch new file mode 100644 index 0000000000000..7d2524ca4ec45 --- /dev/null +++ b/bazel/foreign_cc/zlib.patch @@ -0,0 +1,18 @@ +diff --git a/trees.c b/trees.c +index 50cf4b4..e705576 100644 +--- a/trees.c ++++ b/trees.c +@@ -870,7 +870,9 @@ void ZLIB_INTERNAL _tr_stored_block(s, buf, stored_len, last) + bi_windup(s); /* align on byte boundary */ + put_short(s, (ush)stored_len); + put_short(s, (ush)~stored_len); +- zmemcpy(s->pending_buf + s->pending, (Bytef *)buf, stored_len); ++ if (stored_len > 0) { ++ zmemcpy(s->pending_buf + s->pending, (Bytef *)buf, stored_len); ++ } + s->pending += stored_len; + #ifdef ZLIB_DEBUG + s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L; +-- +2.21.0.593.g511ec345e18-goog + diff --git a/bazel/gen_compilation_database.sh b/bazel/gen_compilation_database.sh index 11492a1602f10..725083b3fb47a 100755 --- a/bazel/gen_compilation_database.sh +++ b/bazel/gen_compilation_database.sh @@ -1,6 +1,6 @@ #!/bin/bash -RELEASE_VERSION=0.3.1 +RELEASE_VERSION=0.3.3 if [[ ! -d bazel-compilation-database-${RELEASE_VERSION} ]]; then curl -L https://github.com/grailbio/bazel-compilation-database/archive/${RELEASE_VERSION}.tar.gz | tar -xz diff --git a/bazel/genrule_repository.bzl b/bazel/genrule_repository.bzl index cdddf14922de8..0689c39c88b0b 100644 --- a/bazel/genrule_repository.bzl +++ b/bazel/genrule_repository.bzl @@ -105,9 +105,11 @@ def _genrule_environment(ctx): # running. 
# # https://stackoverflow.com/questions/37603238/fsanitize-not-using-gold-linker-in-gcc-6-1 - force_ld_gold = [] - if "gcc" in c_compiler or "g++" in c_compiler: - force_ld_gold = ["-fuse-ld=gold"] + force_ld = [] + if "clang" in c_compiler: + force_ld = ["-fuse-ld=lld"] + elif "gcc" in c_compiler or "g++" in c_compiler: + force_ld = ["-fuse-ld=gold"] cc_flags = [] ld_flags = [] @@ -117,11 +119,11 @@ def _genrule_environment(ctx): if ctx.var.get("ENVOY_CONFIG_ASAN"): cc_flags += asan_flags ld_flags += asan_flags - ld_flags += force_ld_gold + ld_flags += force_ld if ctx.var.get("ENVOY_CONFIG_TSAN"): cc_flags += tsan_flags ld_flags += tsan_flags - ld_flags += force_ld_gold + ld_flags += force_ld lines.append("export CFLAGS=%r" % (" ".join(cc_flags),)) lines.append("export LDFLAGS=%r" % (" ".join(ld_flags),)) diff --git a/bazel/git_repository_info.py b/bazel/git_repository_info.py deleted file mode 100755 index 39b61fc0fb9e2..0000000000000 --- a/bazel/git_repository_info.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env python - -# Quick-and-dirty Python to fetch git repository info in bazel/repository_locations.bzl. 
- -from __future__ import print_function - -import imp -import sys -import subprocess as sp - -repolocs = imp.load_source('replocs', 'bazel/repository_locations.bzl') - -if __name__ == '__main__': - if len(sys.argv) != 2: - print('Usage: %s ' % sys.argv[0]) - sys.exit(1) - repo = sys.argv[1] - if repo not in repolocs.REPOSITORY_LOCATIONS: - print('Unknown repository: %s' % repo) - sys.exit(1) - repoloc = repolocs.REPOSITORY_LOCATIONS[repo] - print('%s %s' % (repoloc['remote'], repoloc['commit'])) diff --git a/bazel/protobuf.patch b/bazel/protobuf.patch new file mode 100644 index 0000000000000..69c7cc28e0baf --- /dev/null +++ b/bazel/protobuf.patch @@ -0,0 +1,34 @@ +diff --git a/src/google/protobuf/stubs/strutil.cc b/src/google/protobuf/stubs/strutil.cc +index 1d34870deb..3844fa6b8b 100644 +--- a/src/google/protobuf/stubs/strutil.cc ++++ b/src/google/protobuf/stubs/strutil.cc +@@ -1116,10 +1116,12 @@ char* FastUInt64ToBufferLeft(uint64 u64, char* buffer) { + } + + char* FastInt64ToBufferLeft(int64 i, char* buffer) { +- uint64 u = i; ++ uint64 u = 0; + if (i < 0) { + *buffer++ = '-'; +- u = -i; ++ u -= i; ++ } else { ++ u = i; + } + return FastUInt64ToBufferLeft(u, buffer); + } +diff --git a/src/google/protobuf/text_format.cc b/src/google/protobuf/text_format.cc +index ba0c3028ee..801a8e3786 100644 +--- a/src/google/protobuf/text_format.cc ++++ b/src/google/protobuf/text_format.cc +@@ -1315,7 +1315,9 @@ class TextFormat::Printer::TextGenerator + while (size > buffer_size_) { + // Data exceeds space in the buffer. Write what we can and request a new + // buffer. 
+- memset(buffer_, ' ', buffer_size_); ++ if (buffer_size_ > 0) { ++ memset(buffer_, ' ', buffer_size_); ++ } + size -= buffer_size_; + void* void_buffer; + failed_ = !output_->Next(&void_buffer, &buffer_size_); diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 75d2d327988d6..3edee86c1a400 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,5 +1,6 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") load(":genrule_repository.bzl", "genrule_repository") +load("@envoy_api//bazel:envoy_http_archive.bzl", "envoy_http_archive") load(":repository_locations.bzl", "REPOSITORY_LOCATIONS") load(":target_recipes.bzl", "TARGET_RECIPES") load( @@ -8,45 +9,22 @@ load( "setup_vc_env_vars", ) load("@bazel_tools//tools/cpp:lib_cc_configure.bzl", "get_env_var") +load("@envoy_api//bazel:repositories.bzl", "api_dependencies") # dict of {build recipe name: longform extension name,} PPC_SKIP_TARGETS = {"luajit": "envoy.filters.http.lua"} # go version for rules_go -GO_VERSION = "1.10.4" +GO_VERSION = "1.12.4" # Make all contents of an external repository accessible under a filegroup. Used for external HTTP # archives, e.g. cares. BUILD_ALL_CONTENT = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])""" def _repository_impl(name, **kwargs): - # `existing_rule_keys` contains the names of repositories that have already - # been defined in the Bazel workspace. By skipping repos with existing keys, - # users can override dependency versions by using standard Bazel repository - # rules in their WORKSPACE files. - existing_rule_keys = native.existing_rules().keys() - if name in existing_rule_keys: - # This repository has already been defined, probably because the user - # wants to override the version. Do nothing. - return - - loc_key = kwargs.pop("repository_key", name) - location = REPOSITORY_LOCATIONS[loc_key] - - # Git tags are mutable. We want to depend on commit IDs instead. 
Give the - # user a useful error if they accidentally specify a tag. - if "tag" in location: - fail( - "Refusing to depend on Git tag %r for external dependency %r: use 'commit' instead." % - (location["tag"], name), - ) - - # HTTP tarball at a given URL. Add a BUILD file if requested. - http_archive( - name = name, - urls = location["urls"], - sha256 = location["sha256"], - strip_prefix = location.get("strip_prefix", ""), + envoy_http_archive( + name, + locations = REPOSITORY_LOCATIONS, **kwargs ) @@ -55,9 +33,6 @@ def _build_recipe_repository_impl(ctxt): if ctxt.os.name.upper().startswith("WINDOWS"): return - # modify the recipes list based on the build context - recipes = _apply_dep_blacklist(ctxt, ctxt.attr.recipes) - # Setup the build directory with links to the relevant files. ctxt.symlink(Label("//bazel:repositories.sh"), "repositories.sh") ctxt.symlink( @@ -66,7 +41,7 @@ def _build_recipe_repository_impl(ctxt): ) ctxt.symlink(Label("//ci/build_container:recipe_wrapper.sh"), "recipe_wrapper.sh") ctxt.symlink(Label("//ci/build_container:Makefile"), "Makefile") - for r in recipes: + for r in ctxt.attr.recipes: ctxt.symlink( Label("//ci/build_container/build_recipes:" + r + ".sh"), "build_recipes/" + r + ".sh", @@ -76,7 +51,8 @@ def _build_recipe_repository_impl(ctxt): # Run the build script. 
print("Fetching external dependencies...") result = ctxt.execute( - ["./repositories.sh"] + recipes, + ["./repositories.sh"] + ctxt.attr.recipes, + timeout = 3600, quiet = False, ) print(result.stdout) @@ -102,27 +78,6 @@ _default_envoy_build_config = repository_rule( }, ) -def _default_envoy_api_impl(ctx): - ctx.file("WORKSPACE", "") - ctx.file("BUILD.bazel", "") - api_dirs = [ - "bazel", - "docs", - "envoy", - "examples", - "test", - "tools", - ] - for d in api_dirs: - ctx.symlink(ctx.path(ctx.attr.api).dirname.get_child(d), d) - -_default_envoy_api = repository_rule( - implementation = _default_envoy_api_impl, - attrs = { - "api": attr.label(default = "@envoy//api:BUILD"), - }, -) - # Python dependencies. If these become non-trivial, we might be better off using a virtualenv to # wrap them, but for now we can treat them as first-class Bazel. def _python_deps(): @@ -158,6 +113,14 @@ def _python_deps(): name = "com_github_twitter_common_finagle_thrift", build_file = "@envoy//bazel/external:twitter_common_finagle_thrift.BUILD", ) + _repository_impl( + name = "six_archive", + build_file = "@com_google_protobuf//:six.BUILD", + ) + native.bind( + name = "six", + actual = "@six_archive//:six", + ) # Bazel native C++ dependencies. For the dependencies that doesn't provide autoconf/automake builds. def _cc_deps(): @@ -191,29 +154,6 @@ def _go_deps(skip_targets): _repository_impl("io_bazel_rules_go") _repository_impl("bazel_gazelle") -def _envoy_api_deps(): - # Treat the data plane API as an external repo, this simplifies exporting the API to - # https://github.com/envoyproxy/data-plane-api. 
- if "envoy_api" not in native.existing_rules().keys(): - _default_envoy_api(name = "envoy_api") - - native.bind( - name = "api_httpbody_protos", - actual = "@googleapis//:api_httpbody_protos", - ) - native.bind( - name = "http_api_protos", - actual = "@googleapis//:http_api_protos", - ) - _repository_impl( - name = "six_archive", - build_file = "@com_google_protobuf//:six.BUILD", - ) - native.bind( - name = "six", - actual = "@six_archive//:six", - ) - def envoy_dependencies(path = "@envoy_deps//", skip_targets = []): envoy_repository = repository_rule( implementation = _build_recipe_repository_impl, @@ -287,8 +227,10 @@ def envoy_dependencies(path = "@envoy_deps//", skip_targets = []): _com_github_grpc_grpc() _com_github_google_benchmark() _com_github_google_jwt_verify() + _com_github_gperftools_gperftools() _com_github_jbeder_yaml_cpp() _com_github_libevent_libevent() + _com_github_luajit_luajit() _com_github_madler_zlib() _com_github_nanopb_nanopb() _com_github_nghttp2_nghttp2() @@ -305,7 +247,7 @@ def envoy_dependencies(path = "@envoy_deps//", skip_targets = []): _python_deps() _cc_deps() _go_deps(skip_targets) - _envoy_api_deps() + api_dependencies() def _boringssl(): _repository_impl("boringssl") @@ -452,6 +394,10 @@ def _com_github_madler_zlib(): http_archive( name = "com_github_madler_zlib", build_file_content = BUILD_ALL_CONTENT, + # The patch is only needed due to https://github.com/madler/zlib/pull/420 + # TODO(htuch): remove this when zlib #420 merges. 
+ patch_args = ["-p1"], + patches = ["@envoy//bazel/foreign_cc:zlib.patch"], **location ) native.bind( @@ -571,6 +517,10 @@ def _com_google_absl(): name = "abseil_node_hash_set", actual = "@com_google_absl//absl/container:node_hash_set", ) + native.bind( + name = "abseil_str_format", + actual = "@com_google_absl//absl/strings:str_format", + ) native.bind( name = "abseil_strings", actual = "@com_google_absl//absl/strings:strings", @@ -604,7 +554,14 @@ def _com_google_absl(): ) def _com_google_protobuf(): - _repository_impl("com_google_protobuf") + _repository_impl( + "com_google_protobuf", + # The patch is only needed until + # https://github.com/protocolbuffers/protobuf/pull/5901 is available. + # TODO(htuch): remove this when > protobuf 3.7.1 is released. + patch_args = ["-p1"], + patches = ["@envoy//bazel:protobuf.patch"], + ) # Needed for cc_proto_library, Bazel doesn't support aliases today for repos, # see https://groups.google.com/forum/#!topic/bazel-discuss/859ybHQZnuI and @@ -612,6 +569,11 @@ def _com_google_protobuf(): _repository_impl( "com_google_protobuf_cc", repository_key = "com_google_protobuf", + # The patch is only needed until + # https://github.com/protocolbuffers/protobuf/pull/5901 is available. + # TODO(htuch): remove this when > protobuf 3.7.1 is released. 
+ patch_args = ["-p1"], + patches = ["@envoy//bazel:protobuf.patch"], ) native.bind( name = "protobuf", @@ -715,19 +677,39 @@ def _com_github_google_jwt_verify(): actual = "@com_github_google_jwt_verify//:jwt_verify_lib", ) +def _com_github_luajit_luajit(): + location = REPOSITORY_LOCATIONS["com_github_luajit_luajit"] + http_archive( + name = "com_github_luajit_luajit", + build_file_content = BUILD_ALL_CONTENT, + patches = ["@envoy//bazel/foreign_cc:luajit.patch"], + patch_args = ["-p1"], + patch_cmds = ["chmod u+x build.py"], + **location + ) + + native.bind( + name = "luajit", + actual = "@envoy//bazel/foreign_cc:luajit", + ) + +def _com_github_gperftools_gperftools(): + location = REPOSITORY_LOCATIONS["com_github_gperftools_gperftools"] + http_archive( + name = "com_github_gperftools_gperftools", + build_file_content = BUILD_ALL_CONTENT, + patch_cmds = ["./autogen.sh"], + **location + ) + + native.bind( + name = "gperftools", + actual = "@envoy//bazel/foreign_cc:gperftools", + ) + def _foreign_cc_dependencies(): _repository_impl("rules_foreign_cc") -def _apply_dep_blacklist(ctxt, recipes): - newlist = [] - skip_list = [] - if _is_linux_ppc(ctxt): - skip_list += PPC_SKIP_TARGETS.keys() - for t in recipes: - if t not in skip_list: - newlist.append(t) - return newlist - def _is_linux(ctxt): return ctxt.os.name == "linux" diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 95a4bfd86e2d6..2d8a077d15a82 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -1,14 +1,14 @@ REPOSITORY_LOCATIONS = dict( bazel_gazelle = dict( - sha256 = "7949fc6cc17b5b191103e97481cf8889217263acf52e00b560683413af204fcb", - urls = ["https://github.com/bazelbuild/bazel-gazelle/releases/download/0.16.0/bazel-gazelle-0.16.0.tar.gz"], + sha256 = "3c681998538231a2d24d0c07ed5a7658cb72bfb5fd4bf9911157c0e9ac6a2687", + urls = ["https://github.com/bazelbuild/bazel-gazelle/releases/download/0.17.0/bazel-gazelle-0.17.0.tar.gz"], ), boringssl = 
dict( # Use commits from branch "chromium-stable-with-bazel" - sha256 = "de3fc9ff61e2fa736681b401ac94ea016f6ff522da419c312eca6aec60ad6f50", - strip_prefix = "boringssl-a7d9ef75919900bf4dca947ad3d5d03f0dee3f2a", - # chromium-72.0.3626.81 - urls = ["https://github.com/google/boringssl/archive/a7d9ef75919900bf4dca947ad3d5d03f0dee3f2a.tar.gz"], + sha256 = "4825306f702fa5cb76fd86c987a88c9bbb241e75f4d86dbb3714530ca73c1fb1", + strip_prefix = "boringssl-8cb07520451f0dc454654f2da5cdecf0b806f823", + # chromium-74.0.3729.131 + urls = ["https://github.com/google/boringssl/archive/8cb07520451f0dc454654f2da5cdecf0b806f823.tar.gz"], ), boringssl_fips = dict( sha256 = "b12ad676ee533824f698741bd127f6fbc82c46344398a6d78d25e62c6c418c73", @@ -32,15 +32,15 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/c-ares/c-ares/archive/cares-1_15_0.tar.gz"], ), com_github_circonus_labs_libcircllhist = dict( - sha256 = "9949e2864b8ad00ee5c3e9c1c3c01e51b6b68bb442a919652fc66b9776477987", - strip_prefix = "libcircllhist-fd8a14463739d247b414825cc56ca3946792a3b9", - # 2018-09-17 - urls = ["https://github.com/circonus-labs/libcircllhist/archive/fd8a14463739d247b414825cc56ca3946792a3b9.tar.gz"], + sha256 = "8165aa25e529d7d4b9ae849d3bf30371255a99d6db0421516abcff23214cdc2c", + strip_prefix = "libcircllhist-63a16dd6f2fc7bc841bb17ff92be8318df60e2e1", + # 2019-02-11 + urls = ["https://github.com/circonus-labs/libcircllhist/archive/63a16dd6f2fc7bc841bb17ff92be8318df60e2e1.tar.gz"], ), com_github_cyan4973_xxhash = dict( - sha256 = "19030315f4fc1b4b2cdb9d7a317069a109f90e39d1fe4c9159b7aaa39030eb95", - strip_prefix = "xxHash-0.6.5", - urls = ["https://github.com/Cyan4973/xxHash/archive/v0.6.5.tar.gz"], + sha256 = "b34792646d5e19964bb7bba24f06cb13aecaac623ab91a54da08aa19d3686d7e", + strip_prefix = "xxHash-0.7.0", + urls = ["https://github.com/Cyan4973/xxHash/archive/v0.7.0.tar.gz"], ), com_github_envoyproxy_sqlparser = dict( sha256 = "425dfee0c4fe9aff8acf2365cde3dd2ba7fb878d2ba37562d33920e34c40c05e", @@ 
-58,9 +58,9 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/fmtlib/fmt/releases/download/5.3.0/fmt-5.3.0.zip"], ), com_github_gabime_spdlog = dict( - sha256 = "78786c641ca278388107e30f1f0fa0307e7e98e1c5279c3d29f71a143f9176b6", - strip_prefix = "spdlog-1.3.0", - urls = ["https://github.com/gabime/spdlog/archive/v1.3.0.tar.gz"], + sha256 = "160845266e94db1d4922ef755637f6901266731c4cb3b30b45bf41efa0e6ab70", + strip_prefix = "spdlog-1.3.1", + urls = ["https://github.com/gabime/spdlog/archive/v1.3.1.tar.gz"], ), com_github_gcovr_gcovr = dict( sha256 = "8a60ba6242d67a58320e9e16630d80448ef6d5284fda5fb3eff927b63c8b04a2", @@ -73,20 +73,33 @@ REPOSITORY_LOCATIONS = dict( # 2018-03-06 urls = ["https://github.com/google/libprotobuf-mutator/archive/c3d2faf04a1070b0b852b0efdef81e1a81ba925e.tar.gz"], ), + com_github_gperftools_gperftools = dict( + # TODO(cmluciano): Bump to release 2.8 + # This sha is specifically chosen to fix ppc64le builds that require inclusion + # of asm/ptrace.h + sha256 = "18574813a062eee487bc1b761e8024a346075a7cb93da19607af362dc09565ef", + strip_prefix = "gperftools-fc00474ddc21fff618fc3f009b46590e241e425e", + urls = ["https://github.com/gperftools/gperftools/archive/fc00474ddc21fff618fc3f009b46590e241e425e.tar.gz"], + ), com_github_grpc_grpc = dict( - sha256 = "a5342629fe1b689eceb3be4d4f167b04c70a84b9d61cf8b555e968bc500bdb5a", - strip_prefix = "grpc-1.16.1", - urls = ["https://github.com/grpc/grpc/archive/v1.16.1.tar.gz"], + sha256 = "ba8b08a697b66e14af35da07753583cf32ff3d14dcd768f91b1bbe2e6c07c349", + strip_prefix = "grpc-1.20.1", + urls = ["https://github.com/grpc/grpc/archive/v1.20.1.tar.gz"], + ), + com_github_luajit_luajit = dict( + sha256 = "409f7fe570d3c16558e594421c47bdd130238323c9d6fd6c83dedd2aaeb082a8", + strip_prefix = "LuaJIT-2.1.0-beta3", + urls = ["https://github.com/LuaJIT/LuaJIT/archive/v2.1.0-beta3.tar.gz"], ), com_github_nanopb_nanopb = dict( - sha256 = "b8dd5cb0d184d424ddfea13ddee3f7b0920354334cbb44df434d55e5f0086b12", - 
strip_prefix = "nanopb-0.3.9.2", - urls = ["https://github.com/nanopb/nanopb/archive/0.3.9.2.tar.gz"], + sha256 = "5fb4dab0b7f6a239908407fe07c9d03877cd0502abb637e38c41091cb9c1d438", + strip_prefix = "nanopb-0.3.9.3", + urls = ["https://github.com/nanopb/nanopb/archive/0.3.9.3.tar.gz"], ), com_github_nghttp2_nghttp2 = dict( - sha256 = "cb70261634c33dc5adbe780afcfc5dab17838ee303631a02b983c6a217bc16ba", - strip_prefix = "nghttp2-1.35.1", - urls = ["https://github.com/nghttp2/nghttp2/releases/download/v1.35.1/nghttp2-1.35.1.tar.gz"], + sha256 = "fe9a75ec44e3a2e8f7f0cb83ad91e663bbc4c5085baf37b57ee2610846d7cf5d", + strip_prefix = "nghttp2-1.38.0", + urls = ["https://github.com/nghttp2/nghttp2/releases/download/v1.38.0/nghttp2-1.38.0.tar.gz"], ), io_opentracing_cpp = dict( sha256 = "015c4187f7a6426a2b5196f0ccd982aa87f010cf61f507ae3ce5c90523f92301", @@ -116,9 +129,13 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/google/benchmark/archive/505be96ab23056580a3a2315abba048f4428b04e.tar.gz"], ), com_github_libevent_libevent = dict( - sha256 = "316ddb401745ac5d222d7c529ef1eada12f58f6376a66c1118eee803cb70f83d", - strip_prefix = "libevent-release-2.1.8-stable", - urls = ["https://github.com/libevent/libevent/archive/release-2.1.8-stable.tar.gz"], + sha256 = "ab3af422b7e4c6d9276b3637d87edb6cf628fd91c9206260b759778c3a28b330", + # This SHA includes the new "prepare" and "check" watchers, used for event loop performance + # stats (see https://github.com/libevent/libevent/pull/793) and the fix for a race condition + # in the watchers (see https://github.com/libevent/libevent/pull/802). + # TODO(mergeconflict): Update to v2.2 when it is released. 
+ strip_prefix = "libevent-1cd8830de27c30c5324c75bfb6012c969c09ca2c", + urls = ["https://github.com/libevent/libevent/archive/1cd8830de27c30c5324c75bfb6012c969c09ca2c.tar.gz"], ), com_github_madler_zlib = dict( sha256 = "629380c90a77b964d896ed37163f5c3a34f6e6d897311f1df2a7016355c45eff", @@ -147,14 +164,14 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/nodejs/http-parser/archive/v2.9.0.tar.gz"], ), com_github_pallets_jinja = dict( - sha256 = "f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4", - strip_prefix = "Jinja2-2.10", - urls = ["https://github.com/pallets/jinja/releases/download/2.10/Jinja2-2.10.tar.gz"], + sha256 = "e9baab084b8d84b511c75aca98bba8585041dbe971d5476ee53d9c6eea1b58b3", + strip_prefix = "jinja-2.10.1", + urls = ["https://github.com/pallets/jinja/archive/2.10.1.tar.gz"], ), com_github_pallets_markupsafe = dict( - sha256 = "62f6154071d1ceac8d7dfb5ed7a21dc502cc12e2348c032e5a1cedd018548381", - strip_prefix = "markupsafe-1.1.0/src", - urls = ["https://github.com/pallets/markupsafe/archive/1.1.0.tar.gz"], + sha256 = "222a10e3237d92a9cd45ed5ea882626bc72bc5e0264d3ed0f2c9129fa69fc167", + strip_prefix = "markupsafe-1.1.1/src", + urls = ["https://github.com/pallets/markupsafe/archive/1.1.1.tar.gz"], ), com_github_tencent_rapidjson = dict( sha256 = "bf7ced29704a1e696fbccf2a2b4ea068e7774fa37f6d7dd4039d0787f8bed98e", @@ -184,15 +201,9 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/google/googletest/archive/eb9225ce361affe561592e0912320b9db84985d0.tar.gz"], ), com_google_protobuf = dict( - sha256 = "46f1da3a6a6db66dd240cf95a5553198f7c6e98e6ac942fceb8a1cf03291d96e", - strip_prefix = "protobuf-7492b5681231c79f0265793fa57dc780ae2481d6", - # TODO(htuch): Switch back to released versions for protobuf when a release > 3.6.0 happens - # that includes: - # - https://github.com/protocolbuffers/protobuf/commit/f35669b8d3f46f7f1236bd21f14d744bba251e60 - # - 
https://github.com/protocolbuffers/protobuf/commit/6a4fec616ec4b20f54d5fb530808b855cb664390 - # - https://github.com/protocolbuffers/protobuf/commit/fa252ec2a54acb24ddc87d48fed1ecfd458445fd - # - https://github.com/protocolbuffers/protobuf/commit/7492b5681231c79f0265793fa57dc780ae2481d6 - urls = ["https://github.com/protocolbuffers/protobuf/archive/7492b5681231c79f0265793fa57dc780ae2481d6.tar.gz"], + sha256 = "c10ef8d8ad5a9e5f850483051b7f9ee2c8bb3ca2e0e16a4cf105bd1321afb2d6", + strip_prefix = "protobuf-3.7.1", + urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-all-3.7.1.tar.gz"], ), grpc_httpjson_transcoding = dict( sha256 = "dedd76b0169eb8c72e479529301a1d9b914a4ccb4d2b5ddb4ebe92d63a7b2152", @@ -201,22 +212,21 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/64d6ac985360b624d8e95105701b64a3814794cd.tar.gz"], ), com_github_golang_protobuf = dict( - # TODO(sesmith177): Remove this dependency when both: - # 1. There's a release of golang/protobuf that includes - # https://github.com/golang/protobuf/commit/31e0d063dd98c052257e5b69eeb006818133f45c - # 2. That release is included in rules_go - sha256 = "4cbd5303a5cf85791b3c310a50a479027c035d75091bb90c482ba67b0a2cf5b4", - strip_prefix = "protobuf-31e0d063dd98c052257e5b69eeb006818133f45c", - urls = ["https://github.com/golang/protobuf/archive/31e0d063dd98c052257e5b69eeb006818133f45c.tar.gz"], + # TODO(sesmith177): Remove this dependency when: + # 1. 
There's a release of rules_go that includes golang/protobuf v1.3.1 + sha256 = "3f3a6123054a9847093c119895f1660612f301fe95358f3a6a1a33fd0933e6cf", + strip_prefix = "protobuf-1.3.1", + urls = ["https://github.com/golang/protobuf/archive/v1.3.1.tar.gz"], ), io_bazel_rules_go = dict( - sha256 = "7be7dc01f1e0afdba6c8eb2b43d2fa01c743be1b9273ab1eaf6c233df078d705", - urls = ["https://github.com/bazelbuild/rules_go/releases/download/0.16.5/rules_go-0.16.5.tar.gz"], + sha256 = "91b79f4758fd16f2c6426279ce00c1d2d8577d61c519db39675ed84657e1a95e", + urls = ["https://github.com/bazelbuild/rules_go/releases/download/0.17.4/rules_go-0.17.4.tar.gz"], ), rules_foreign_cc = dict( - sha256 = "78cbd1a8134b2f0ead8e637228d8ac1ac7c0ab3f0fbcd149a85e55330697d9a3", - strip_prefix = "rules_foreign_cc-216ded8acb95d81e312b228dce3c39872c7a7c34", - urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/216ded8acb95d81e312b228dce3c39872c7a7c34.tar.gz"], + sha256 = "136470a38dcd00c7890230402b43004dc947bf1e3dd0289dd1bd2bfb1e0a3484", + strip_prefix = "rules_foreign_cc-e3f4b5e0bc9dac9cf036616c13de25e6cd5051a2", + # 2019-04-04 + urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/e3f4b5e0bc9dac9cf036616c13de25e6cd5051a2.tar.gz"], ), six_archive = dict( sha256 = "105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a", @@ -230,8 +240,8 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/google/subpar/archive/1.3.0.tar.gz"], ), com_googlesource_quiche = dict( - # Static snapshot of https://quiche.googlesource.com/quiche/+archive/dc5ce1a82e342bfd366a1ccdf2a2717edb46e4ec.tar.gz - sha256 = "ed4aec9af6b251385b720d3a23a22a4264d649806ff95dc0b29dab9f786387a0", - urls = ["https://storage.googleapis.com/quiche-envoy-integration/dc5ce1a82e342bfd366a1ccdf2a2717edb46e4ec.tar.gz"], + # Static snapshot of https://quiche.googlesource.com/quiche/+archive/7bf7c3c358eb954e463bde14ea27444f4bd8ea05.tar.gz + sha256 = "36fe180d532a9ccb18cd32328af5231636c7408104523f9ed5eebbad75f1e039", + 
urls = ["https://storage.googleapis.com/quiche-envoy-integration/7bf7c3c358eb954e463bde14ea27444f4bd8ea05.tar.gz"], ), ) diff --git a/bazel/target_recipes.bzl b/bazel/target_recipes.bzl index f27dd1a829e5c..50481b4bcf54b 100644 --- a/bazel/target_recipes.bzl +++ b/bazel/target_recipes.bzl @@ -2,8 +2,6 @@ # target in //ci/prebuilt/BUILD to the underlying build recipe in # ci/build_container/build_recipes. TARGET_RECIPES = { - "tcmalloc_and_profiler": "gperftools", - "tcmalloc_debug": "gperftools", - "luajit": "luajit", + "v8": "v8", "wavm_with_llvm": "wavm", } diff --git a/ci/README.md b/ci/README.md index 15fcee177563c..1676ab83ace6f 100644 --- a/ci/README.md +++ b/ci/README.md @@ -2,16 +2,16 @@ Two flavors of Envoy Docker images, based on Ubuntu and Alpine Linux, are built. -## Ubuntu envoy image +## Ubuntu Envoy image The Ubuntu based Envoy Docker image at [`envoyproxy/envoy-build:`](https://hub.docker.com/r/envoyproxy/envoy-build/) is used for CircleCI checks, where `` is specified in [`envoy_build_sha.sh`](https://github.com/envoyproxy/envoy/blob/master/ci/envoy_build_sha.sh). Developers may work with `envoyproxy/envoy-build:latest` to provide a self-contained environment for building Envoy binaries and running tests that reflects the latest built Ubuntu Envoy image. Moreover, the Docker image at [`envoyproxy/envoy:`](https://hub.docker.com/r/envoyproxy/envoy/) is an image that has an Envoy binary at `/usr/local/bin/envoy`. The `` -corresponds to the master commit at which the binary was compiled. Lastly, `envoyproxy/envoy:latest` contains an Envoy +corresponds to the master commit at which the binary was compiled. Lastly, `envoyproxy/envoy-dev:latest` contains an Envoy binary built from the latest tip of master that passed tests. -## Alpine envoy image +## Alpine Envoy image Minimal images based on Alpine Linux allow for quicker deployment of Envoy. 
Two Alpine based images are built, one with an Envoy binary with debug (`envoyproxy/envoy-alpine-debug`) symbols and one stripped of them (`envoyproxy/envoy-alpine`). @@ -25,7 +25,7 @@ Currently there are three build images: * `envoyproxy/envoy-build` — alias to `envoyproxy/envoy-build-ubuntu`. * `envoyproxy/envoy-build-ubuntu` — based on Ubuntu 16.04 (Xenial) which uses the GCC 5.4 compiler. -We also install and use the clang-7 compiler for some sanitizing runs. +We also install and use the clang-8 compiler for some sanitizing runs. # Building and running tests as a developer @@ -91,8 +91,8 @@ The `./ci/run_envoy_docker.sh './ci/do_ci.sh '` targets are: * `bazel.tsan` — build and run tests under `-c dbg --config=clang-tsan` with clang. * `bazel.compile_time_options` — build Envoy and test with various compile-time options toggled to their non-default state, to ensure they still build. * `bazel.clang_tidy` — build and run clang-tidy over all source files. -* `check_format`— run `clang-format-6.0` and `buildifier` on entire source tree. -* `fix_format`— run and enforce `clang-format-6.0` and `buildifier` on entire source tree. +* `check_format`— run `clang-format` and `buildifier` on entire source tree. +* `fix_format`— run and enforce `clang-format` and `buildifier` on entire source tree. * `check_spelling`— run `misspell` on entire project. * `fix_spelling`— run and enforce `misspell` on entire project. * `check_spelling_pedantic`— run `aspell` on C++ and proto comments. diff --git a/ci/WORKSPACE b/ci/WORKSPACE deleted file mode 100644 index 2f2197641d11f..0000000000000 --- a/ci/WORKSPACE +++ /dev/null @@ -1,31 +0,0 @@ -workspace(name = "ci") - -load("//bazel:repositories.bzl", "GO_VERSION", "envoy_dependencies") -load("//bazel:cc_configure.bzl", "cc_configure") - -# We shouldn't need this, but it's a workaround for https://github.com/bazelbuild/bazel/issues/3580. 
-local_repository( - name = "envoy", - path = "/source", -) - -envoy_dependencies( - path = "@envoy//ci/prebuilt", -) - -# TODO(htuch): Roll this into envoy_dependencies() -load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies") - -rules_foreign_cc_dependencies() - -cc_configure() - -load("@envoy_api//bazel:repositories.bzl", "api_dependencies") - -api_dependencies() - -load("@io_bazel_rules_go//go:def.bzl", "go_register_toolchains", "go_rules_dependencies") - -go_rules_dependencies() - -go_register_toolchains(go_version = GO_VERSION) diff --git a/ci/WORKSPACE.filter.example b/ci/WORKSPACE.filter.example index c528e921e3e68..4eb98345a13f7 100644 --- a/ci/WORKSPACE.filter.example +++ b/ci/WORKSPACE.filter.example @@ -1,25 +1,24 @@ -workspace(name = "envoy") +workspace(name = "envoy_filter_example") local_repository( name = "envoy", path = "/source", ) -load("//bazel:repositories.bzl", "envoy_dependencies", "GO_VERSION") -load("//bazel:cc_configure.bzl", "cc_configure") +load("@envoy//bazel:api_repositories.bzl", "envoy_api_dependencies") +envoy_api_dependencies() + +load("@envoy//bazel:repositories.bzl", "envoy_dependencies", "GO_VERSION") +load("@envoy//bazel:cc_configure.bzl", "cc_configure") + +envoy_dependencies() -envoy_dependencies( - path = "@envoy//ci/prebuilt", -) # TODO(htuch): Roll this into envoy_dependencies() load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies") rules_foreign_cc_dependencies() cc_configure() -load("@envoy_api//bazel:repositories.bzl", "api_dependencies") -api_dependencies() - -load("@io_bazel_rules_go//go:def.bzl", "go_rules_dependencies", "go_register_toolchains") +load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") go_rules_dependencies() go_register_toolchains(go_version = GO_VERSION) diff --git a/ci/build_container/Dockerfile-centos b/ci/build_container/Dockerfile-centos new file mode 100644 index 0000000000000..bbfd259e29908 --- 
/dev/null +++ b/ci/build_container/Dockerfile-centos @@ -0,0 +1,7 @@ +FROM centos:7 + +COPY ./build_container_common.sh / +COPY ./build_container_centos.sh / + +ENV PATH /opt/rh/rh-git218/root/usr/bin:/opt/rh/devtoolset-7/root/usr/bin:/opt/llvm/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +RUN ./build_container_centos.sh diff --git a/ci/build_container/Dockerfile-ubuntu b/ci/build_container/Dockerfile-ubuntu index 2d1cf1896736a..1b2586ca2a144 100644 --- a/ci/build_container/Dockerfile-ubuntu +++ b/ci/build_container/Dockerfile-ubuntu @@ -1,11 +1,6 @@ FROM ubuntu:xenial -COPY ./build_and_install_deps.sh ./recipe_wrapper.sh ./Makefile ./build_container_common.sh / -COPY WORKSPACE /bazel-prebuilt/ -COPY ./api /bazel-prebuilt/api -COPY ./bazel /bazel-prebuilt/bazel -COPY ./build_recipes/*.sh /build_recipes/ - +COPY ./build_container_common.sh / COPY ./build_container_ubuntu.sh / RUN ./build_container_ubuntu.sh diff --git a/ci/build_container/build_container_centos.sh b/ci/build_container/build_container_centos.sh new file mode 100755 index 0000000000000..bf45bcc22a658 --- /dev/null +++ b/ci/build_container/build_container_centos.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +set -e + +# Note: rh-git218 is needed to run `git -C` in docs build process. 
+yum install -y centos-release-scl epel-release +yum update -y +yum install -y devtoolset-7-gcc devtoolset-7-gcc-c++ devtoolset-7-binutils java-1.8.0-openjdk-headless rsync \ + rh-git218 wget unzip which make cmake3 patch ninja-build devtoolset-7-libatomic-devel openssl python27 \ + libtool autoconf tcpdump + +ln -s /usr/bin/cmake3 /usr/bin/cmake +ln -s /usr/bin/ninja-build /usr/bin/ninja + +BAZEL_VERSION="$(curl -s https://api.github.com/repos/bazelbuild/bazel/releases/latest | + python -c "import json, sys; print json.load(sys.stdin)['tag_name']")" +BAZEL_INSTALLER="bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh" +curl -OL "https://github.com/bazelbuild/bazel/releases/download/${BAZEL_VERSION}/${BAZEL_INSTALLER}" +chmod u+x "./${BAZEL_INSTALLER}" +"./${BAZEL_INSTALLER}" +rm "./${BAZEL_INSTALLER}" + +# SLES 11 has older glibc than CentOS 7, so pre-built binary for it works on CentOS 7 +LLVM_VERSION=8.0.0 +LLVM_RELEASE="clang+llvm-${LLVM_VERSION}-x86_64-linux-sles11.3" +curl -OL "https://releases.llvm.org/${LLVM_VERSION}/${LLVM_RELEASE}.tar.xz" +tar Jxf "${LLVM_RELEASE}.tar.xz" +mv "./${LLVM_RELEASE}" /opt/llvm +rm "./${LLVM_RELEASE}.tar.xz" + +# httpd24 is required by rh-git218 +echo "/opt/rh/httpd24/root/usr/lib64" > /etc/ld.so.conf.d/httpd24.conf +echo "/opt/llvm/lib" > /etc/ld.so.conf.d/llvm.conf +ldconfig + +# Setup tcpdump for non-root. 
+groupadd pcap +chgrp pcap /usr/sbin/tcpdump +chmod 750 /usr/sbin/tcpdump +setcap cap_net_raw,cap_net_admin=eip /usr/sbin/tcpdump + +./build_container_common.sh diff --git a/ci/build_container/build_container_common.sh b/ci/build_container/build_container_common.sh index e914a2a0eb1cd..4d3218d6482cf 100755 --- a/ci/build_container/build_container_common.sh +++ b/ci/build_container/build_container_common.sh @@ -1,34 +1,8 @@ #!/bin/bash -e # buildifier -VERSION=0.20.0 -SHA256=92c74a3c2331a12f578fcf9c5ace645b7537e1a18f02f91d0fdbb6f0655e8493 +VERSION=0.25.0 +SHA256=6e6aea35b2ea2b4951163f686dfbfe47b49c840c56b873b3a7afe60939772fc1 curl --location --output /usr/local/bin/buildifier https://github.com/bazelbuild/buildtools/releases/download/"$VERSION"/buildifier \ && echo "$SHA256" '/usr/local/bin/buildifier' | sha256sum --check \ && chmod +x /usr/local/bin/buildifier - -# GCC for everything. -export CC=gcc -export CXX=g++ - -export THIRDPARTY_DEPS=/tmp -export THIRDPARTY_SRC=/thirdparty -DEPS=$(python <(cat /bazel-prebuilt/bazel/target_recipes.bzl; \ - echo "print ' '.join(\"${THIRDPARTY_DEPS}/%s.dep\" % r for r in set(TARGET_RECIPES.values()))")) - -# TODO(htuch): We build twice as a workaround for https://github.com/google/protobuf/issues/3322. -# Fix this. This will be gone real soon now. -export THIRDPARTY_BUILD=/thirdparty_build -export CPPFLAGS="-DNDEBUG" -echo "Building opt deps ${DEPS}" -"$(dirname "$0")"/build_and_install_deps.sh ${DEPS} - -echo "Building Bazel-managed deps (//bazel/external:all_external)" -mkdir /bazel-prebuilt-root /bazel-prebuilt-output -BAZEL_OPTIONS="--output_user_root=/bazel-prebuilt-root --output_base=/bazel-prebuilt-output" -cd /bazel-prebuilt -for BAZEL_MODE in opt dbg fastbuild; do - bazel ${BAZEL_OPTIONS} build -c "${BAZEL_MODE}" //bazel/external:all_external -done -# Allow access by non-root for building. 
-chmod -R a+rX /bazel-prebuilt-root /bazel-prebuilt-output diff --git a/ci/build_container/build_container_ubuntu.sh b/ci/build_container/build_container_ubuntu.sh index a997ac82a54ba..aa27e14f84da4 100755 --- a/ci/build_container/build_container_ubuntu.sh +++ b/ci/build_container/build_container_ubuntu.sh @@ -6,27 +6,33 @@ set -e apt-get update export DEBIAN_FRONTEND=noninteractive apt-get install -y curl wget software-properties-common make cmake git python python-pip python3 python3-pip \ - unzip bc libtool ninja-build automake zip time golang gdb strace wireshark tshark tcpdump + unzip bc libtool automake zip time golang gdb strace wireshark tshark tcpdump lcov apt-transport-https # Install cmake 3.12. curl -sLO https://cmake.org/files/v3.12/cmake-3.12.3-Linux-x86_64.tar.gz echo "0210f500c71af0ee7e8c42da76954298144d5f72f725ea381ae5db7b766b000e cmake-3.12.3-Linux-x86_64.tar.gz" | sha256sum --check tar -zxf cmake-3.12.3-Linux-x86_64.tar.gz -C /usr --strip-components=1 -# clang 7. -curl http://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - -apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-7 main" +# Install ninja 1.8.2. +curl -sLO https://github.com/ninja-build/ninja/releases/download/v1.8.2/ninja-linux.zip +echo "d2fea9ff33b3ef353161ed906f260d565ca55b8ca0568fa07b1d2cab90a84a07 ninja-linux.zip" | sha256sum --check +unzip ninja-linux.zip && mv ninja /usr/bin +# clang 8. 
+curl https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - +apt-add-repository "deb https://apt.llvm.org/xenial/ llvm-toolchain-xenial-8 main" apt-get update -apt-get install -y clang-7 clang-format-7 clang-tidy-7 lld-7 libc++-7-dev libc++abi-7-dev +apt-get install -y clang-8 clang-format-8 clang-tidy-8 lld-8 libc++-8-dev libc++abi-8-dev # gcc-7 add-apt-repository -y ppa:ubuntu-toolchain-r/test apt update apt install -y g++-7 update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-7 1000 update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-7 1000 +update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-7 1000 update-alternatives --config gcc update-alternatives --config g++ +update-alternatives --config gcov # Bazel and related dependencies. apt-get install -y openjdk-8-jdk -echo "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list +echo "deb [arch=amd64] https://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list curl https://bazel.build/bazel-release.pub.gpg | apt-key add - apt-get update apt-get install -y bazel diff --git a/ci/build_container/build_recipes/gperftools.sh b/ci/build_container/build_recipes/gperftools.sh deleted file mode 100755 index a6db1f5066a6e..0000000000000 --- a/ci/build_container/build_recipes/gperftools.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -set -e - -if [[ "${OS}" == "Windows_NT" ]]; then - exit 0 -fi - -VERSION=2.7 -SHA256=1ee8c8699a0eff6b6a203e59b43330536b22bbcbe6448f54c7091e5efb0763c9 - -curl https://github.com/gperftools/gperftools/releases/download/gperftools-"$VERSION"/gperftools-"$VERSION".tar.gz -sLo gperftools-"$VERSION".tar.gz \ - && echo "$SHA256" gperftools-"$VERSION".tar.gz | sha256sum --check -tar xf gperftools-"$VERSION".tar.gz -cd gperftools-"$VERSION" - -export LDFLAGS="${LDFLAGS} -lpthread" -./configure --prefix="$THIRDPARTY_BUILD" --enable-shared=no --enable-frame-pointers 
--disable-libunwind - -# Don't build tests, since malloc_extension_c_test hardcodes -lstdc++, which breaks build when linking against libc++. -make V=1 install-libLTLIBRARIES install-perftoolsincludeHEADERS diff --git a/ci/build_container/build_recipes/luajit.sh b/ci/build_container/build_recipes/luajit.sh deleted file mode 100644 index 0484631212bcd..0000000000000 --- a/ci/build_container/build_recipes/luajit.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/bin/bash - -set -e - -VERSION=2.1.0-beta3 -SHA256=409f7fe570d3c16558e594421c47bdd130238323c9d6fd6c83dedd2aaeb082a8 -if [[ "${OS}" == "Windows_NT" ]]; then - exit 0 -fi - -curl https://github.com/LuaJIT/LuaJIT/archive/v"$VERSION".tar.gz -sLo LuaJIT-"$VERSION".tar.gz \ - && echo "$SHA256" LuaJIT-"$VERSION".tar.gz | sha256sum --check -tar xf LuaJIT-"$VERSION".tar.gz - -# Fixup Makefile with things that cannot be set via env var. -cat > luajit_make.diff << 'EOF' -diff --git a/src/Makefile b/src/Makefile -index f56465d..3f4f2fa 100644 ---- a/src/Makefile -+++ b/src/Makefile -@@ -27,7 +27,7 @@ NODOTABIVER= 51 - DEFAULT_CC = gcc - # - # LuaJIT builds as a native 32 or 64 bit binary by default. --CC= $(DEFAULT_CC) -+CC ?= $(DEFAULT_CC) - # - # Use this if you want to force a 32 bit build on a 64 bit multilib OS. - #CC= $(DEFAULT_CC) -m32 -@@ -71,10 +71,10 @@ CCWARN= -Wall - # as dynamic mode. - # - # Mixed mode creates a static + dynamic library and a statically linked luajit. --BUILDMODE= mixed -+#BUILDMODE= mixed - # - # Static mode creates a static library and a statically linked luajit. --#BUILDMODE= static -+BUILDMODE= static - # - # Dynamic mode creates a dynamic library and a dynamically linked luajit. - # Note: this executable will only run when the library is installed! -@@ -99,7 +99,7 @@ XCFLAGS= - # enabled by default. Some other features that *might* break some existing - # code (e.g. __pairs or os.execute() return values) can be enabled here. 
- # Note: this does not provide full compatibility with Lua 5.2 at this time. --#XCFLAGS+= -DLUAJIT_ENABLE_LUA52COMPAT -+XCFLAGS+= -DLUAJIT_ENABLE_LUA52COMPAT - # - # Disable the JIT compiler, i.e. turn LuaJIT into a pure interpreter. - #XCFLAGS+= -DLUAJIT_DISABLE_JIT -@@ -587,7 +587,7 @@ endif - - Q= @ - E= @echo --#Q= -+Q= - #E= @: - - ############################################################################## -EOF - -cd LuaJIT-"$VERSION" -patch -p1 < ../luajit_make.diff - -# Default MACOSX_DEPLOYMENT_TARGET is 10.4, which will fail the build at link time on macOS 10.14: -# ld: library not found for -lgcc_s.10.4 -# This doesn't affect other platforms -MACOSX_DEPLOYMENT_TARGET=10.6 DEFAULT_CC=${CC} TARGET_CFLAGS=${CFLAGS} TARGET_LDFLAGS=${CFLAGS} \ - CFLAGS="" make V=1 PREFIX="$THIRDPARTY_BUILD" install diff --git a/ci/build_container/build_recipes/v8.sh b/ci/build_container/build_recipes/v8.sh new file mode 100755 index 0000000000000..52380a8c4d3cb --- /dev/null +++ b/ci/build_container/build_recipes/v8.sh @@ -0,0 +1,116 @@ +#!/bin/bash + +set -e + +# Get wasm-c-api. + +COMMIT=111a3e4a0962fae4da2428b8680f7dfbc8deef47 # Mon May 13 11:10:04 2019 +0200 +SHA256=4eb700586902d0f6ebdcbc0147f5674df95743cc831495191b7df4cb32fb3ef0 + +curl https://github.com/WebAssembly/wasm-c-api/archive/"$COMMIT".tar.gz -sLo wasm-c-api-"$COMMIT".tar.gz \ + && echo "$SHA256" wasm-c-api-"$COMMIT".tar.gz | sha256sum --check +tar xf wasm-c-api-"$COMMIT".tar.gz +cd wasm-c-api-"$COMMIT" + +# Build v8 inside v8 subdirectory to match wasm-c-api's Makefile. + +mkdir v8 +cd v8 + +# Get depot_tools. + +git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git +export PATH="$PATH:$PWD/depot_tools" + +# Get v8. + +VERSION=7.4.288.28 # match wasm-c-api (branch-heads/7.4) + +fetch v8 +cd v8 +git checkout "$VERSION" +gclient sync + +# Patch v8 for wasm-c-api. 
+ +patch -p1 < ../../patch/0001-BUILD.gn-add-wasm-v8-lowlevel.patch +cp -p ../../src/wasm-v8-lowlevel.cc src/wasm-v8-lowlevel.cc +cp -p ../../src/wasm-v8-lowlevel.hh include/wasm-v8-lowlevel.hh + +# Build v8 static library. + +tools/dev/v8gen.py x64.release -- v8_monolithic=true v8_use_external_startup_data=false v8_enable_i18n_support=false v8_enable_gdbjit=false use_custom_libcxx=false +ninja -v -C out.gn/x64.release v8_monolith + +# Install v8. + +mkdir -p "$THIRDPARTY_BUILD/include/v8/libplatform" +cp -p include/v8*.h "$THIRDPARTY_BUILD/include/v8/" +cp -p include/libplatform/*.h "$THIRDPARTY_BUILD/include/v8/libplatform/" +cp -p out.gn/x64.release/obj/libv8_monolith.a "$THIRDPARTY_BUILD/lib/" + +cd ../.. + +# Patch wasm-c-api. + +# 1. Disable DEBUG (alloc/free accounting), since it seems to be broken +# in optimized builds and/or when using sanitizers. +# 2. Disable hardcoded sanitizers. + +cat <<\EOF | patch -p1 +--- a/Makefile ++++ b/Makefile +@@ -7,10 +7,10 @@ V8_VERSION = branch-heads/7.4 + V8_ARCH = x64 + V8_MODE = release + +-WASM_FLAGS = -DDEBUG # -DDEBUG_LOG +-C_FLAGS = ${WASM_FLAGS} -Wall -Werror -ggdb -O -fsanitize=address ++WASM_FLAGS = ++C_FLAGS = ${WASM_FLAGS} -Wall -Werror -ggdb -O + CC_FLAGS = ${C_FLAGS} +-LD_FLAGS = -fsanitize-memory-track-origins -fsanitize-memory-use-after-dtor ++LD_FLAGS = + + C_COMP = clang +EOF + +# 3. Enable "wasm_bulk_memory" required to load WASM modules with DataCount +# section, even when DataCount = 1. +# 4. Force full GC when destroying VMs. 
+ +cat <<\EOF | patch -p1 +--- a/src/wasm-v8.cc ++++ b/src/wasm-v8.cc +@@ -296,7 +296,7 @@ auto Engine::make(own&& config) -> own { + v8::internal::FLAG_experimental_wasm_bigint = true; + v8::internal::FLAG_experimental_wasm_mv = true; + // v8::internal::FLAG_experimental_wasm_anyref = true; +- // v8::internal::FLAG_experimental_wasm_bulk_memory = true; ++ v8::internal::FLAG_experimental_wasm_bulk_memory = true; + // v8::V8::SetFlagsFromCommandLine(&argc, const_cast(argv), false); + auto engine = new(std::nothrow) EngineImpl; + if (!engine) return own(); +@@ -349,7 +349,7 @@ public: + } + + ~StoreImpl() { +-#ifdef DEBUG ++#if 1 //def DEBUG + isolate_->RequestGarbageCollectionForTesting( + v8::Isolate::kFullGarbageCollection); + #endif +EOF + +# Build wasm-c-api. + +# TODO(PiotrSikora): respect CC/CXX/CFLAGS/CXXFLAGS/LDFLAGS upstream. + +make wasm + +# Install wasm-c-api. + +mkdir -p "$THIRDPARTY_BUILD/include/wasm-c-api" +cp -p include/wasm.hh "$THIRDPARTY_BUILD/include/wasm-c-api/" +cp -p src/wasm-bin.hh "$THIRDPARTY_BUILD/include/wasm-c-api/" +ar -r -c -s "$THIRDPARTY_BUILD/lib/libwasm.a" out/wasm-bin.o out/wasm-v8.o diff --git a/ci/build_container/build_recipes/wavm.sh b/ci/build_container/build_recipes/wavm.sh index 21298189172fc..779dc247cf825 100755 --- a/ci/build_container/build_recipes/wavm.sh +++ b/ci/build_container/build_recipes/wavm.sh @@ -31,8 +31,8 @@ cp -pR include/ "$THIRDPARTY_BUILD"/lib/llvm-6.0/include/ # WAVM. 
-COMMIT=275e15fff5928d5cac3b8c5410c5a3b3fa7168ac # Wed Feb 20 16:13:27 2019 -0500 -SHA256=1e8306322c7de31ecb6cceb198ff1a3f37e31702402235f242a4daf68b54242f +COMMIT=0c764ba09fd9aaa31645761001c64fdb4d13b4ae # Tue Apr 30 06:35:31 2019 -0400 +SHA256=abf650ec5a53d18150ed7025277dcd1b076a89bed29eff73345cb308df3926b5 curl https://github.com/WAVM/WAVM/archive/"$COMMIT".tar.gz -sLo WAVM-"$COMMIT".tar.gz \ && echo "$SHA256" WAVM-"$COMMIT".tar.gz | sha256sum --check diff --git a/ci/build_container/docker_build.sh b/ci/build_container/docker_build.sh index c5c7a7915f97f..eb539b7c4f0da 100755 --- a/ci/build_container/docker_build.sh +++ b/ci/build_container/docker_build.sh @@ -3,8 +3,4 @@ [[ -z "${LINUX_DISTRO}" ]] && LINUX_DISTRO="ubuntu" [[ -z "${IMAGE_NAME}" ]] && IMAGE_NAME=envoyproxy/envoy-build-"${LINUX_DISTRO}" -# We need //bazel/... and WORKSPACE for the build, but it's not in ci/build_container. Using Docker -# relative path workaround from https://github.com/docker/docker/issues/2745#issuecomment-253230025 -# to get this to work. -tar cf - . -C ../../ bazel WORKSPACE api \ - | docker build -f Dockerfile-${LINUX_DISTRO} -t ${IMAGE_NAME}:$CIRCLE_SHA1 - +docker build -f Dockerfile-${LINUX_DISTRO} -t ${IMAGE_NAME}:$CIRCLE_SHA1 . 
diff --git a/ci/build_container/docker_push.sh b/ci/build_container/docker_push.sh index 72523f5ffe7b0..57d8f1290f960 100755 --- a/ci/build_container/docker_push.sh +++ b/ci/build_container/docker_push.sh @@ -27,7 +27,7 @@ then cd ci/build_container docker login -u "$DOCKERHUB_USERNAME" -p "$DOCKERHUB_PASSWORD" - for distro in ubuntu + for distro in ubuntu centos do echo "Updating envoyproxy/envoy-build-${distro} image" LINUX_DISTRO=$distro ./docker_build.sh diff --git a/ci/build_setup.sh b/ci/build_setup.sh index e586191606967..beef261cc69e1 100755 --- a/ci/build_setup.sh +++ b/ci/build_setup.sh @@ -17,10 +17,10 @@ function setup_gcc_toolchain() { } function setup_clang_toolchain() { - export PATH=/usr/lib/llvm-7/bin:$PATH + export PATH=/usr/lib/llvm-8/bin:$PATH export CC=clang export CXX=clang++ - export ASAN_SYMBOLIZER_PATH=/usr/lib/llvm-7/bin/llvm-symbolizer + export ASAN_SYMBOLIZER_PATH=/usr/lib/llvm-8/bin/llvm-symbolizer echo "$CC/$CXX toolchain configured" } @@ -59,26 +59,22 @@ rm -f "${SENTINEL}" export USER=bazel export TEST_TMPDIR=/build/tmp export BAZEL="bazel" + +if [[ -f "/etc/redhat-release" ]] +then + export BAZEL_BUILD_EXTRA_OPTIONS="--copt=-DENVOY_IGNORE_GLIBCXX_USE_CXX11_ABI_ERROR=1 --action_env=PATH ${BAZEL_BUILD_EXTRA_OPTIONS}" +else + export BAZEL_BUILD_EXTRA_OPTIONS="--action_env=PATH=/bin:/usr/bin:/usr/lib/llvm-8/bin --linkopt=-fuse-ld=lld ${BAZEL_BUILD_EXTRA_OPTIONS}" +fi + # Not sandboxing, since non-privileged Docker can't do nested namespaces. 
-BAZEL_OPTIONS="--package_path %workspace%:${ENVOY_SRCDIR}" export BAZEL_QUERY_OPTIONS="${BAZEL_OPTIONS}" export BAZEL_BUILD_OPTIONS="--strategy=Genrule=standalone --spawn_strategy=standalone \ --verbose_failures ${BAZEL_OPTIONS} --action_env=HOME --action_env=PYTHONUSERBASE \ --jobs=${NUM_CPUS} --show_task_finish --experimental_generate_json_trace_profile ${BAZEL_BUILD_EXTRA_OPTIONS}" export BAZEL_TEST_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=HOME --test_env=PYTHONUSERBASE \ - --test_env=UBSAN_OPTIONS=print_stacktrace=1 \ --cache_test_results=no --test_output=all ${BAZEL_EXTRA_TEST_OPTIONS}" [[ "${BAZEL_EXPUNGE}" == "1" ]] && "${BAZEL}" clean --expunge -ln -sf /thirdparty "${ENVOY_SRCDIR}"/ci/prebuilt -ln -sf /thirdparty_build "${ENVOY_SRCDIR}"/ci/prebuilt - -# Replace the existing Bazel output cache with a copy of the image's prebuilt deps. -if [[ -d /bazel-prebuilt-output && ! -d "${TEST_TMPDIR}/_bazel_${USER}" ]]; then - BAZEL_OUTPUT_BASE="$(bazel info output_base)" - mkdir -p "${TEST_TMPDIR}/_bazel_${USER}/install" - rsync -a /bazel-prebuilt-root/install/* "${TEST_TMPDIR}/_bazel_${USER}/install/" - rsync -a /bazel-prebuilt-output "${BAZEL_OUTPUT_BASE}" -fi if [ "$1" != "-nofetch" ]; then # Setup Envoy consuming project. @@ -86,7 +82,7 @@ if [ "$1" != "-nofetch" ]; then then git clone https://github.com/envoyproxy/envoy-filter-example.git "${ENVOY_FILTER_EXAMPLE_SRCDIR}" fi - + # This is the hash on https://github.com/envoyproxy/envoy-filter-example.git we pin to. (cd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" && git fetch origin && git checkout -f 6c0625cb4cc9a21df97cef2a1d065463f2ae81ae) cp -f "${ENVOY_SRCDIR}"/ci/WORKSPACE.filter.example "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/WORKSPACE @@ -95,7 +91,6 @@ fi # Also setup some space for building Envoy standalone. export ENVOY_BUILD_DIR="${BUILD_DIR}"/envoy mkdir -p "${ENVOY_BUILD_DIR}" -cp -f "${ENVOY_SRCDIR}"/ci/WORKSPACE "${ENVOY_BUILD_DIR}" # This is where we copy build deliverables to. 
export ENVOY_DELIVERY_DIR="${ENVOY_BUILD_DIR}"/source/exe @@ -113,29 +108,17 @@ mkdir -p "${ENVOY_FAILED_TEST_LOGS}" export ENVOY_BUILD_PROFILE="${ENVOY_BUILD_DIR}"/generated/build-profile mkdir -p "${ENVOY_BUILD_PROFILE}" -# This is where we build for bazel.release* and bazel.dev. -export ENVOY_CI_DIR="${ENVOY_SRCDIR}"/ci - function cleanup() { # Remove build artifacts. This doesn't mess with incremental builds as these # are just symlinks. rm -rf "${ENVOY_SRCDIR}"/bazel-* - rm -rf "${ENVOY_CI_DIR}"/bazel-* - rm -rf "${ENVOY_CI_DIR}"/bazel - rm -rf "${ENVOY_CI_DIR}"/tools - rm -f "${ENVOY_CI_DIR}"/.bazelrc } cleanup trap cleanup EXIT -# Hack due to https://github.com/envoyproxy/envoy/issues/838 and the need to have -# .bazelrc available for build linkstamping. mkdir -p "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/bazel -mkdir -p "${ENVOY_CI_DIR}"/bazel ln -sf "${ENVOY_SRCDIR}"/bazel/get_workspace_status "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/bazel/ -ln -sf "${ENVOY_SRCDIR}"/bazel/get_workspace_status "${ENVOY_CI_DIR}"/bazel/ cp -f "${ENVOY_SRCDIR}"/.bazelrc "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/ -cp -f "${ENVOY_SRCDIR}"/.bazelrc "${ENVOY_CI_DIR}"/ export BUILDIFIER_BIN="/usr/local/bin/buildifier" diff --git a/ci/do_ci.sh b/ci/do_ci.sh index e85a6eba21b33..bab2fa9e8b035 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -7,12 +7,13 @@ set -e build_setup_args="" if [[ "$1" == "fix_format" || "$1" == "check_format" || "$1" == "check_repositories" || \ "$1" == "check_spelling" || "$1" == "fix_spelling" || "$1" == "bazel.clang_tidy" || \ - "$1" == "check_spelling_pedantic" || "$1" == "fix_spelling_pedantic" ]]; then + "$1" == "check_spelling_pedantic" || "$1" == "fix_spelling_pedantic" || "$1" == "bazel.compile_time_options" ]]; then build_setup_args="-nofetch" fi . "$(dirname "$0")"/setup_gcs_cache.sh . 
"$(dirname "$0")"/build_setup.sh $build_setup_args +cd "${ENVOY_SRCDIR}" echo "building using ${NUM_CPUS} CPUs" @@ -27,11 +28,12 @@ function bazel_with_collection() { if [ "${BAZEL_STATUS}" != "0" ] then declare -r FAILED_TEST_LOGS="$(grep " /build.*test.log" "${BAZEL_OUTPUT}" | sed -e 's/ \/build.*\/testlogs\/\(.*\)/\1/')" - cd bazel-testlogs + pushd bazel-testlogs for f in ${FAILED_TEST_LOGS} do cp --parents -f $f "${ENVOY_FAILED_TEST_LOGS}" done + popd exit "${BAZEL_STATUS}" fi collect_build_profile $1 @@ -39,13 +41,12 @@ function bazel_with_collection() { function bazel_release_binary_build() { echo "Building..." - cd "${ENVOY_CI_DIR}" bazel build ${BAZEL_BUILD_OPTIONS} -c opt //source/exe:envoy-static collect_build_profile release_build # Copy the envoy-static binary somewhere that we can access outside of the # container. cp -f \ - "${ENVOY_CI_DIR}"/bazel-bin/source/exe/envoy-static \ + "${ENVOY_SRCDIR}"/bazel-bin/source/exe/envoy-static \ "${ENVOY_DELIVERY_DIR}"/envoy # TODO(mattklein123): Replace this with caching and a different job which creates images. @@ -58,26 +59,17 @@ function bazel_release_binary_build() { function bazel_debug_binary_build() { echo "Building..." - cd "${ENVOY_CI_DIR}" bazel build ${BAZEL_BUILD_OPTIONS} -c dbg //source/exe:envoy-static collect_build_profile debug_build # Copy the envoy-static binary somewhere that we can access outside of the # container. cp -f \ - "${ENVOY_CI_DIR}"/bazel-bin/source/exe/envoy-static \ + "${ENVOY_SRCDIR}"/bazel-bin/source/exe/envoy-static \ "${ENVOY_DELIVERY_DIR}"/envoy-debug } if [[ "$1" == "bazel.release" ]]; then - # The release build step still runs during tag events. Avoid rebuilding for no reason. - # TODO(mattklein123): Consider moving this into its own "build". - if [[ -n "$CIRCLE_TAG" ]] - then - echo 'Ignoring build for git tag event' - exit 0 - fi - - setup_gcc_toolchain + setup_clang_toolchain echo "bazel release build with tests..." 
bazel_release_binary_build @@ -92,52 +84,47 @@ if [[ "$1" == "bazel.release" ]]; then # We have various test binaries in the test directory such as tools, benchmarks, etc. We # run a build pass to make sure they compile. - # Reduce the amount of memory and number of cores Bazel tries to use to - # prevent it from launching too many subprocesses. This should prevent the - # system from running out of memory and killing tasks. See discussion on - # https://github.com/envoyproxy/envoy/pull/5611. - # TODO(akonradi): use --local_cpu_resources flag once Bazel has a release - # after 0.21. - [ -z "$CIRCLECI" ] || export BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --local_resources=12288,5,1" - [ -z "$CIRCLECI" ] || export BAZEL_TEST_OPTIONS="${BAZEL_TEST_OPTIONS} --local_resources=12288,5,1 --local_test_jobs=8" - bazel build ${BAZEL_BUILD_OPTIONS} -c opt //include/... //source/... //test/... # Now run all of the tests which should already be compiled. bazel_with_collection test ${BAZEL_TEST_OPTIONS} -c opt //test/... fi exit 0 elif [[ "$1" == "bazel.release.server_only" ]]; then - setup_gcc_toolchain + setup_clang_toolchain echo "bazel release build..." bazel_release_binary_build exit 0 elif [[ "$1" == "bazel.debug" ]]; then - setup_gcc_toolchain + setup_clang_toolchain echo "bazel debug build with tests..." bazel_debug_binary_build echo "Testing..." bazel test ${BAZEL_TEST_OPTIONS} -c dbg //test/... exit 0 elif [[ "$1" == "bazel.debug.server_only" ]]; then - setup_gcc_toolchain + setup_clang_toolchain echo "bazel debug build..." bazel_debug_binary_build exit 0 elif [[ "$1" == "bazel.asan" ]]; then setup_clang_toolchain - echo "bazel ASAN/UBSAN debug build with tests..." - cd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" - echo "Building and testing..." - bazel_with_collection test ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan @envoy//test/... \ + echo "bazel ASAN/UBSAN debug build with tests" + echo "Building and testing envoy tests..." 
+ bazel_with_collection test ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan //test/... + echo "Building and testing envoy-filter-example tests..." + pushd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" + bazel_with_collection test ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan \ //:echo2_integration_test //:envoy_binary_test + popd # Also validate that integration test traffic tapping (useful when debugging etc.) # works. This requires that we set TAP_PATH. We do this under bazel.asan to # ensure a debug build in CI. + echo "Validating integration test traffic tapping..." TAP_TMP=/tmp/tap/ rm -rf "${TAP_TMP}" mkdir -p "${TAP_TMP}" bazel_with_collection test ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan \ - @envoy//test/extensions/transport_sockets/tls/integration:ssl_integration_test \ + //test/extensions/transport_sockets/tls/integration:ssl_integration_test \ --test_env=TAP_PATH="${TAP_TMP}/tap" # Verify that some pb_text files have been created. We can't check for pcap, # since tcpdump is not available in general due to CircleCI lack of support @@ -146,23 +133,24 @@ elif [[ "$1" == "bazel.asan" ]]; then exit 0 elif [[ "$1" == "bazel.tsan" ]]; then setup_clang_toolchain - echo "bazel TSAN debug build with tests..." + echo "bazel TSAN debug build with tests" + echo "Building and testing envoy tests..." + bazel_with_collection test ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-tsan //test/... + echo "Building and testing envoy-filter-example tests..." cd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" - echo "Building and testing..." - bazel_with_collection test ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-tsan @envoy//test/... \ + bazel_with_collection test ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-tsan \ //:echo2_integration_test //:envoy_binary_test exit 0 elif [[ "$1" == "bazel.dev" ]]; then setup_clang_toolchain # This doesn't go into CI but is available for developer convenience. echo "bazel fastbuild build with tests..." - cd "${ENVOY_CI_DIR}" echo "Building..." 
bazel build ${BAZEL_BUILD_OPTIONS} -c fastbuild //source/exe:envoy-static # Copy the envoy-static binary somewhere that we can access outside of the # container for developers. cp -f \ - "${ENVOY_CI_DIR}"/bazel-bin/source/exe/envoy-static \ + "${ENVOY_SRCDIR}"/bazel-bin/source/exe/envoy-static \ "${ENVOY_DELIVERY_DIR}"/envoy-fastbuild echo "Building and testing..." bazel test ${BAZEL_TEST_OPTIONS} -c fastbuild //test/... @@ -170,6 +158,7 @@ elif [[ "$1" == "bazel.dev" ]]; then elif [[ "$1" == "bazel.compile_time_options" ]]; then # Right now, none of the available compile-time options conflict with each other. If this # changes, this build type may need to be broken up. + # TODO(mpwarres): remove quiche=enabled once QUICHE is built by default. COMPILE_TIME_OPTIONS="\ --config libc++ \ --define signal_trace=disabled \ @@ -177,13 +166,13 @@ elif [[ "$1" == "bazel.compile_time_options" ]]; then --define google_grpc=disabled \ --define boringssl=fips \ --define log_debug_assert_in_release=enabled \ - --define tcmalloc=debug \ + --define quiche=enabled \ + --define path_normalization_by_default=true \ " setup_clang_toolchain # This doesn't go into CI but is available for developer convenience. echo "bazel with different compiletime options build with tests..." # Building all the dependencies from scratch to link them against libc++. - cd "${ENVOY_SRCDIR}" echo "Building..." bazel build ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg //source/exe:envoy-static echo "Building and testing..." @@ -209,13 +198,11 @@ elif [[ "$1" == "bazel.ipv6_tests" ]]; then setup_clang_toolchain echo "Testing..." - cd "${ENVOY_CI_DIR}" bazel_with_collection test ${BAZEL_TEST_OPTIONS} --test_env=ENVOY_IP_TEST_VERSIONS=v6only -c fastbuild \ //test/integration/... //test/common/network/... exit 0 elif [[ "$1" == "bazel.api" ]]; then setup_clang_toolchain - cd "${ENVOY_CI_DIR}" echo "Building API..." bazel build ${BAZEL_BUILD_OPTIONS} -c fastbuild @envoy_api//envoy/... 
echo "Testing API..." @@ -228,13 +215,9 @@ elif [[ "$1" == "bazel.coverage" ]]; then # gcovr is a pain to run with `bazel run`, so package it up into a # relocatable and hermetic-ish .par file. - cd "${ENVOY_SRCDIR}" bazel build @com_github_gcovr_gcovr//:gcovr.par - export GCOVR="${ENVOY_SRCDIR}/bazel-bin/external/com_github_gcovr_gcovr/gcovr.par" - - export GCOVR_DIR="${ENVOY_BUILD_DIR}/bazel-envoy" - export TESTLOGS_DIR="${ENVOY_BUILD_DIR}/bazel-testlogs" - export WORKSPACE=ci + export GCOVR="/tmp/gcovr.par" + cp -f "${ENVOY_SRCDIR}/bazel-bin/external/com_github_gcovr_gcovr/gcovr.par" ${GCOVR} # Reduce the amount of memory and number of cores Bazel tries to use to # prevent it from launching too many subprocesses. This should prevent the @@ -244,23 +227,12 @@ elif [[ "$1" == "bazel.coverage" ]]; then # after 0.21. [ -z "$CIRCLECI" ] || export BAZEL_TEST_OPTIONS="${BAZEL_TEST_OPTIONS} --local_resources=12288,4,1" - # There is a bug in gcovr 3.3, where it takes the -r path, - # in our case /source, and does a regex replacement of various - # source file paths during HTML generation. It attempts to strip - # out the prefix (e.g. /source), but because it doesn't do a match - # and only strip at the start of the string, it removes /source from - # the middle of the string, corrupting the path. The workaround is - # to point -r in the gcovr invocation in run_envoy_bazel_coverage.sh at - # some Bazel created symlinks to the source directory in its output - # directory. Wow. 
- cd "${ENVOY_BUILD_DIR}" - SRCDIR="${GCOVR_DIR}" "${ENVOY_SRCDIR}"/test/run_envoy_bazel_coverage.sh + test/run_envoy_bazel_coverage.sh collect_build_profile coverage exit 0 elif [[ "$1" == "bazel.clang_tidy" ]]; then setup_clang_toolchain - cd "${ENVOY_CI_DIR}" - ./run_clang_tidy.sh + ci/run_clang_tidy.sh exit 0 elif [[ "$1" == "bazel.coverity" ]]; then # Coverity Scan version 2017.07 fails to analyze the entirely of the Envoy @@ -270,7 +242,6 @@ elif [[ "$1" == "bazel.coverity" ]]; then setup_gcc_toolchain echo "bazel Coverity Scan build" echo "Building..." - cd "${ENVOY_CI_DIR}" /build/cov-analysis/bin/cov-build --dir "${ENVOY_BUILD_DIR}"/cov-int bazel build --action_env=LD_PRELOAD ${BAZEL_BUILD_OPTIONS} \ -c opt //source/exe:envoy-static # tar up the coverity results @@ -282,11 +253,10 @@ elif [[ "$1" == "bazel.coverity" ]]; then exit 0 elif [[ "$1" == "fix_format" ]]; then echo "fix_format..." - cd "${ENVOY_SRCDIR}" ./tools/check_format.py fix + ./tools/format_python_tools.sh fix exit 0 elif [[ "$1" == "check_format" ]]; then - cd "${ENVOY_SRCDIR}" echo "check_format_test..." ./tools/check_format_test_helper.py --log=WARN echo "check_format..." @@ -294,27 +264,22 @@ elif [[ "$1" == "check_format" ]]; then ./tools/format_python_tools.sh check exit 0 elif [[ "$1" == "check_repositories" ]]; then - cd "${ENVOY_SRCDIR}" echo "check_repositories..." ./tools/check_repositories.sh exit 0 elif [[ "$1" == "check_spelling" ]]; then - cd "${ENVOY_SRCDIR}" echo "check_spelling..." ./tools/check_spelling.sh check exit 0 elif [[ "$1" == "fix_spelling" ]];then - cd "${ENVOY_SRCDIR}" echo "fix_spell..." ./tools/check_spelling.sh fix exit 0 elif [[ "$1" == "check_spelling_pedantic" ]]; then - cd "${ENVOY_SRCDIR}" echo "check_spelling_pedantic..." ./tools/check_spelling_pedantic.py check exit 0 elif [[ "$1" == "fix_spelling_pedantic" ]]; then - cd "${ENVOY_SRCDIR}" echo "fix_spelling_pedantic..." 
./tools/check_spelling_pedantic.py fix exit 0 diff --git a/ci/do_coverity_local.sh b/ci/do_coverity_local.sh index a7a7776c7e0c5..fa00c27ece646 100755 --- a/ci/do_coverity_local.sh +++ b/ci/do_coverity_local.sh @@ -46,7 +46,7 @@ then --form file=@"${COVERITY_OUTPUT_FILE}" \ --form version="${ENVOY_BUILD_SHA}" \ --form description="Envoy Proxy Build ${ENVOY_BUILD_SHA}" \ - https://scan.coverity.com/builds?project=Envoy+Proxy + https://scan.coverity.com/projects/envoy-proxy else echo "Coverity Scan output file appears to be too small." echo "Not submitting build for analysis." diff --git a/ci/docker_build.sh b/ci/docker_build.sh new file mode 100755 index 0000000000000..b16eef309c086 --- /dev/null +++ b/ci/docker_build.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +set -ex + +docker build -f ci/Dockerfile-envoy-image -t envoyproxy/envoy-dev:latest . +docker build -f ci/Dockerfile-envoy-alpine -t envoyproxy/envoy-alpine-dev:latest . +docker build -f ci/Dockerfile-envoy-alpine-debug -t envoyproxy/envoy-alpine-debug-dev:latest . diff --git a/ci/docker_push.sh b/ci/docker_push.sh index 8f27a60b06221..e8bb38b1bb9f6 100755 --- a/ci/docker_push.sh +++ b/ci/docker_push.sh @@ -1,50 +1,30 @@ -#!/bin/bash +#!/bin/sh # Do not ever set -x here, it is a security hazard as it will place the credentials below in the # CircleCI logs. set -e -# push the envoy image on merge to master -want_push='false' -for branch in "master" -do - if [ "$CIRCLE_BRANCH" == "$branch" ] - then - want_push='true' - fi -done -if [ -z "$CIRCLE_PULL_REQUEST" ] && [ -z "$CIRCLE_TAG" ] && [ "$want_push" == "true" ] +if [ -n "$CIRCLE_PULL_REQUEST" ] then - # TODO(mattklein123): Currently we are doing this push in the context of the release job which - # happens inside of our build image. We should switch to using Circle caching so each of these - # are discrete jobs that work with the binary. All of these commands run on a remote docker - # server also so we have to temporarily install docker here. 
- # https://circleci.com/docs/2.0/building-docker-images/ - VER="17.03.0-ce" - curl -L -o /tmp/docker-"$VER".tgz https://get.docker.com/builds/Linux/x86_64/docker-"$VER".tgz - tar -xz -C /tmp -f /tmp/docker-"$VER".tgz - mv /tmp/docker/* /usr/bin - - docker build -f ci/Dockerfile-envoy-image -t envoyproxy/envoy:latest . - docker login -u "$DOCKERHUB_USERNAME" -p "$DOCKERHUB_PASSWORD" - docker push envoyproxy/envoy:latest - docker tag envoyproxy/envoy:latest envoyproxy/envoy:"$CIRCLE_SHA1" - docker push envoyproxy/envoy:"$CIRCLE_SHA1" + echo 'Ignoring PR branch for docker push.' + exit 0 +fi - docker build -f ci/Dockerfile-envoy-alpine -t envoyproxy/envoy-alpine:latest . - docker tag envoyproxy/envoy-alpine:latest envoyproxy/envoy-alpine:"$CIRCLE_SHA1" - docker push envoyproxy/envoy-alpine:"$CIRCLE_SHA1" - docker push envoyproxy/envoy-alpine:latest +# push the envoy image on tags or merge to master +if [ -n "$CIRCLE_TAG" ] || [ "$CIRCLE_BRANCH" = 'master' ] +then + docker login -u "$DOCKERHUB_USERNAME" -p "$DOCKERHUB_PASSWORD" - docker build -f ci/Dockerfile-envoy-alpine-debug -t envoyproxy/envoy-alpine-debug:latest . - docker tag envoyproxy/envoy-alpine-debug:latest envoyproxy/envoy-alpine-debug:"$CIRCLE_SHA1" - docker push envoyproxy/envoy-alpine-debug:"$CIRCLE_SHA1" - docker push envoyproxy/envoy-alpine-debug:latest + for BUILD_TYPE in "envoy" "envoy-alpine" "envoy-alpine-debug"; do + docker push envoyproxy/"$BUILD_TYPE"-dev:latest + docker tag envoyproxy/"$BUILD_TYPE"-dev:latest envoyproxy/"$BUILD_TYPE"-dev:"$CIRCLE_SHA1" + docker push envoyproxy/"$BUILD_TYPE"-dev:"$CIRCLE_SHA1" + done - # This script tests the docker examples. - # TODO(mattklein123): This almost always times out on CircleCI. Do not run for now until we - # have a better CI setup. - #./ci/verify_examples.sh + # This script tests the docker examples. + # TODO(mattklein123): This almost always times out on CircleCI. Do not run for now until we + # have a better CI setup. 
+ #./ci/verify_examples.sh else - echo 'Ignoring PR branch for docker push.' + echo 'Ignoring non-master branch for docker push.' fi diff --git a/ci/docker_tag.sh b/ci/docker_tag.sh index 8e64dd418c750..7eed670a9ddf0 100755 --- a/ci/docker_tag.sh +++ b/ci/docker_tag.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh # Do not ever set -x here, it is a security hazard as it will place the credentials below in the # CircleCI logs. @@ -6,29 +6,15 @@ set -e if [ -n "$CIRCLE_TAG" ] then - # TODO(mattklein123): Currently we are doing this push in the context of the release job which - # happens inside of our build image. We should switch to using Circle caching so each of these - # are discrete jobs that work with the binary. All of these commands run on a remote docker - # server also so we have to temporarily install docker here. - # https://circleci.com/docs/2.0/building-docker-images/ - VER="17.03.0-ce" - curl -L -o /tmp/docker-"$VER".tgz https://get.docker.com/builds/Linux/x86_64/docker-"$VER".tgz - tar -xz -C /tmp -f /tmp/docker-"$VER".tgz - mv /tmp/docker/* /usr/bin + docker login -u "$DOCKERHUB_USERNAME" -p "$DOCKERHUB_PASSWORD" - docker login -u "$DOCKERHUB_USERNAME" -p "$DOCKERHUB_PASSWORD" - - docker pull envoyproxy/envoy:"$CIRCLE_SHA1" - docker tag envoyproxy/envoy:"$CIRCLE_SHA1" envoyproxy/envoy:"$CIRCLE_TAG" - docker push envoyproxy/envoy:"$CIRCLE_TAG" - - docker pull envoyproxy/envoy-alpine:"$CIRCLE_SHA1" - docker tag envoyproxy/envoy-alpine:"$CIRCLE_SHA1" envoyproxy/envoy-alpine:"$CIRCLE_TAG" - docker push envoyproxy/envoy-alpine:"$CIRCLE_TAG" - - docker pull envoyproxy/envoy-alpine-debug:"$CIRCLE_SHA1" - docker tag envoyproxy/envoy-alpine-debug:"$CIRCLE_SHA1" envoyproxy/envoy-alpine-debug:"$CIRCLE_TAG" - docker push envoyproxy/envoy-alpine-debug:"$CIRCLE_TAG" + for BUILD_TYPE in "envoy" "envoy-alpine" "envoy-alpine-debug"; do + docker pull envoyproxy/"$BUILD_TYPE"-dev:"$CIRCLE_SHA1" + docker tag envoyproxy/"$BUILD_TYPE"-dev:"$CIRCLE_SHA1" 
envoyproxy/"$BUILD_TYPE":"$CIRCLE_TAG" + docker push envoyproxy/"$BUILD_TYPE":"$CIRCLE_TAG" + docker tag envoyproxy/"$BUILD_TYPE"-dev:"$CIRCLE_SHA1" envoyproxy/"$BUILD_TYPE":latest + docker push envoyproxy/"$BUILD_TYPE":latest + done else - echo 'Ignoring non-tag event for docker tag.' + echo 'Ignoring non-tag event for docker tag.' fi diff --git a/ci/mac_ci_steps.sh b/ci/mac_ci_steps.sh index e08c43675d9a1..3f8c95c9368bc 100755 --- a/ci/mac_ci_steps.sh +++ b/ci/mac_ci_steps.sh @@ -2,9 +2,19 @@ set -e +function finish { + echo "disk space at end of build:" + df -h +} +trap finish EXIT + +echo "disk space at beginning of build:" +df -h + . "$(dirname "$0")"/setup_gcs_cache.sh BAZEL_BUILD_OPTIONS="--curses=no --show_task_finish --verbose_failures ${BAZEL_BUILD_EXTRA_OPTIONS} \ + --deleted_packages //test/extensions/quic_listeners/quiche/platform \ --action_env=PATH=/usr/local/bin:/opt/local/bin:/usr/bin:/bin" # TODO(zuercher): remove --flaky_test_attempts when https://github.com/envoyproxy/envoy/issues/2428 # is resolved. @@ -14,6 +24,5 @@ BAZEL_TEST_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_output=all --flaky_test_attemp # is somewhat more deterministic (rather than interleaving the build # and test steps). -bazel build ${BAZEL_BUILD_OPTIONS} //source/... -bazel build ${BAZEL_BUILD_OPTIONS} //test/... +bazel build ${BAZEL_BUILD_OPTIONS} //source/... //test/... bazel test ${BAZEL_TEST_OPTIONS} //test/... diff --git a/ci/prebuilt/BUILD b/ci/prebuilt/BUILD index 8fba80bad2d45..e0539d8767312 100644 --- a/ci/prebuilt/BUILD +++ b/ci/prebuilt/BUILD @@ -3,26 +3,20 @@ licenses(["notice"]) # Apache 2 package(default_visibility = ["//visibility:public"]) cc_library( - name = "luajit", - srcs = ["thirdparty_build/lib/libluajit-5.1.a"], - hdrs = glob(["thirdparty_build/include/luajit-2.1/*"]), + name = "v8", + srcs = select({ + "@envoy//bazel:windows_x86_64": ["WINDOWS_IS_NOT_SUPPORTED_YET"], + "//conditions:default": [ + # Order matters! 
+ "thirdparty_build/lib/libwasm.a", + "thirdparty_build/lib/libv8_monolith.a", + ], + }), + hdrs = [ + "thirdparty_build/include/wasm-c-api/wasm.hh", + "thirdparty_build/include/wasm-c-api/wasm-bin.hh", + ], includes = ["thirdparty_build/include"], - # TODO(mattklein123): We should strip luajit-2.1 here for consumers. However, if we do that - # the headers get included using -I vs. -isystem which then causes old-style-cast warnings. -) - -cc_library( - name = "tcmalloc_and_profiler", - srcs = ["thirdparty_build/lib/libtcmalloc_and_profiler.a"], - hdrs = glob(["thirdparty_build/include/gperftools/**/*.h"]), - strip_include_prefix = "thirdparty_build/include", -) - -cc_library( - name = "tcmalloc_debug", - srcs = ["thirdparty_build/lib/libtcmalloc_debug.a"], - hdrs = glob(["thirdparty_build/include/gperftools/**/*.h"]), - strip_include_prefix = "thirdparty_build/include", ) cc_library( @@ -30,19 +24,7 @@ cc_library( srcs = select({ "@envoy//bazel:windows_x86_64": ["WINDOWS_IS_NOT_SUPPORTED_YET"], "//conditions:default": [ - # ld: Runtime and WASTParse need to be listed first. - "thirdparty_build/lib/WAVM/RelWithDebInfo/libRuntime.a", - "thirdparty_build/lib/WAVM/RelWithDebInfo/libWASTParse.a", - "thirdparty_build/lib/WAVM/RelWithDebInfo/libEmscripten.a", - "thirdparty_build/lib/WAVM/RelWithDebInfo/libIR.a", - "thirdparty_build/lib/WAVM/RelWithDebInfo/libLLVMJIT.a", - "thirdparty_build/lib/WAVM/RelWithDebInfo/libLogging.a", - "thirdparty_build/lib/WAVM/RelWithDebInfo/libNFA.a", - "thirdparty_build/lib/WAVM/RelWithDebInfo/libPlatform.a", - "thirdparty_build/lib/WAVM/RelWithDebInfo/libRegExp.a", - "thirdparty_build/lib/WAVM/RelWithDebInfo/libThreadTest.a", - "thirdparty_build/lib/WAVM/RelWithDebInfo/libWASM.a", - "thirdparty_build/lib/WAVM/RelWithDebInfo/libWASTPrint.a", + "thirdparty_build/lib/libWAVM.a", "thirdparty_build/lib/WAVM/libWAVMUnwind.a", ] + [ # ld: listed in order from llvm-config --libnames. 
diff --git a/ci/run_clang_tidy.sh b/ci/run_clang_tidy.sh index e9df93bfcb652..8adbbd5089d83 100755 --- a/ci/run_clang_tidy.sh +++ b/ci/run_clang_tidy.sh @@ -3,6 +3,15 @@ set -e echo "Generating compilation database..." + +cp -f .bazelrc .bazelrc.bak + +function cleanup() { + cp -f .bazelrc.bak .bazelrc + rm -f .bazelrc.bak +} +trap cleanup EXIT + # The compilation database generate script doesn't support passing build options via CLI. # Writing them into bazelrc echo "build ${BAZEL_BUILD_OPTIONS}" >> .bazelrc @@ -11,25 +20,30 @@ echo "build ${BAZEL_BUILD_OPTIONS}" >> .bazelrc # by clang-tidy "${ENVOY_SRCDIR}/tools/gen_compilation_database.py" --run_bazel_build --include_headers -# It had to be in ENVOY_CI_DIR to run bazel to generate compile database, but clang-tidy-diff -# diff against current directory, moving them to ENVOY_SRCDIR. -mv ./compile_commands.json "${ENVOY_SRCDIR}/compile_commands.json" -cd "${ENVOY_SRCDIR}" - # Do not run incremental clang-tidy on check_format testdata files. function exclude_testdata() { grep -v tools/testdata/check_format/ } +# Do not run clang-tidy against Chromium URL import, this needs to largely +# reflect the upstream structure. +function exclude_chromium_url() { + grep -v source/common/chromium_url/ +} + +function filter_excludes() { + exclude_testdata | exclude_chromium_url +} + if [[ "${RUN_FULL_CLANG_TIDY}" == 1 ]]; then echo "Running full clang-tidy..." - run-clang-tidy-7 + run-clang-tidy-8 elif [[ -z "${CIRCLE_PR_NUMBER}" && "$CIRCLE_BRANCH" == "master" ]]; then echo "On master branch, running clang-tidy-diff against previous commit..." - git diff HEAD^ | exclude_testdata | clang-tidy-diff-7.py -p 1 + git diff HEAD^ | filter_excludes | clang-tidy-diff-8.py -p 1 else echo "Running clang-tidy-diff against master branch..." 
git fetch https://github.com/envoyproxy/envoy.git master - git diff $(git merge-base HEAD FETCH_HEAD)..HEAD | exclude_testdata | \ - clang-tidy-diff-7.py -p 1 + git diff $(git merge-base HEAD FETCH_HEAD)..HEAD | filter_excludes | \ + clang-tidy-diff-8.py -p 1 fi diff --git a/configs/BUILD b/configs/BUILD index d7403d3da506d..9846609607e9e 100644 --- a/configs/BUILD +++ b/configs/BUILD @@ -24,24 +24,15 @@ filegroup( srcs = [ "original-dst-cluster/proxy_config.yaml", ] + select({ - "@bazel_tools//tools/osx:darwin": [], + "//bazel:apple": [], "//conditions:default": ["freebind/freebind.yaml"], }), ) -genrule( - name = "v1_upgraded_configs", - srcs = ["google_com_proxy.yaml"], - outs = ["google_com_proxy.v2.upgraded.json"], - cmd = "$(location //tools:v1_to_bootstrap) $(location google_com_proxy.yaml) > $@", - tools = ["//tools:v1_to_bootstrap"], -) - genrule( name = "example_configs", srcs = [ ":configs", - ":v1_upgraded_configs", "//examples:configs", "//test/config/integration/certs", ], diff --git a/configs/Dockerfile b/configs/Dockerfile index e81237686687b..2d7b7a6a5e3bf 100644 --- a/configs/Dockerfile +++ b/configs/Dockerfile @@ -1,7 +1,7 @@ # This configuration will build a Docker container containing # an Envoy proxy that routes to Google. 
-FROM envoyproxy/envoy:latest +FROM envoyproxy/envoy-dev:latest RUN apt-get update COPY google_com_proxy.v2.yaml /etc/envoy.yaml CMD /usr/local/bin/envoy -c /etc/envoy.yaml diff --git a/configs/configgen.py b/configs/configgen.py index 44a796c23d9f3..6c5ad0c08078f 100755 --- a/configs/configgen.py +++ b/configs/configgen.py @@ -2,8 +2,8 @@ import json from collections import OrderedDict import os -import sys import shutil +import sys SCRIPT_DIR = os.path.dirname(__file__) OUT_DIR = sys.argv[1] diff --git a/configs/configgen.sh b/configs/configgen.sh index 2ecf6b77ba06d..2e82ebff3dd98 100755 --- a/configs/configgen.sh +++ b/configs/configgen.sh @@ -25,4 +25,4 @@ for FILE in $*; do done # tar is having issues with -C for some reason so just cd into OUT_DIR. -(cd "$OUT_DIR"; tar -hcvf example_configs.tar *.json *.yaml certs/*.pem) +(cd "$OUT_DIR"; tar -hcvf example_configs.tar *.yaml certs/*.pem) diff --git a/configs/envoy_double_proxy_v2.template.yaml b/configs/envoy_double_proxy_v2.template.yaml index 0d638a6fe85dc..2c08332f795d8 100644 --- a/configs/envoy_double_proxy_v2.template.yaml +++ b/configs/envoy_double_proxy_v2.template.yaml @@ -25,7 +25,8 @@ {%endif -%} filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: AUTO stat_prefix: router route_config: @@ -42,14 +43,18 @@ timeout: 20s http_filters: - name: envoy.health_check - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck pass_through_mode: false - endpoint: /healthcheck - name: envoy.buffer - config: + headers: + - exact_match: /healthcheck + name: :path + - name: envoy.buffer + typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer max_request_bytes: 5242880 - name: envoy.router - config: {} + - name: envoy.router + typed_config: {} tracing: operation_name: INGRESS idle_timeout: 
840s @@ -71,7 +76,8 @@ default_value: 1000 runtime_key: access_log.access_error.duration - traceable_filter: {} - config: + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: /var/log/envoy/access_error.log format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%REQ(X-LYFT-USER-ID)%\" \"%RESP(GRPC-STATUS)%\"\n" {% if proxy_proto %} @@ -91,20 +97,30 @@ static_resources: type: STATIC connect_timeout: 0.25s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: 127.0.0.1 - port_value: 8125 + load_assignment: + cluster_name: statsd + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8125 + protocol: TCP - name: backhaul type: STRICT_DNS connect_timeout: 1s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: front-proxy.yourcompany.net - port_value: 9400 + load_assignment: + cluster_name: backhaul + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: front-proxy.yourcompany.net + port_value: 9400 + protocol: TCP # There are so few connections going back # that we can get some imbalance. 
Until we come up # with a better solution just limit the requests @@ -127,11 +143,16 @@ static_resources: type: LOGICAL_DNS connect_timeout: 1s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: collector-grpc.lightstep.com - port_value: 443 + load_assignment: + cluster_name: lightstep_saas + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: collector-grpc.lightstep.com + port_value: 443 + protocol: TCP http2_protocol_options: {} tls_context: common_tls_context: @@ -143,12 +164,14 @@ static_resources: flags_path: "/etc/envoy/flags" stats_sinks: - name: envoy.statsd - config: + typed_config: + "@type": type.googleapis.com/envoy.config.metrics.v2.StatsdSink tcp_cluster_name: statsd tracing: http: name: envoy.lightstep - config: + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.LightstepConfig access_token_file: "/etc/envoy/lightstep_access_token" collector_cluster: lightstep_saas runtime: @@ -156,7 +179,7 @@ runtime: subdirectory: envoy override_subdirectory: envoy_override admin: - access_log_path: "var/log/envoy/admin_access.log" + access_log_path: "/var/log/envoy/admin_access.log" address: socket_address: protocol: TCP diff --git a/configs/envoy_front_proxy_v2.template.yaml b/configs/envoy_front_proxy_v2.template.yaml index f931ab70f7850..35f734f80ad2e 100644 --- a/configs/envoy_front_proxy_v2.template.yaml +++ b/configs/envoy_front_proxy_v2.template.yaml @@ -31,7 +31,8 @@ {%endif %} filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: AUTO stat_prefix: router {% if proxy_proto -%} @@ -42,20 +43,26 @@ {{ router_file_content(router_file='envoy_router_v2.template.yaml')|indent(10) }} http_filters: - name: envoy.health_check - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck pass_through_mode: 
false headers: - name: ":path" exact_match: "/healthcheck" - name: envoy.buffer - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer max_request_bytes: 5242880 - name: envoy.rate_limit config: domain: envoy_front request_type: external + rate_limit_service: + grpc_service: + envoy_grpc: + cluster_name: ratelimit - name: envoy.router - config: {} + typed_config: {} add_user_agent: true tracing: operation_name: INGRESS @@ -78,7 +85,8 @@ default_value: 1000 runtime_key: access_log.access_error.duration - traceable_filter: {} - config: + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/envoy/access_error.log" format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%REQ(X-LYFT-USER-ID)%\" \"%RESP(GRPC-STATUS)%\"\n" {% endmacro -%} @@ -96,45 +104,57 @@ static_resources: type: STRICT_DNS connect_timeout: 0.25s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: disccovery.yourcompany.net - port_value: 80 + load_assignment: + cluster_name: sds + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: discovery.yourcompany.net + port_value: 80 + protocol: TCP - name: statsd type: STATIC connect_timeout: 0.25s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: 127.0.0.1 - port_value: 8125 + load_assignment: + cluster_name: statsd + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8125 + protocol: TCP - name: lightstep_saas type: LOGICAL_DNS connect_timeout: 1s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: collector-grpc.lightstep.com - port_value: 443 + load_assignment: + 
cluster_name: lightstep_saas + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: collector-grpc.lightstep.com + port_value: 443 + protocol: TCP http2_protocol_options: {} - {% for service, options in clusters.iteritems() -%} + {% for service, options in clusters.items() -%} - {{ helper.internal_cluster_definition(service, options)|indent(2) }} {% endfor %} cluster_manager: outlier_detection: event_log_path: /var/log/envoy/outlier_events.log flags_path: /etc/envoy/flags -rate_limit_service: - grpc_service: - envoy_grpc: - cluster_name: ratelimit tracing: http: name: envoy.lightstep - config: + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.LightstepConfig collector_cluster: lightstep_saas access_token_file: "/etc/envoy/lightstep_access_token" runtime: diff --git a/configs/envoy_service_to_service_v2.template.yaml b/configs/envoy_service_to_service_v2.template.yaml index 69a25e9122f94..083a8c39a2926 100644 --- a/configs/envoy_service_to_service_v2.template.yaml +++ b/configs/envoy_service_to_service_v2.template.yaml @@ -9,7 +9,8 @@ filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: AUTO stat_prefix: ingress_http route_config: @@ -32,22 +33,25 @@ cluster: local_service http_filters: - name: envoy.health_check - config: - pass_through_mode: true - headers: - - name: ":path" - exact_match: "/healthcheck" - cache_time: 2.5s + typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck + pass_through_mode: true + headers: + - name: ":path" + exact_match: "/healthcheck" + cache_time: 2.5s - name: envoy.buffer - config: - max_request_bytes: 5242880 + typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer + max_request_bytes: 5242880 - name: envoy.router - config: {} + typed_config: {} access_log: 
- name: envoy.file_access_log filter: not_health_check_filter: {} - config: + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/envoy/ingress_http.log" {{ access_log_helper.ingress_full()|indent(10)}} - name: envoy.file_access_log @@ -75,7 +79,8 @@ default_value: 2000 runtime_key: access_log.access_error.duration - not_health_check_filter: {} - config: + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/envoy/ingress_http_error.log" {{ access_log_helper.ingress_sampled_log()|indent(10)}} - name: envoy.file_access_log @@ -85,7 +90,8 @@ - not_health_check_filter: {} - runtime_filter: runtime_key: access_log.ingress_http - config: + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/envoy/ingress_http_sampled.log" {{ access_log_helper.ingress_sampled_log()|indent(10)}} idle_timeout: 840s @@ -103,13 +109,14 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: AUTO stat_prefix: egress_http route_config: name: local_route virtual_hosts: - {% for service, options in internal_virtual_hosts.iteritems() %} + {% for service, options in internal_virtual_hosts.items() %} - name: {{ service }} domains: - {{ service }} @@ -141,18 +148,23 @@ static_resources: default_value: 2000 runtime_key: access_log.access_error.duration - traceable_filter: {} - config: + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/envoy/egress_http_error.log" - {{ access_log_helper.egress_error_log()|indent(10)}} + {{ access_log_helper.egress_error_log()|indent(10) }} use_remote_address: true http_filters: - name: envoy.rate_limit config: domain: envoy_service_to_service + rate_limit_service: + grpc_service: + envoy_grpc: + 
cluster_name: ratelimit - name: envoy.grpc_http1_bridge - config: {} + typed_config: {} - name: envoy.router - config: {} + typed_config: {} - address: socket_address: @@ -162,7 +174,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: AUTO stat_prefix: egress_http rds: @@ -195,7 +208,8 @@ static_resources: default_value: 2000 runtime_key: access_log.access_error.duration - traceable_filter: {} - config: + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/envoy/egress_http_error.log" {{ access_log_helper.egress_error_log()|indent(10) }} use_remote_address: true @@ -203,10 +217,14 @@ static_resources: - name: envoy.rate_limit config: domain: envoy_service_to_service + rate_limit_service: + grpc_service: + envoy_grpc: + cluster_name: ratelimit - name: envoy.grpc_http1_bridge - config: {} + typed_config: {} - name: envoy.router - config: {} + typed_config: {} {% if external_virtual_hosts|length > 0 or mongos_servers|length > 0 %}{% endif -%} {% for mapping in external_virtual_hosts -%} - name: "{{ mapping['address']}}" @@ -218,7 +236,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: AUTO idle_timeout: 840s stat_prefix: egress_{{ mapping['name'] }} @@ -243,10 +262,10 @@ static_resources: http_filters: {% if mapping['name'] in ['dynamodb_iad', 'dynamodb_legacy'] -%} - name: envoy.http_dynamo_filter - config: {} + typed_config: {} {% endif -%} - name: envoy.router - config: {} + typed_config: {} access_log: - name: envoy.file_access_log filter: @@ -272,7 +291,8 @@ static_resources: default_value: 2000 runtime_key: access_log.access_error.duration {% endif %} - 
config: + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/envoy/egress_{{ mapping['name'] }}_http_error.log" {% if mapping.get('is_amzn_service', False) -%} {{ access_log_helper.egress_error_amazon_service()|indent(10) }} @@ -281,7 +301,7 @@ static_resources: {% endif %} {% if (mongos_servers|length > 0) or (mongos_servers|length == 0 and not loop.last ) %}{% endif -%} {% endfor -%} - {% for key, value in mongos_servers.iteritems() -%} + {% for key, value in mongos_servers.items() -%} - name : "{{ value['address'] }}" address: socket_address: @@ -291,7 +311,8 @@ static_resources: filter_chains: - filters: - name: envoy.tcp_proxy - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy stat_prefix: mongo_{{ key }} cluster: mongo_{{ key }} - name: envoy.mongo_proxy @@ -310,7 +331,7 @@ static_resources: {% endif %} {% endfor -%} clusters: - {% for service, options in internal_virtual_hosts.iteritems() -%} + {% for service, options in internal_virtual_hosts.items() -%} - {{ helper.internal_cluster_definition(service, options)|indent(2)}} {% endfor -%} {% for mapping in external_virtual_hosts -%} @@ -334,25 +355,35 @@ static_resources: {% endif %} type: LOGICAL_DNS lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: {{ host['remote_address'] }} - port_value: {{ host['port_value'] }} - protocol: {{ host['protocol'] }} + load_assignment: + cluster_name: egress_{{ host['name'] }} + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: {{ host['remote_address'] }} + port_value: {{ host['port_value'] }} + protocol: {{ host['protocol'] }} {% endfor -%} {% endfor -%} - {% for key, value in mongos_servers.iteritems() -%} + {% for key, value in mongos_servers.items() -%} - name: mongo_{{ key }} connect_timeout: 0.25s type: STRICT_DNS lb_policy: RANDOM - hosts: - {% for server in value['hosts'] -%} - - socket_address: - protocol: {{ 
server['protocol'] }} - port_value: {{ server['port_value'] }} - address: {{ server['address'] }} - {% endfor -%} + load_assignment: + cluster_name: mongo_{{ key }} + endpoints: + - lb_endpoints: + {% for server in value['hosts'] -%} + - endpoint: + address: + socket_address: + address: {{ server['address'] }} + port_value: {{ server['port_value'] }} + protocol: {{ server['protocol'] }} + {% endfor -%} {% endfor %} - name: main_website connect_timeout: 0.25s @@ -360,20 +391,32 @@ static_resources: # Comment out the following line to test on v6 networks dns_lookup_family: V4_ONLY lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: main_website.com - port_value: 443 - tls_context: { sni: www.main_website.com } + load_assignment: + cluster_name: main_website + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: main_website.com + port_value: 443 + protocol: TCP + tls_context: + sni: www.main_website.com - name: local_service connect_timeout: 0.25s type: STATIC lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: 127.0.0.1 - port_value: 8080 + load_assignment: + cluster_name: main_website + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8080 + protocol: TCP circuit_breakers: thresholds: max_pending_requests: 30 @@ -383,11 +426,16 @@ static_resources: type: STATIC lb_policy: ROUND_ROBIN http2_protocol_options: {} - hosts: - - socket_address: - protocol: TCP - address: 127.0.0.1 - port_value: 8081 + load_assignment: + cluster_name: local_service_grpc + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8081 + protocol: TCP circuit_breakers: thresholds: max_requests: 200 @@ -396,31 +444,46 @@ static_resources: connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: rds.yourcompany.net - port_value: 80 + load_assignment: + 
cluster_name: local_service_grpc + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: rds.yourcompany.net + port_value: 80 + protocol: TCP dns_lookup_family: V4_ONLY - name: statsd connect_timeout: 0.25s type: STATIC lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: 127.0.0.1 - port_value: 8125 + load_assignment: + cluster_name: statsd + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8125 + protocol: TCP dns_lookup_family: V4_ONLY - name: lightstep_saas connect_timeout: 1s type: LOGICAL_DNS lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: collector-grpc.lightstep.com - port_value: 443 + load_assignment: + cluster_name: lightstep_saas + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: collector-grpc.lightstep.com + port_value: 443 + protocol: TCP http2_protocol_options: max_concurrent_streams: 100 tls_context: @@ -434,20 +497,30 @@ static_resources: connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: cds.yourcompany.net - port_value: 80 + load_assignment: + cluster_name: cds_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: cds.yourcompany.net + port_value: 80 + protocol: TCP - name: sds connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: discovery.yourcompany.net - port_value: 80 + load_assignment: + cluster_name: sds + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: discovery.yourcompany.net + port_value: 80 + protocol: TCP dynamic_resources: cds_config: api_config_source: @@ -459,19 +532,17 @@ cluster_manager: {} flags_path: "/etc/envoy/flags" stats_sinks: - name: envoy.statsd - config: + typed_config: + "@type": type.googleapis.com/envoy.config.metrics.v2.StatsdSink 
tcp_cluster_name: statsd watchdog: {} tracing: http: name: envoy.lightstep - config: + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.LightstepConfig access_token_file: "/etc/envoy/lightstep_access_token" collector_cluster: lightstep_saas -rate_limit_service: - grpc_service: - envoy_grpc: - cluster_name: ratelimit runtime: symlink_root: "/srv/runtime_data/current" subdirectory: envoy diff --git a/configs/freebind/freebind.yaml b/configs/freebind/freebind.yaml index 9b494bd05742f..1171795589250 100644 --- a/configs/freebind/freebind.yaml +++ b/configs/freebind/freebind.yaml @@ -1,18 +1,23 @@ admin: access_log_path: /tmp/admin_access.log address: - socket_address: { address: 127.0.0.1, port_value: 9901 } + socket_address: + address: 127.0.0.1 + port_value: 9901 static_resources: listeners: - name: listener_0 address: - socket_address: { address: 192.168.42.1, port_value: 10000 } + socket_address: + address: 192.168.42.1 + port_value: 10000 freebind: true filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager stat_prefix: ingress_http route_config: name: local_route @@ -29,10 +34,15 @@ static_resources: connect_timeout: 30s type: STATIC lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: 127.0.0.1 - port_value: 10001 + load_assignment: + cluster_name: service_local + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 10001 # TODO(htuch): Figure out how to do end-to-end testing with # outgoing connections and free bind. 
# upstream_bind_config: diff --git a/configs/google_com_proxy.json b/configs/google_com_proxy.json deleted file mode 100644 index 6e131e1e1e543..0000000000000 --- a/configs/google_com_proxy.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "listeners": [{ - "address": "tcp://127.0.0.1:10000", - "filters": [{ - "name": "http_connection_manager", - "config": { - "codec_type": "auto", - "stat_prefix": "ingress_http", - "route_config": { - "virtual_hosts": [{ - "name": "local_service", - "domains": [ - "*" - ], - "routes": [{ - "timeout_ms": 0, - "prefix": "/", - "host_rewrite": "www.google.com", - "cluster": "service_google" - }] - }] - }, - "filters": [{ - "name": "router", - "config": {} - }] - } - }] - }], - "admin": { - "access_log_path": "/tmp/admin_access.log", - "address": "tcp://127.0.0.1:9901" - }, - "cluster_manager": { - "clusters": [{ - "name": "service_google", - "connect_timeout_ms": 250, - "type": "logical_dns", - "lb_type": "round_robin", - "hosts": [{ - "url": "tcp://google.com:443" - }], - "ssl_context": { - "sni": "www.google.com" - } - }] - } -} diff --git a/configs/google_com_proxy.v2.yaml b/configs/google_com_proxy.v2.yaml index b97092748b9ec..532ba06bf6c73 100644 --- a/configs/google_com_proxy.v2.yaml +++ b/configs/google_com_proxy.v2.yaml @@ -16,7 +16,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager stat_prefix: ingress_http route_config: name: local_route @@ -38,8 +39,14 @@ static_resources: # Comment out the following line to test on v6 networks dns_lookup_family: V4_ONLY lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: google.com - port_value: 443 - tls_context: { sni: www.google.com } + load_assignment: + cluster_name: service_google + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: www.google.com + port_value: 443 + tls_context: + 
sni: www.google.com diff --git a/configs/google_com_proxy.yaml b/configs/google_com_proxy.yaml deleted file mode 100644 index 8683e9e4c9254..0000000000000 --- a/configs/google_com_proxy.yaml +++ /dev/null @@ -1,31 +0,0 @@ -listeners: -- address: tcp://127.0.0.1:10000 - filters: - - name: http_connection_manager - config: - codec_type: auto - stat_prefix: ingress_http - route_config: - virtual_hosts: - - name: local_service - domains: ["*"] - routes: - - prefix: "/" - timeout_ms: 0 - host_rewrite: www.google.com - cluster: service_google - filters: - - { name: router, config: {} } - -admin: - access_log_path: /tmp/admin_access.log - address: tcp://127.0.0.1:9901 - -cluster_manager: - clusters: - - name: service_google - connect_timeout_ms: 250 - type: logical_dns - lb_type: round_robin - hosts: [{ url: tcp://google.com:443 }] - ssl_context: { sni: www.google.com } diff --git a/configs/original-dst-cluster/netns_cleanup.sh b/configs/original-dst-cluster/netns_cleanup.sh index ad85b75e60f9b..7adedcd4c8bb6 100755 --- a/configs/original-dst-cluster/netns_cleanup.sh +++ b/configs/original-dst-cluster/netns_cleanup.sh @@ -1,3 +1,4 @@ +#!/usr/bin/env bash # # Cleanup network namespace after testing Envoy original_dst cluster # diff --git a/configs/original-dst-cluster/netns_setup.sh b/configs/original-dst-cluster/netns_setup.sh index 628ae02ccc45d..6b1cedcbd1f60 100755 --- a/configs/original-dst-cluster/netns_setup.sh +++ b/configs/original-dst-cluster/netns_setup.sh @@ -1,3 +1,4 @@ +#!/usr/bin/env bash # # Example setup network namespace for testing Envoy original_dst cluster # Clean up with the cleanup script with the same arguments. 
diff --git a/configs/original-dst-cluster/proxy_config.yaml b/configs/original-dst-cluster/proxy_config.yaml index 19f770e7fa5fa..7f0515d61c10e 100644 --- a/configs/original-dst-cluster/proxy_config.yaml +++ b/configs/original-dst-cluster/proxy_config.yaml @@ -7,24 +7,27 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: - deprecated_v1: true - value: - stat_prefix: ingress_http - route_config: - virtual_hosts: - - routes: - - prefix: "/" - timeout_ms: 0 + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + stat_prefix: ingress_http + route_config: + name: local_service + virtual_hosts: + - name: backend + domains: + - "*" + routes: + - match: + prefix: "/" + route: cluster: cluster1 - domains: - - "*" - name: local_service - filters: - - name: router - config: {} - codec_type: auto - use_original_dst: true + http_filters: + - name: envoy.router + typed_config: {} + codec_type: auto + listener_filters: + - name: envoy.listener.original_dst + typed_config: {} clusters: - name: cluster1 type: ORIGINAL_DST @@ -34,7 +37,7 @@ static_resources: cluster_manager: {} watchdog: {} admin: - access_log_path: "/tmp/admin_access.log" + access_log_path: /tmp/admin_access.log address: socket_address: address: 127.0.0.1 diff --git a/configs/requirements.txt b/configs/requirements.txt index b60338e30ada0..f4c7b793c7b9c 100644 --- a/configs/requirements.txt +++ b/configs/requirements.txt @@ -1 +1 @@ -jinja2==2.10 +jinja2==2.10.1 diff --git a/docs/build.sh b/docs/build.sh index 426ec44422981..caa6264ee0eed 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -16,18 +16,23 @@ then exit 1 fi # Check the version_history.rst contains current release version. 
- grep --fixed-strings "$VERSION_NUMBER" docs/root/intro/version_history.rst + grep --fixed-strings "$VERSION_NUMBER" docs/root/intro/version_history.rst \ + || (echo "Git tag not found in version_history.rst" && exit 1) + # Now that we know there is a match, we can use the tag. export ENVOY_DOCS_VERSION_STRING="tag-$CIRCLE_TAG" export ENVOY_DOCS_RELEASE_LEVEL=tagged + export ENVOY_BLOB_SHA="$CIRCLE_TAG" else BUILD_SHA=$(git rev-parse HEAD) VERSION_NUM=$(cat VERSION) export ENVOY_DOCS_VERSION_STRING="${VERSION_NUM}"-"${BUILD_SHA:0:6}" export ENVOY_DOCS_RELEASE_LEVEL=pre-release + export ENVOY_BLOB_SHA="$BUILD_SHA" fi SCRIPT_DIR=$(dirname "$0") +API_DIR=$(dirname "$dir")/api BUILD_DIR=build_docs [[ -z "${DOCS_OUTPUT_DIR}" ]] && DOCS_OUTPUT_DIR=generated/docs [[ -z "${GENERATED_RST_DIR}" ]] && GENERATED_RST_DIR=generated/rst @@ -42,7 +47,8 @@ source_venv "$BUILD_DIR" pip install -r "${SCRIPT_DIR}"/requirements.txt bazel build ${BAZEL_BUILD_OPTIONS} @envoy_api//docs:protos --aspects \ - tools/protodoc/protodoc.bzl%proto_doc_aspect --output_groups=rst --action_env=CPROFILE_ENABLED --spawn_strategy=standalone + tools/protodoc/protodoc.bzl%proto_doc_aspect --output_groups=rst --action_env=CPROFILE_ENABLED \ + --action_env=ENVOY_BLOB_SHA --spawn_strategy=standalone # These are the protos we want to put in docs, this list will grow. # TODO(htuch): Factor this out of this script. 
@@ -71,12 +77,14 @@ PROTO_RST=" /envoy/api/v2/cluster/circuit_breaker/envoy/api/v2/cluster/circuit_breaker.proto.rst /envoy/api/v2/rds/envoy/api/v2/rds.proto.rst /envoy/api/v2/route/route/envoy/api/v2/route/route.proto.rst + /envoy/api/v2/srds/envoy/api/v2/srds.proto.rst /envoy/api/v2/lds/envoy/api/v2/lds.proto.rst /envoy/api/v2/listener/listener/envoy/api/v2/listener/listener.proto.rst /envoy/api/v2/ratelimit/ratelimit/envoy/api/v2/ratelimit/ratelimit.proto.rst /envoy/config/accesslog/v2/als/envoy/config/accesslog/v2/als.proto.rst /envoy/config/accesslog/v2/file/envoy/config/accesslog/v2/file.proto.rst /envoy/config/bootstrap/v2/bootstrap/envoy/config/bootstrap/v2/bootstrap.proto.rst + /envoy/config/cluster/redis/redis_cluster/envoy/config/cluster/redis/redis_cluster.proto.rst /envoy/config/common/tap/v2alpha/common/envoy/config/common/tap/v2alpha/common.proto.rst /envoy/config/ratelimit/v2/rls/envoy/config/ratelimit/v2/rls.proto.rst /envoy/config/metrics/v2/metrics_service/envoy/config/metrics/v2/metrics_service.proto.rst @@ -85,6 +93,7 @@ PROTO_RST=" /envoy/config/filter/accesslog/v2/accesslog/envoy/config/filter/accesslog/v2/accesslog.proto.rst /envoy/config/filter/fault/v2/fault/envoy/config/filter/fault/v2/fault.proto.rst /envoy/config/filter/http/buffer/v2/buffer/envoy/config/filter/http/buffer/v2/buffer.proto.rst + /envoy/config/filter/http/csrf/v2/csrf/envoy/config/filter/http/csrf/v2/csrf.proto.rst /envoy/config/filter/http/ext_authz/v2/ext_authz/envoy/config/filter/http/ext_authz/v2/ext_authz.proto.rst /envoy/config/filter/http/fault/v2/fault/envoy/config/filter/http/fault/v2/fault.proto.rst /envoy/config/filter/http/gzip/v2/gzip/envoy/config/filter/http/gzip/v2/gzip.proto.rst @@ -100,6 +109,9 @@ PROTO_RST=" /envoy/config/filter/http/tap/v2alpha/tap/envoy/config/filter/http/tap/v2alpha/tap.proto.rst /envoy/config/filter/http/transcoder/v2/transcoder/envoy/config/filter/http/transcoder/v2/transcoder.proto.rst 
/envoy/config/filter/listener/original_src/v2alpha1/original_src/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto.rst + /envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto.rst + /envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto.rst + /envoy/config/filter/dubbo/router/v2alpha1/router/envoy/config/filter/dubbo/router/v2alpha1/router.proto.rst /envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto.rst /envoy/config/filter/network/ext_authz/v2/ext_authz/envoy/config/filter/network/ext_authz/v2/ext_authz.proto.rst /envoy/config/filter/network/http_connection_manager/v2/http_connection_manager/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto.rst @@ -120,6 +132,7 @@ PROTO_RST=" /envoy/config/transport_socket/tap/v2alpha/tap/envoy/config/transport_socket/tap/v2alpha/tap.proto.rst /envoy/data/accesslog/v2/accesslog/envoy/data/accesslog/v2/accesslog.proto.rst /envoy/data/core/v2alpha/health_check_event/envoy/data/core/v2alpha/health_check_event.proto.rst + /envoy/data/tap/v2alpha/common/envoy/data/tap/v2alpha/common.proto.rst /envoy/data/tap/v2alpha/transport/envoy/data/tap/v2alpha/transport.proto.rst /envoy/data/tap/v2alpha/http/envoy/data/tap/v2alpha/http.proto.rst /envoy/data/tap/v2alpha/wrapper/envoy/data/tap/v2alpha/wrapper.proto.rst @@ -150,6 +163,12 @@ do [ -n "${CPROFILE_ENABLED}" ] && cp -f bazel-bin/"${p}".profile "$(dirname "${DEST}")" done +mkdir -p ${GENERATED_RST_DIR}/api-docs + +cp -f $API_DIR/xds_protocol.rst "${GENERATED_RST_DIR}/api-docs/xds_protocol.rst" + +rsync -rav $API_DIR/diagrams "${GENERATED_RST_DIR}/api-docs" + rsync -av "${SCRIPT_DIR}"/root/ "${SCRIPT_DIR}"/conf.py "${GENERATED_RST_DIR}" sphinx-build -W --keep-going -b html "${GENERATED_RST_DIR}" "${DOCS_OUTPUT_DIR}" 
diff --git a/docs/conf.py b/docs/conf.py index 631cc3e3bb722..64c48a8f6c793 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -13,19 +13,46 @@ # serve to show the default. from datetime import datetime +import os +from sphinx.directives.code import CodeBlock import sphinx_rtd_theme import sys -import os + + +# https://stackoverflow.com/questions/44761197/how-to-use-substitution-definitions-with-code-blocks +class SubstitutionCodeBlock(CodeBlock): + """ + Similar to CodeBlock but replaces placeholders with variables. See "substitutions" below. + """ + + def run(self): + """ + Replace placeholders with given variables. + """ + app = self.state.document.settings.env.app + new_content = [] + existing_content = self.content + for item in existing_content: + for pair in app.config.substitutions: + original, replacement = pair + item = item.replace(original, replacement) + new_content.append(item) + + self.content = new_content + return list(CodeBlock.run(self)) def setup(app): app.add_config_value('release_level', '', 'env') + app.add_config_value('substitutions', [], 'html') + app.add_directive('substitution-code-block', SubstitutionCodeBlock) if not os.environ.get('ENVOY_DOCS_RELEASE_LEVEL'): raise Exception("ENVOY_DOCS_RELEASE_LEVEL env var must be defined") release_level = os.environ['ENVOY_DOCS_RELEASE_LEVEL'] +blob_sha = os.environ['ENVOY_BLOB_SHA'] # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the @@ -42,10 +69,16 @@ def setup(app): # ones. 
extensions = ['sphinxcontrib.httpdomain', 'sphinx.ext.extlinks', 'sphinx.ext.ifconfig'] extlinks = { - 'repo': ('https://github.com/envoyproxy/envoy/blob/master/%s', ''), - 'api': ('https://github.com/envoyproxy/envoy/blob/master/api/%s', ''), + 'repo': ('https://github.com/envoyproxy/envoy/blob/{}/%s'.format(blob_sha), ''), + 'api': ('https://github.com/envoyproxy/envoy/blob/{}/api/%s'.format(blob_sha), ''), } +# Setup global substitutions +if 'pre-release' in release_level: + substitutions = [('|envoy_docker_image|', 'envoy-dev:{}'.format(blob_sha))] +else: + substitutions = [('|envoy_docker_image|', 'envoy:{}'.format(blob_sha))] + # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] diff --git a/docs/requirements.txt b/docs/requirements.txt index 44a91ddeecc9b..79ee5cd0ad46b 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,6 +1,6 @@ GitPython==2.0.8 -Jinja2==2.10 -MarkupSafe==1.1.0 +Jinja2==2.10.1 +MarkupSafe==1.1.1 Pygments==2.2.0 alabaster==0.7.10 babel==2.4.0 diff --git a/docs/root/api-v2/config/cluster/cluster.rst b/docs/root/api-v2/config/cluster/cluster.rst new file mode 100644 index 0000000000000..7bb5343e81dd2 --- /dev/null +++ b/docs/root/api-v2/config/cluster/cluster.rst @@ -0,0 +1,8 @@ +Cluster +======= + +.. toctree:: + :glob: + :maxdepth: 1 + + redis/* diff --git a/docs/root/api-v2/config/config.rst b/docs/root/api-v2/config/config.rst index 65e276183e5fb..134d5101c7c83 100644 --- a/docs/root/api-v2/config/config.rst +++ b/docs/root/api-v2/config/config.rst @@ -12,3 +12,4 @@ Extensions transport_socket/transport_socket resource_monitor/resource_monitor common/common + cluster/cluster diff --git a/docs/root/api-v2/config/filter/dubbo/dubbo.rst b/docs/root/api-v2/config/filter/dubbo/dubbo.rst new file mode 100644 index 0000000000000..d90e49b707dae --- /dev/null +++ b/docs/root/api-v2/config/filter/dubbo/dubbo.rst @@ -0,0 +1,8 @@ +Dubbo filters +============== + +.. 
toctree:: + :glob: + :maxdepth: 2 + + */v2alpha1/* diff --git a/docs/root/api-v2/config/filter/filter.rst b/docs/root/api-v2/config/filter/filter.rst index 88385094a2f44..6ddd5e15abf30 100644 --- a/docs/root/api-v2/config/filter/filter.rst +++ b/docs/root/api-v2/config/filter/filter.rst @@ -11,3 +11,4 @@ Filters accesslog/v2/accesslog.proto fault/v2/fault.proto listener/listener + dubbo/dubbo diff --git a/docs/root/api-v2/http_routes/http_routes.rst b/docs/root/api-v2/http_routes/http_routes.rst index 45a2dbca1d930..241f94601a5c8 100644 --- a/docs/root/api-v2/http_routes/http_routes.rst +++ b/docs/root/api-v2/http_routes/http_routes.rst @@ -6,4 +6,5 @@ HTTP route management :maxdepth: 2 ../api/v2/rds.proto + ../api/v2/srds.proto ../api/v2/route/route.proto diff --git a/docs/root/api/api.rst b/docs/root/api/api.rst new file mode 100644 index 0000000000000..27e7731090095 --- /dev/null +++ b/docs/root/api/api.rst @@ -0,0 +1,11 @@ +.. _api: + +API +=== + +.. toctree:: + :glob: + :maxdepth: 2 + + ../api-v2/api + ../api-docs/xds_protocol diff --git a/docs/root/configuration/access_log.rst b/docs/root/configuration/access_log.rst index 6f2a90e7c4e7c..4faa310a4848a 100644 --- a/docs/root/configuration/access_log.rst +++ b/docs/root/configuration/access_log.rst @@ -28,7 +28,7 @@ Format Strings -------------- Format strings are plain strings, specified using the ``format`` key. They may contain -either command operators or other characters interpreted as a plain string. +either command operators or other characters interpreted as a plain string. The access log formatter does not make any assumptions about a new line separator, so one has to be specified as part of the format string. See the :ref:`default format ` for an example. @@ -78,7 +78,7 @@ For example, with the following format provided in the configuration: } } } - + The following JSON object would be written to the log file: .. 
code-block:: json @@ -112,7 +112,7 @@ The following command operators are supported: TCP Downstream connection start time including milliseconds. - START_TIME can be customized using a `format string `_. + START_TIME can be customized using a `format string `_. In addition to that, START_TIME also accepts following specifiers: +------------------------+-------------------------------------------------------------+ @@ -162,6 +162,16 @@ The following command operators are supported: TCP Not implemented ("-"). +.. _config_access_log_format_response_code_details: + +%RESPONSE_CODE_DETAILS% + HTTP + HTTP response code details provides additional information about the response code, such as + who set it (the upstream or envoy) and why. + + TCP + Not implemented ("-") + %BYTES_SENT% HTTP Body bytes sent. For WebSocket connection it will also include response header bytes. @@ -208,6 +218,7 @@ The following command operators are supported: * **RL**: The request was ratelimited locally by the :ref:`HTTP rate limit filter ` in addition to 429 response code. * **UAEX**: The request was denied by the external authorization service. * **RLSE**: The request was rejected because there was an error in rate limit service. + * **SI**: Stream idle timeout in addition to 408 response code. %RESPONSE_TX_DURATION% HTTP @@ -227,6 +238,17 @@ The following command operators are supported: Local address of the upstream connection. If the address is an IP address it includes both address and port. +.. _config_access_log_format_upstream_transport_failure_reason: + +%UPSTREAM_TRANSPORT_FAILURE_REASON% + HTTP + If upstream connection failed due to transport socket (e.g. TLS handshake), provides the failure + reason from the transport socket. The format of this field depends on the configured upstream + transport socket. Common TLS failures are in :ref:`TLS trouble shooting `. + + TCP + Not implemented ("-") + %DOWNSTREAM_REMOTE_ADDRESS% Remote address of the downstream connection. 
If the address is an IP address it includes both address and port. @@ -286,7 +308,7 @@ The following command operators are supported: %DYNAMIC_METADATA(NAMESPACE:KEY*):Z% HTTP :ref:`Dynamic Metadata ` info, - where NAMESPACE is the the filter namespace used when setting the metadata, KEY is an optional + where NAMESPACE is the filter namespace used when setting the metadata, KEY is an optional lookup up key in the namespace with the option of specifying nested keys separated by ':', and Z is an optional parameter denoting string truncation up to Z characters long. Dynamic Metadata can be set by filters using the :repo:`StreamInfo ` API: @@ -311,3 +333,28 @@ The following command operators are supported: TCP String value set on ssl connection socket for Server Name Indication (SNI) +%DOWNSTREAM_LOCAL_URI_SAN% + HTTP + The URIs present in the SAN of the local certificate used to establish the downstream TLS connection. + TCP + The URIs present in the SAN of the local certificate used to establish the downstream TLS connection. + +%DOWNSTREAM_PEER_URI_SAN% + HTTP + The URIs present in the SAN of the peer certificate used to establish the downstream TLS connection. + TCP + The URIs present in the SAN of the peer certificate used to establish the downstream TLS connection. + +%DOWNSTREAM_LOCAL_SUBJECT% + HTTP + The subject present in the local certificate used to establish the downstream TLS connection. + TCP + The subject present in the local certificate used to establish the downstream TLS connection. + +%DOWNSTREAM_PEER_SUBJECT% + HTTP + The subject present in the peer certificate used to establish the downstream TLS connection. + TCP + The subject present in the peer certificate used to establish the downstream TLS connection. 
+ + diff --git a/docs/root/configuration/cluster_manager/cds.rst b/docs/root/configuration/cluster_manager/cds.rst index aa87dfee9121e..89f2dbcd4b186 100644 --- a/docs/root/configuration/cluster_manager/cds.rst +++ b/docs/root/configuration/cluster_manager/cds.rst @@ -29,4 +29,4 @@ CDS has a statistics tree rooted at *cluster_manager.cds.* with the following st update_failure, Counter, Total API fetches that failed because of network errors update_rejected, Counter, Total API fetches that failed because of schema/validation errors version, Gauge, Hash of the contents from the last successful API fetch - control_plane.connected_state, Gauge, A boolan (1 for connected and 0 for disconnected) that indicates the current connection state with management server + control_plane.connected_state, Gauge, A boolean (1 for connected and 0 for disconnected) that indicates the current connection state with management server diff --git a/docs/root/configuration/cluster_manager/cluster_circuit_breakers.rst b/docs/root/configuration/cluster_manager/cluster_circuit_breakers.rst index e8a14f30447e7..2e3b768b0f3c1 100644 --- a/docs/root/configuration/cluster_manager/cluster_circuit_breakers.rst +++ b/docs/root/configuration/cluster_manager/cluster_circuit_breakers.rst @@ -11,6 +11,6 @@ Runtime All circuit breaking settings are runtime configurable for all defined priorities based on cluster name. They follow the following naming scheme ``circuit_breakers...``. -``cluster_name`` is the name field in each cluster's configuration, which is set in the envoy +``cluster_name`` is the name field in each cluster's configuration, which is set in the Envoy :ref:`config file `. Available runtime settings will override -settings set in the envoy config file. +settings set in the Envoy config file. 
diff --git a/docs/root/configuration/cluster_manager/cluster_stats.rst b/docs/root/configuration/cluster_manager/cluster_stats.rst index 711f0cd7e28c0..b5b6554be7b63 100644 --- a/docs/root/configuration/cluster_manager/cluster_stats.rst +++ b/docs/root/configuration/cluster_manager/cluster_stats.rst @@ -56,6 +56,7 @@ Every cluster has a statistics tree rooted at *cluster..* with the followi upstream_cx_rx_bytes_buffered, Gauge, Received connection bytes currently buffered upstream_cx_tx_bytes_total, Counter, Total sent connection bytes upstream_cx_tx_bytes_buffered, Gauge, Send connection bytes currently buffered + upstream_cx_pool_overflow, Counter, Total times that the cluster's connection pool circuit breaker overflowed upstream_cx_protocol_error, Counter, Total connection protocol errors upstream_cx_max_requests, Counter, Total connections closed due to maximum requests upstream_cx_none_healthy, Counter, Total times connection not established due to no healthy hosts @@ -94,6 +95,8 @@ Every cluster has a statistics tree rooted at *cluster..* with the followi version, Gauge, Hash of the contents from the last successful API fetch max_host_weight, Gauge, Maximum weight of any host in the cluster bind_errors, Counter, Total errors binding the socket to the configured source address + assignment_timeout_received, Counter, Total assignments received with endpoint lease information. + assignment_stale, Counter, Number of times the received assignments went stale before new assignments arrived. 
Health check statistics ----------------------- @@ -149,9 +152,14 @@ Circuit breakers statistics will be rooted at *cluster..circuit_breakers.< :widths: 1, 1, 2 cx_open, Gauge, Whether the connection circuit breaker is closed (0) or open (1) + cx_pool_open, Gauge, Whether the connection pool circuit breaker is closed (0) or open (1) rq_pending_open, Gauge, Whether the pending requests circuit breaker is closed (0) or open (1) rq_open, Gauge, Whether the requests circuit breaker is closed (0) or open (1) rq_retry_open, Gauge, Whether the retry circuit breaker is closed (0) or open (1) + remaining_cx, Gauge, Number of remaining connections until the circuit breaker opens + remaining_pending, Gauge, Number of remaining pending requests until the circuit breaker opens + remaining_rq, Gauge, Number of remaining requests until the circuit breaker opens + remaining_retries, Gauge, Number of remaining retries until the circuit breaker opens .. _config_cluster_manager_cluster_stats_dynamic_http: @@ -235,7 +243,7 @@ the following statistics: Load balancer subset statistics ------------------------------- -Statistics for monitoring `load balancer subset ` +Statistics for monitoring :ref:`load balancer subset ` decisions. Stats are rooted at *cluster..* and contain the following statistics: .. csv-table:: @@ -247,3 +255,37 @@ decisions. Stats are rooted at *cluster..* and contain the following stati lb_subsets_removed, Counter, Number of subsets removed due to no hosts lb_subsets_selected, Counter, Number of times any subset was selected for load balancing lb_subsets_fallback, Counter, Number of times the fallback policy was invoked + lb_subsets_fallback_panic, Counter, Number of times the subset panic mode triggered + +.. _config_cluster_manager_cluster_stats_ring_hash_lb: + +Ring hash load balancer statistics +---------------------------------- + +Statistics for monitoring the size and effective distribution of hashes when using the +:ref:`ring hash load balancer `. 
Stats are rooted at +*cluster..ring_hash_lb.* and contain the following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + size, Gauge, Total number of host hashes on the ring + min_hashes_per_host, Gauge, Minimum number of hashes for a single host + max_hashes_per_host, Gauge, Maximum number of hashes for a single host + +.. _config_cluster_manager_cluster_stats_maglev_lb: + +Maglev load balancer statistics +------------------------------- + +Statistics for monitoring effective host weights when using the +:ref:`Maglev load balancer `. Stats are rooted at +*cluster..maglev_lb.* and contain the following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + min_entries_per_host, Gauge, Minimum number of entries for a single host + max_entries_per_host, Gauge, Maximum number of entries for a single host diff --git a/docs/root/configuration/configuration.rst b/docs/root/configuration/configuration.rst index fca889b67559a..3effeaa8e8554 100644 --- a/docs/root/configuration/configuration.rst +++ b/docs/root/configuration/configuration.rst @@ -14,6 +14,7 @@ Configuration reference http_conn_man/http_conn_man http_filters/http_filters thrift_filters/thrift_filters + dubbo_filters/dubbo_filters cluster_manager/cluster_manager health_checkers/health_checkers access_log diff --git a/docs/root/configuration/dubbo_filters/dubbo_filters.rst b/docs/root/configuration/dubbo_filters/dubbo_filters.rst new file mode 100644 index 0000000000000..2577324dd3554 --- /dev/null +++ b/docs/root/configuration/dubbo_filters/dubbo_filters.rst @@ -0,0 +1,11 @@ +.. _config_dubbo_filters: + +Dubbo filters +=============== + +Envoy has the following builtin Dubbo filters. + +.. 
toctree:: + :maxdepth: 2 + + router_filter diff --git a/docs/root/configuration/dubbo_filters/router_filter.rst b/docs/root/configuration/dubbo_filters/router_filter.rst new file mode 100644 index 0000000000000..f4393238d9836 --- /dev/null +++ b/docs/root/configuration/dubbo_filters/router_filter.rst @@ -0,0 +1,11 @@ +.. _config_dubbo_filters_router: + +Router +====== + +The router filter implements Dubbo forwarding. It will be used in almost all Dubbo proxying +scenarios. The filter's main job is to follow the instructions specified in the configured +:ref:`route table `. + +* :ref:`v2 API reference ` +* This filter should be configured with the name *envoy.router*. diff --git a/docs/root/configuration/health_checkers/redis.rst b/docs/root/configuration/health_checkers/redis.rst index 37af55665b53a..9cf820f9a9705 100644 --- a/docs/root/configuration/health_checkers/redis.rst +++ b/docs/root/configuration/health_checkers/redis.rst @@ -18,7 +18,8 @@ Redis health checker is shown below: custom_health_check: name: envoy.health_checkers.redis - config: + typed_config: + "@type": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis key: foo * :ref:`v2 API reference ` \ No newline at end of file diff --git a/docs/root/configuration/http_conn_man/headers.rst b/docs/root/configuration/http_conn_man/headers.rst index 2c767a58f9ded..d48bac34152f6 100644 --- a/docs/root/configuration/http_conn_man/headers.rst +++ b/docs/root/configuration/http_conn_man/headers.rst @@ -131,7 +131,7 @@ should be replaced by backslash-double-quote (\"). The following keys are supported: 1. ``By`` The Subject Alternative Name (URI type) of the current proxy's certificate. -2. ``Hash`` The SHA 256 diguest of the current client certificate. +2. ``Hash`` The SHA 256 digest of the current client certificate. 3. ``Cert`` The entire client certificate in URL encoded PEM format. 4. ``Subject`` The Subject field of the current client certificate. The value is always double-quoted. 5. 
``URI`` The URI type Subject Alternative Name field of the current client certificate. @@ -325,12 +325,6 @@ A few very important notes about XFF: Envoy will not consider it internal. This is a known "bug" due to the simplification of how XFF is parsed to determine if a request is internal. In this scenario, do not forward XFF and allow Envoy to generate a new one with a single internal origin IP. -3. Testing IPv6 in a large multi-hop system can be difficult from a change management perspective. - For testing IPv6 compatibility of upstream services which parse XFF header values, - :ref:`represent_ipv4_remote_address_as_ipv4_mapped_ipv6 ` - can be enabled in the v2 API. Envoy will append an IPv4 address in mapped IPv6 format, e.g. - ::FFFF:50.0.0.1. This change will also apply to - :ref:`config_http_conn_man_headers_x-envoy-external-address`. .. _config_http_conn_man_headers_x-forwarded-proto: diff --git a/docs/root/configuration/http_conn_man/runtime.rst b/docs/root/configuration/http_conn_man/runtime.rst index 9b5286bd02b68..dcc85412c6315 100644 --- a/docs/root/configuration/http_conn_man/runtime.rst +++ b/docs/root/configuration/http_conn_man/runtime.rst @@ -5,16 +5,13 @@ Runtime The HTTP connection manager supports the following runtime settings: -.. _config_http_conn_man_runtime_represent_ipv4_remote_address_as_ipv4_mapped_ipv6: - -http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 - % of requests with a remote address that will have their IPv4 address mapped to IPv6. Defaults to - 0. - :ref:`use_remote_address ` - must also be enabled. See - :ref:`represent_ipv4_remote_address_as_ipv4_mapped_ipv6 - ` - for more details. +.. _config_http_conn_man_runtime_normalize_path: + +http_connection_manager.normalize_path + % of requests that will have path normalization applied if not already configured in + :ref:`normalize_path `. + This is evaluated at configuration load time and will apply to all requests for a given + configuration. .. 
_config_http_conn_man_runtime_client_enabled: diff --git a/docs/root/configuration/http_conn_man/stats.rst b/docs/root/configuration/http_conn_man/stats.rst index 174fa99bdb105..0269fefc1d4bf 100644 --- a/docs/root/configuration/http_conn_man/stats.rst +++ b/docs/root/configuration/http_conn_man/stats.rst @@ -34,7 +34,7 @@ statistics: downstream_cx_tx_bytes_buffered, Gauge, Total sent bytes currently buffered downstream_cx_drain_close, Counter, Total connections closed due to draining downstream_cx_idle_timeout, Counter, Total connections closed due to idle timeout - downstream_cx_overload_disable_keepalive, Counter, Total connections for which HTTP 1.x keepalive has been disabled due to envoy overload + downstream_cx_overload_disable_keepalive, Counter, Total connections for which HTTP 1.x keepalive has been disabled due to Envoy overload downstream_flow_control_paused_reading_total, Counter, Total number of times reads were disabled due to flow control downstream_flow_control_resumed_reading_total, Counter, Total number of times reads were enabled on the connection due to flow control downstream_rq_total, Counter, Total requests @@ -56,7 +56,7 @@ statistics: downstream_rq_time, Histogram, Total time for request and response (milliseconds) downstream_rq_idle_timeout, Counter, Total requests closed due to idle timeout downstream_rq_timeout, Counter, Total requests closed due to a timeout on the request path - downstream_rq_overload_close, Counter, Total requests closed due to envoy overload + downstream_rq_overload_close, Counter, Total requests closed due to Envoy overload rs_too_large, Counter, Total response errors due to buffering an overly large body Per user agent statistics diff --git a/docs/root/configuration/http_filters/cors_filter.rst b/docs/root/configuration/http_filters/cors_filter.rst index 366a4025ac41b..46a7f52e3e092 100644 --- a/docs/root/configuration/http_filters/cors_filter.rst +++ b/docs/root/configuration/http_filters/cors_filter.rst @@ -22,7 
+22,7 @@ filter_enabled The % of requests for which the filter is enabled. The default is 100/:ref:`HUNDRED `. - To utilize runtime to enabled/disable the CORS filter set the + To utilize runtime to enable/disable the CORS filter set the :ref:`runtime_key ` value of the :ref:`filter_enabled ` field. @@ -37,7 +37,7 @@ shadow_enabled If present, this will evaluate a request's *Origin* to determine if it's valid but will not enforce any policies. - To utilize runtime to enabled/disable the CORS filter's shadow mode set the + To utilize runtime to enable/disable the CORS filter's shadow mode set the :ref:`runtime_key ` value of the :ref:`shadow_enabled ` field. diff --git a/docs/root/configuration/http_filters/csrf_filter.rst b/docs/root/configuration/http_filters/csrf_filter.rst new file mode 100644 index 0000000000000..e6319bf733a15 --- /dev/null +++ b/docs/root/configuration/http_filters/csrf_filter.rst @@ -0,0 +1,94 @@ +.. _config_http_filters_csrf: + +CSRF +==== + +This is a filter which prevents Cross-Site Request Forgery based on a route or virtual host settings. +At its simplest, CSRF is an attack that occurs when a malicious third-party +exploits a vulnerability that allows them to submit an undesired request on the +user's behalf. + +A real-life example is cited in section 1 of `Robust Defenses for Cross-Site Request Forgery `_: + + "For example, in late 2007 [42], Gmail had a CSRF vulnerability. When a Gmail user visited + a malicious site, the malicious site could generate a request to Gmail that Gmail treated + as part of its ongoing session with the victim. In November 2007, a web attacker exploited + this CSRF vulnerability to inject an email filter into David Airey’s Gmail account [1]." + +There are many ways to mitigate CSRF, some of which have been outlined in the +`OWASP Prevention Cheat Sheet `_. +This filter employs a stateless mitigation pattern known as origin verification. 
+ +This pattern relies on two pieces of information used in determining if +a request originated from the same host. +* The origin that caused the user agent to issue the request (source origin). +* The origin that the request is going to (target origin). + +When the filter is evaluating a request, it ensures both pieces of information are present +and compares their values. If the source origin is missing or the origins do not match +the request is rejected. + + .. note:: + Due to differing functionality between browsers this filter will determine + a request's source origin from the Origin header. If that is not present it will + fall back to the host and port value from the request's Referer header. + + +For more information on CSRF please refer to the pages below. + +* https://www.owasp.org/index.php/Cross-Site_Request_Forgery_%28CSRF%29 +* https://seclab.stanford.edu/websec/csrf/csrf.pdf +* :ref:`v2 API reference ` + + .. note:: + + This filter should be configured with the name *envoy.csrf*. + +.. _csrf-runtime: + +Runtime +------- + +The CSRF filter supports the following RuntimeFractionalPercent settings: + +filter_enabled + The % of requests for which the filter is enabled. The default is + 100/:ref:`HUNDRED `. + + To utilize runtime to enable/disable the CSRF filter set the + :ref:`runtime_key ` + value of the :ref:`filter_enabled ` + field. + +shadow_enabled + The % of requests for which the filter is enabled in shadow only mode. Default is 0. + If present, this will evaluate a request's *Origin* and *Destination* to determine + if the request is valid but will not enforce any policies. + + To utilize runtime to enable/disable the CSRF filter's shadow mode set the + :ref:`runtime_key ` + value of the :ref:`shadow_enabled ` + field. + +To determine if the filter and/or shadow mode are enabled you can check the runtime +values via the admin panel at :http:get:`/runtime`. + +.. 
note:: + + If both ``filter_enabled`` and ``shadow_enabled`` are on, the ``filter_enabled`` + flag will take precedence. + +.. _csrf-statistics: + +Statistics +---------- + +The CSRF filter outputs statistics in the .csrf.* namespace. + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + missing_source_origin, Counter, Number of requests that are missing a source origin header. + request_invalid, Counter, Number of requests whose source and target origins do not match. + request_valid, Counter, Number of requests whose source and target origins match. diff --git a/docs/root/configuration/http_filters/ext_authz_filter.rst b/docs/root/configuration/http_filters/ext_authz_filter.rst index 33578a30b48a0..d5dd4fcc3ee01 100644 --- a/docs/root/configuration/http_filters/ext_authz_filter.rst +++ b/docs/root/configuration/http_filters/ext_authz_filter.rst @@ -44,8 +44,15 @@ A sample filter configuration for a gRPC authorization server: - name: ext-authz type: static http2_protocol_options: {} - hosts: - - socket_address: { address: 127.0.0.1, port_value: 10003 } + load_assignment: + cluster_name: ext-authz + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 10003 # This timeout controls the initial TCP handshake timeout - not the timeout for the # entire request. 
@@ -72,8 +79,15 @@ A sample filter configuration for a raw HTTP authorization server: connect_timeout: 0.25s type: logical_dns lb_policy: round_robin - hosts: - - socket_address: { address: 127.0.0.1, port_value: 10003 } + load_assignment: + cluster_name: ext-authz + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 10003 Per-Route Configuration ----------------------- diff --git a/docs/root/configuration/http_filters/fault_filter.rst b/docs/root/configuration/http_filters/fault_filter.rst index 9bb02fdd0b1de..90b11404b90bc 100644 --- a/docs/root/configuration/http_filters/fault_filter.rst +++ b/docs/root/configuration/http_filters/fault_filter.rst @@ -16,15 +16,6 @@ The scope of failures is restricted to those that are observable by an application communicating over the network. CPU and disk failures on the local host cannot be emulated. -Currently, the fault injection filter has the following limitations: - -* Abort codes are restricted to HTTP status codes only -* Delays are restricted to fixed duration. - -Future versions will include support for restricting faults to specific -routes, injecting *gRPC* and *HTTP/2* specific error codes and delay -durations based on distributions. - Configuration ------------- @@ -36,31 +27,96 @@ Configuration * :ref:`v2 API reference ` * This filter should be configured with the name *envoy.fault*. +.. _config_http_filters_fault_injection_http_header: + +Controlling fault injection via HTTP headers +-------------------------------------------- + +The fault filter has the capability to allow fault configuration to be specified by the caller. +This is useful in certain scenarios in which it is desired to allow the client to specify its own +fault configuration. The currently supported header controls are: + +* Request delay configuration via the *x-envoy-fault-delay-request* header. 
The header value + should be an integer that specifies the number of milliseconds to throttle the latency for. +* Response rate limit configuration via the *x-envoy-fault-throughput-response* header. The + header value should be an integer that specifies the limit in KiB/s and must be > 0. + +.. attention:: + + Allowing header control is inherently dangerous if exposed to untrusted clients. In this case, + it is suggested to use the :ref:`max_active_faults + ` setting to limit the + maximum concurrent faults that can be active at any given time. + +The following is an example configuration that enables header control for both of the above +options: + +.. code-block:: yaml + + name: envoy.fault + config: + max_active_faults: 100 + delay: + header_delay: {} + percentage: + numerator: 100 + response_rate_limit: + header_limit: {} + percentage: + numerator: 100 + +.. _config_http_filters_fault_injection_runtime: + Runtime ------- The HTTP fault injection filter supports the following global runtime settings: +.. attention:: + + Some of the following runtime keys require the filter to be configured for the specific fault + type and some do not. Please consult the documentation for each key for more information. + fault.http.abort.abort_percent % of requests that will be aborted if the headers match. Defaults to the *abort_percent* specified in config. If the config does not contain an - *abort* block, then *abort_percent* defaults to 0. + *abort* block, then *abort_percent* defaults to 0. For historic reasons, this runtime key is + available regardless of whether the filter is :ref:`configured for abort + `. fault.http.abort.http_status HTTP status code that will be used as the of requests that will be aborted if the headers match. Defaults to the HTTP status code specified in the config. If the config does not contain an *abort* block, then - *http_status* defaults to 0. + *http_status* defaults to 0. 
For historic reasons, this runtime key is + available regardless of whether the filter is :ref:`configured for abort + `. fault.http.delay.fixed_delay_percent % of requests that will be delayed if the headers match. Defaults to the - *delay_percent* specified in the config or 0 otherwise. + *delay_percent* specified in the config or 0 otherwise. This runtime key is only available when + the filter is :ref:`configured for delay + `. fault.http.delay.fixed_duration_ms The delay duration in milliseconds. If not specified, the *fixed_duration_ms* specified in the config will be used. If this field is missing from both the runtime and the config, no delays will be - injected. + injected. This runtime key is only available when the filter is :ref:`configured for delay + `. + +fault.http.max_active_faults + The maximum number of active faults (of all types) that Envoy will inject via the fault + filter. This can be used in cases where it is desired that faults are 100% injected, + but the user wants to avoid a situation in which too many unexpected concurrent faulting requests + cause resource constraint issues. If not specified, the :ref:`max_active_faults + ` setting will be used. + +fault.http.rate_limit.response_percent + % of requests which will have a response rate limit fault injected. Defaults to the value set in + the :ref:`percentage ` field. + This runtime key is only available when the filter is :ref:`configured for response rate limiting + `. *Note*, fault filter runtime settings for the specific downstream cluster override the default ones if present. The following are downstream specific @@ -76,6 +132,8 @@ Downstream cluster name is taken from header. If the following settings are not found in the runtime it defaults to the global runtime settings which defaults to the config settings. +.. _config_http_filters_fault_injection_stats: + Statistics ---------- @@ -89,5 +147,8 @@ owning HTTP connection manager. 
delays_injected, Counter, Total requests that were delayed aborts_injected, Counter, Total requests that were aborted + response_rl_injected, Counter, "Total requests that had a response rate limit selected for injection (actually injection may not occur due to disconnect, reset, no body, etc.)" + faults_overflow, Counter, Total number of faults that were not injected due to overflowing the :ref:`max_active_faults ` setting + active_faults, Gauge, Total number of faults active at the current time .delays_injected, Counter, Total delayed requests for the given downstream cluster .aborts_injected, Counter, Total aborted requests for the given downstream cluster diff --git a/docs/root/configuration/http_filters/grpc_http1_reverse_bridge_filter.rst b/docs/root/configuration/http_filters/grpc_http1_reverse_bridge_filter.rst index 04334a483ae80..1ef27594b6e9d 100644 --- a/docs/root/configuration/http_filters/grpc_http1_reverse_bridge_filter.rst +++ b/docs/root/configuration/http_filters/grpc_http1_reverse_bridge_filter.rst @@ -1,11 +1,11 @@ -.. _config_http_filters_grpc_reverse_bridge: +.. _config_http_filters_grpc_http1_reverse_bridge: gRPC HTTP/1.1 reverse bridge ============================ * gRPC :ref:`architecture overview ` * :ref:`v2 API reference ` -* This filter should be configured with the name *envoy.grpc_http1_reverse_bridge*. +* This filter should be configured with the name *envoy.filters.http.grpc_http1_reverse_bridge*. This is a filter that enables converting an incoming gRPC request into a HTTP/1.1 request to allow a server that does not understand HTTP/2 or gRPC semantics to handle the request. 
diff --git a/docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst b/docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst index 8a974319f7134..fed6cbef5e3ad 100644 --- a/docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst +++ b/docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst @@ -106,7 +106,8 @@ gRPC or RESTful JSON requests to localhost:51051. filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager stat_prefix: grpc_json codec_type: AUTO route_config: @@ -136,7 +137,16 @@ gRPC or RESTful JSON requests to localhost:51051. lb_policy: round_robin dns_lookup_family: V4_ONLY http2_protocol_options: {} - hosts: - - socket_address: - address: docker.for.mac.localhost - port_value: 50051 + load_assignment: + cluster_name: grpc + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + # WARNING: "docker.for.mac.localhost" has been deprecated from Docker v18.03.0. + # If you're running an older version of Docker, please use "docker.for.mac.localhost" instead. + # Reference: https://docs.docker.com/docker-for-mac/release-notes/#docker-community-edition-18030-ce-mac59-2018-03-26 + address: host.docker.internal + port_value: 50051 + diff --git a/docs/root/configuration/http_filters/gzip_filter.rst b/docs/root/configuration/http_filters/gzip_filter.rst index 18c473ac4a3e7..6d899fd6ae4ea 100644 --- a/docs/root/configuration/http_filters/gzip_filter.rst +++ b/docs/root/configuration/http_filters/gzip_filter.rst @@ -56,7 +56,7 @@ By *default* compression will be *skipped* when: - Neither *content-length* nor *transfer-encoding* headers are present in the response. - Response size is smaller than 30 bytes (only applicable when *transfer-encoding* - is not chuncked). + is not chunked). 
When compression is *applied*: diff --git a/docs/root/configuration/http_filters/http_filters.rst b/docs/root/configuration/http_filters/http_filters.rst index 6c2d38b8c81e7..5f0c00275d80b 100644 --- a/docs/root/configuration/http_filters/http_filters.rst +++ b/docs/root/configuration/http_filters/http_filters.rst @@ -8,6 +8,7 @@ HTTP filters buffer_filter cors_filter + csrf_filter dynamodb_filter ext_authz_filter fault_filter diff --git a/docs/root/configuration/http_filters/jwt_authn_filter.rst b/docs/root/configuration/http_filters/jwt_authn_filter.rst index 759eafbc68e13..878adea96a48d 100644 --- a/docs/root/configuration/http_filters/jwt_authn_filter.rst +++ b/docs/root/configuration/http_filters/jwt_authn_filter.rst @@ -80,10 +80,15 @@ Following cluster **example_jwks_cluster** is needed to fetch JWKS. cluster: name: example_jwks_cluster type: STRICT_DNS - hosts: - socket_address: - address: example.com - port_value: 80 + load_assignment: + cluster_name: example_jwks_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: example.com + port_value: 80 Inline JWKS config example diff --git a/docs/root/configuration/http_filters/lua_filter.rst b/docs/root/configuration/http_filters/lua_filter.rst index 912aa73f26c57..815e5aef96322 100644 --- a/docs/root/configuration/http_filters/lua_filter.rst +++ b/docs/root/configuration/http_filters/lua_filter.rst @@ -13,9 +13,9 @@ Overview -------- The HTTP Lua filter allows `Lua `_ scripts to be run during both the request -and response flows. `LuaJIT `_ is used as the runtime. Because of this, the +and response flows. `LuaJIT `_ is used as the runtime. Because of this, the supported Lua version is mostly 5.1 with some 5.2 features. See the `LuaJIT documentation -`_ for more details. +`_ for more details. The filter only supports loading Lua code in-line in the configuration. 
If local filesystem code is desired, a trivial in-line script can be used to load the rest of the code from the local @@ -74,7 +74,7 @@ more details on the supported API. -- Called on the response path. function envoy_on_response(response_handle) - -- Wait for the entire response body and a response header with the the body size. + -- Wait for the entire response body and a response header with the body size. response_handle:headers():add("response_body_size", response_handle:body():length()) -- Remove a response header named 'foo' response_handle:headers():remove("foo") diff --git a/docs/root/configuration/http_filters/router_filter.rst b/docs/root/configuration/http_filters/router_filter.rst index ac43e35c96a19..d33a974eaad51 100644 --- a/docs/root/configuration/http_filters/router_filter.rst +++ b/docs/root/configuration/http_filters/router_filter.rst @@ -28,21 +28,27 @@ x-envoy-max-retries ^^^^^^^^^^^^^^^^^^^ If a :ref:`route config retry policy ` or a :ref:`virtual host retry policy ` is in place, Envoy will default to retrying -one time unless explicitly specified. The number of retries can be explicitly set in either the virtual host retry config, -or the route retry config, or by using this header. If a retry policy is not configured and -:ref:`config_http_filters_router_x-envoy-retry-on` or :ref:`config_http_filters_router_x-envoy-retry-grpc-on` headers -are not specified, Envoy will not retry a failed request. +one time unless explicitly specified. The number of retries can be explicitly set in the virtual host retry config, +the route retry config, or by using this header. If this header is used, its value takes precedence over the number of +retries set in either retry policy. If a retry policy is not configured and :ref:`config_http_filters_router_x-envoy-retry-on` +or :ref:`config_http_filters_router_x-envoy-retry-grpc-on` headers are not specified, Envoy will not retry a failed request. 
A few notes on how Envoy does retries: * The route timeout (set via :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` or the :ref:`route configuration `) **includes** all retries. Thus if the request timeout is set to 3s, and the first request attempt takes 2.7s, the - retry (including backoff) has .3s to complete. This is by design to avoid an exponential + retry (including back-off) has .3s to complete. This is by design to avoid an exponential retry/timeout explosion. -* Envoy uses a fully jittered exponential backoff algorithm for retries with a base time of 25ms. - The first retry will be delayed randomly between 0-24ms, the 2nd between 0-74ms, the 3rd between - 0-174ms and so on. +* Envoy uses a fully jittered exponential back-off algorithm for retries with a default base + interval of 25ms. Given a base interval B and retry number N, the back-off for the retry is in + the range :math:`\big[0, (2^N-1)B\big)`. For example, given the default interval, the first retry + will be delayed randomly by 0-24ms, the 2nd by 0-74ms, the 3rd by 0-174ms, and so on. The + interval is capped at a maximum interval, which defaults to 10 times the base interval (250ms). + The default base interval (and therefore the maximum interval) can be manipulated by setting the + upstream.base_retry_backoff_ms runtime parameter. The back-off intervals can also be modified + by configuring the retry policy's + :ref:`retry back-off `. * If max retries is set both by header as well as in the route configuration, the maximum value is taken when determining the max retries to use for the request. @@ -156,7 +162,7 @@ x-envoy-retriable-status-codes Setting this header informs Envoy about what status codes should be considered retriable when used in conjunction with the :ref:`retriable-status-code ` retry policy. 
When the corresponding retry policy is set, the list of retriable status codes will be considered retriable -in addition to the status codes enabled for retry through other retry policies. +in addition to the status codes enabled for retry through other retry policies. The list is a comma delimited list of integers: "409" would cause 409 to be considered retriable, while "504,409" would consider both 504 and 409 retriable. @@ -239,7 +245,7 @@ x-envoy-ratelimited If this header is set by upstream, Envoy will not retry. Currently the value of the header is not looked at, only its presence. This header is set by :ref:`rate limit filter` -when the request is rate limited. +when the request is rate limited. .. _config_http_filters_router_x-envoy-decorator-operation: @@ -330,6 +336,7 @@ owning HTTP connection manager. rq_redirect, Counter, Total requests that resulted in a redirect response rq_direct_response, Counter, Total requests that resulted in a direct response rq_total, Counter, Total routed requests + rq_reset_after_downstream_response_started, Counter, Total requests that were reset after downstream response had started. Virtual cluster statistics are output in the *vhost..vcluster..* namespace and include the following @@ -349,8 +356,9 @@ Runtime The router filter supports the following runtime settings: upstream.base_retry_backoff_ms - Base exponential retry back off time. See :ref:`here ` for more - information. Defaults to 25ms. + Base exponential retry back-off time. See :ref:`here ` and + :ref:`config_http_filters_router_x-envoy-max-retries` for more information. Defaults to 25ms. + The default maximum retry back-off time is 10 times this value. .. 
_config_http_filters_router_runtime_maintenance_mode: diff --git a/docs/root/configuration/http_filters/tap_filter.rst b/docs/root/configuration/http_filters/tap_filter.rst index 6982a3384a952..4adcf5f9375d3 100644 --- a/docs/root/configuration/http_filters/tap_filter.rst +++ b/docs/root/configuration/http_filters/tap_filter.rst @@ -121,6 +121,44 @@ Another example POST body: The preceding configuration instructs the tap filter to match any HTTP requests. All requests will be tapped and streamed out the admin endpoint. +Output format +------------- + +Each output sink has an associated :ref:`format +`. The default format is +:ref:`JSON_BODY_AS_BYTES +`. This format is +easy to read JSON, but has the downside that body data is base64 encoded. In the case that the tap +is known to be on human readable data, the :ref:`JSON_BODY_AS_STRING +` format may be +more user friendly. See the reference documentation for more information on other available formats. + +An example of a streaming admin tap configuration that uses the :ref:`JSON_BODY_AS_STRING +` format: + +.. code-block:: yaml + + config_id: test_config_id + tap_config: + match_config: + any_match: true + output_config: + sinks: + - format: JSON_BODY_AS_STRING + streaming_admin: {} + +Buffered body limits +-------------------- + +For buffered taps, Envoy will limit the amount of body data that is tapped to avoid OOM situations. +The default limit is 1KiB for both received (request) and transmitted (response) data. This is +configurable via the :ref:`max_buffered_rx_bytes +` and +:ref:`max_buffered_tx_bytes +` settings. + +.. _config_http_filters_tap_streaming: + Streaming matching ------------------ @@ -129,11 +167,64 @@ the request/response sequence, the filter will match incrementally as the reques first the request headers will be matched, then the request body if present, then the request trailers if present, then the response headers if present, etc. -In the future, the filter will support streaming output. 
Currently only :ref:`fully buffered output -` is implemented. However, even in the current -implementation, if a tap is configured to match request headers and the request headers match, -even if there is no response (upstream failure, etc.) the request will still be tapped and sent -to the configured output. +The filter additionally supports optional streamed output which is governed by the :ref:`streaming +` setting. If this setting is false +(the default), Envoy will emit :ref:`fully buffered traces +`. Users are likely to find this format easier +to interact with for simple cases. + +In cases where fully buffered traces are not practical (e.g., very large requests and responses, +long lived streaming APIs, etc.), the streaming setting can be set to true, and Envoy will emit +multiple :ref:`streamed trace segments ` for +each tap. In this case, it is required that post-processing is performed to stitch all of the trace +segments back together into a usable form. Also note that binary protobuf is not a self-delimiting +format. If binary protobuf output is desired, the :ref:`PROTO_BINARY_LENGTH_DELIMITED +` output +format should be used. + +A static filter configuration to enable streaming output looks like: + +.. code-block:: yaml + + name: envoy.filters.http.tap + config: + common_config: + static_config: + match_config: + http_response_headers_match: + headers: + - name: bar + exact_match: baz + output_config: + streaming: true + sinks: + - format: PROTO_BINARY_LENGTH_DELIMITED + file_per_tap: + path_prefix: /tmp/ + +The previous configuration will match response headers, and as such will buffer request headers, +body, and trailers until a match can be determined (buffered data limits still apply as described +in the previous section). If a match is determined, buffered data will be flushed in individual +trace segments and then the rest of the tap will be streamed as data arrives. The messages output +might look like this: + +.. 
code-block:: yaml + + http_streamed_trace_segment: + trace_id: 1 + request_headers: + headers: + - key: a + value: b + +.. code-block:: yaml + + http_streamed_trace_segment: + trace_id: 1 + request_body_chunk: + as_bytes: aGVsbG8= + +Etc. Statistics ---------- diff --git a/docs/root/configuration/listener_filters/proxy_protocol.rst b/docs/root/configuration/listener_filters/proxy_protocol.rst index bd6729655b716..bfc59cf3ae974 100644 --- a/docs/root/configuration/listener_filters/proxy_protocol.rst +++ b/docs/root/configuration/listener_filters/proxy_protocol.rst @@ -4,7 +4,7 @@ Proxy Protocol ============== This listener filter adds support for -`HAProxy Proxy Protocol `_. +`HAProxy Proxy Protocol `_. In this mode, the upstream connection is assumed to come from a proxy which places the original coordinates (IP, PORT) into a connection-string. diff --git a/docs/root/configuration/listener_filters/tls_inspector.rst b/docs/root/configuration/listener_filters/tls_inspector.rst index af949c0f10fef..0f3c3c655d617 100644 --- a/docs/root/configuration/listener_filters/tls_inspector.rst +++ b/docs/root/configuration/listener_filters/tls_inspector.rst @@ -27,7 +27,7 @@ A sample filter configuration could be: listener_filters: - name: "envoy.listener.tls_inspector" - config: {} + typed_config: {} Statistics ---------- diff --git a/docs/root/configuration/listeners/lds.rst b/docs/root/configuration/listeners/lds.rst index ab1587ee9329a..a90fe84f6f11e 100644 --- a/docs/root/configuration/listeners/lds.rst +++ b/docs/root/configuration/listeners/lds.rst @@ -48,4 +48,4 @@ LDS has a statistics tree rooted at *listener_manager.lds.* with the following s update_failure, Counter, Total API fetches that failed because of network errors update_rejected, Counter, Total API fetches that failed because of schema/validation errors version, Gauge, Hash of the contents from the last successful API fetch - control_plane.connected_state, Gauge, A boolan (1 for connected and 0 for 
disconnected) that indicates the current connection state with management server + control_plane.connected_state, Gauge, A boolean (1 for connected and 0 for disconnected) that indicates the current connection state with management server diff --git a/docs/root/configuration/network_filters/dubbo_proxy_filter.rst b/docs/root/configuration/network_filters/dubbo_proxy_filter.rst new file mode 100644 index 0000000000000..503dd6970a9b6 --- /dev/null +++ b/docs/root/configuration/network_filters/dubbo_proxy_filter.rst @@ -0,0 +1,82 @@ +.. _config_network_filters_dubbo_proxy: + +Dubbo proxy +============ + +The dubbo proxy filter decodes the RPC protocol between dubbo clients +and servers. The decoded RPC information is converted to metadata. +The metadata includes the basic request ID, request type, serialization type, +and the required service name, method name, parameter name, +and parameter value for routing. + +* :ref:`v2 API reference ` +* This filter should be configured with the name *envoy.filters.network.dubbo_proxy*. + +.. _config_network_filters_dubbo_proxy_stats: + +Statistics +---------- + +Every configured dubbo proxy filter has statistics rooted at *dubbo..* with the +following statistics: + +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + request, Counter, Total requests + request_twoway, Counter, Total twoway requests + request_oneway, Counter, Total oneway requests + request_event, Counter, Total event requests + request_decoding_error, Counter, Total decoding error requests + request_decoding_success, Counter, Total decoding success requests + request_active, Gauge, Total active requests + response, Counter, Total responses + response_success, Counter, Total success responses + response_error, Counter, Total responses that protocol parse error + response_error_caused_connection_close, Counter, Total responses that caused by the downstream connection close + response_business_exception, Counter, Total responses that the protocol contains exception information returned by the business layer + response_decoding_error, Counter, Total decoding error responses + response_decoding_success, Counter, Total decoding success responses + response_error, Counter, Total responses that protocol parse error + local_response_success, Counter, Total local responses + local_response_error, Counter, Total local responses that encoding error + local_response_business_exception, Counter, Total local responses that the protocol contains business exception + cx_destroy_local_with_active_rq, Counter, Connections destroyed locally with an active query + cx_destroy_remote_with_active_rq, Counter, Connections destroyed remotely with an active query + + +Implement custom filter based on the dubbo proxy filter +-------------------------------------------------------- + +If you want to implement a custom filter based on the dubbo protocol, +the dubbo proxy filter, like HTTP, also provides a very convenient way to expand: +the first step is to implement the DecoderFilter interface and give the filter a name, such as testFilter; +the second step is to add your configuration; for the configuration method, refer to the following sample + +.. 
code-block:: yaml + + filter_chains: + - filters: + - name: envoy.filters.network.dubbo_proxy + config: + stat_prefix: dubbo_incomming_stats + protocol_type: Dubbo + serialization_type: Hessian2 + route_config: + name: local_route + interface: org.apache.dubbo.demo.DemoService + routes: + - match: + method: + name: + exact: sayHello + route: + cluster: user_service_dubbo_server + dubbo_filters: + - name: envoy.filters.dubbo.testFilter + config: + "@type": type.googleapis.com/google.protobuf.Struct + value: + name: test_service + - name: envoy.filters.dubbo.router \ No newline at end of file diff --git a/docs/root/configuration/network_filters/ext_authz_filter.rst b/docs/root/configuration/network_filters/ext_authz_filter.rst index bdff69f727311..c0702ae02671e 100644 --- a/docs/root/configuration/network_filters/ext_authz_filter.rst +++ b/docs/root/configuration/network_filters/ext_authz_filter.rst @@ -32,17 +32,25 @@ A sample filter configuration could be: filters: - name: envoy.ext_authz - stat_prefix: ext_authz - grpc_service: - envoy_grpc: - cluster_name: ext-authz + config: + stat_prefix: ext_authz + grpc_service: + envoy_grpc: + cluster_name: ext-authz clusters: - name: ext-authz type: static http2_protocol_options: {} - hosts: - - socket_address: { address: 127.0.0.1, port_value: 10003 } + load_assignment: + cluster_name: ext-authz + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 10003 Statistics ---------- diff --git a/docs/root/configuration/network_filters/mysql_proxy_filter.rst b/docs/root/configuration/network_filters/mysql_proxy_filter.rst index 6d4731d4b00c0..2890538978161 100644 --- a/docs/root/configuration/network_filters/mysql_proxy_filter.rst +++ b/docs/root/configuration/network_filters/mysql_proxy_filter.rst @@ -15,6 +15,11 @@ as operations performed on each table. development. Capabilities will be expanded over time and the configuration structures are likely to change. +.. 
warning:: + + The mysql_proxy filter was tested with MySQL v5.5. The filter may not work + with other versions of MySQL due to differences in the protocol implementation. + .. _config_network_filters_mysql_proxy_config: Configuration @@ -28,10 +33,12 @@ in the configuration snippet below: filter_chains: - filters: - name: envoy.filters.network.mysql_proxy - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.mysql_proxy.v1alpha1.MySQLProxy stat_prefix: mysql - name: envoy.tcp_proxy - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy stat_prefix: tcp cluster: ... @@ -88,10 +95,12 @@ _catalog_ table in the _productdb_ database. filter_chains: - filters: - name: envoy.filters.network.mysql_proxy - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.mysql_proxy.v1alpha1.MySQLProxy stat_prefix: mysql - name: envoy.filters.network.rbac - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.rbac.v2.RBAC stat_prefix: rbac rules: action: DENY @@ -110,6 +119,7 @@ _catalog_ table in the _productdb_ database. principals: - any: true - name: envoy.tcp_proxy - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy stat_prefix: tcp cluster: mysql diff --git a/docs/root/configuration/network_filters/network_filters.rst b/docs/root/configuration/network_filters/network_filters.rst index dd559ddd66890..f43f474ac6547 100644 --- a/docs/root/configuration/network_filters/network_filters.rst +++ b/docs/root/configuration/network_filters/network_filters.rst @@ -10,6 +10,7 @@ filters. .. toctree:: :maxdepth: 2 + dubbo_proxy_filter client_ssl_auth_filter echo_filter ext_authz_filter @@ -21,3 +22,4 @@ filters. 
tcp_proxy_filter thrift_proxy_filter sni_cluster_filter + zookeeper_proxy_filter diff --git a/docs/root/configuration/network_filters/redis_proxy_filter.rst b/docs/root/configuration/network_filters/redis_proxy_filter.rst index a79da1a5faf2c..1dd12ee50ad5f 100644 --- a/docs/root/configuration/network_filters/redis_proxy_filter.rst +++ b/docs/root/configuration/network_filters/redis_proxy_filter.rst @@ -48,7 +48,8 @@ Per command statistics ---------------------- The Redis filter will gather statistics for commands in the -*redis..command..* namespace. +*redis..command..* namespace. By default latency stats are in milliseconds and can be +changed to microseconds by setting the configuration parameter :ref:`latency_in_micros ` to true. .. csv-table:: :header: Name, Type, Description diff --git a/docs/root/configuration/network_filters/zookeeper_proxy_filter.rst b/docs/root/configuration/network_filters/zookeeper_proxy_filter.rst new file mode 100644 index 0000000000000..cf8e1c9716a72 --- /dev/null +++ b/docs/root/configuration/network_filters/zookeeper_proxy_filter.rst @@ -0,0 +1,92 @@ +.. _config_network_filters_zookeeper_proxy: + +ZooKeeper proxy +=============== + +The ZooKeeper proxy filter decodes the client protocol for +`Apache ZooKeeper `_. It decodes the requests, +responses and events in the payload. Most opcodes known in +`ZooKeeper 3.5 `_ +are supported. The unsupported ones are related to SASL authentication. + +.. attention:: + + The zookeeper_proxy filter is experimental and is currently under active + development. Capabilities will be expanded over time and the + configuration structures are likely to change. + +.. _config_network_filters_zookeeper_proxy_config: + +Configuration +------------- + +The ZooKeeper proxy filter should be chained with the TCP proxy filter as shown +in the configuration snippet below: + +.. 
code-block:: yaml + + filter_chains: + - filters: + - name: envoy.filters.network.zookeeper_proxy + config: + stat_prefix: zookeeper + - name: envoy.tcp_proxy + config: + stat_prefix: tcp + cluster: ... + + +.. _config_network_filters_zookeeper_proxy_stats: + +Statistics +---------- + +Every configured ZooKeeper proxy filter has statistics rooted at *zookeeper..* with the +following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + decoder_error, Counter, Number of times a message wasn't decoded + request_bytes, Counter, Number of bytes in decoded request messages + connect_rq, Counter, Number of regular connect (non-readonly) requests + connect_readonly_rq, Counter, Number of connect requests with the readonly flag set + ping_rq, Counter, Number of ping requests + auth._rq, Counter, Number of auth requests for a given type + getdata_rq, Counter, Number of getdata requests + create_rq, Counter, Number of create requests + create2_rq, Counter, Number of create2 requests + setdata_rq, Counter, Number of setdata requests + getchildren_rq, Counter, Number of getchildren requests + getchildren2_rq, Counter, Number of getchildren2 requests + remove_rq, Counter, Number of delete requests + exists_rq, Counter, Number of stat requests + getacl_rq, Counter, Number of getacl requests + setacl_rq, Counter, Number of setacl requests + sync_rq, Counter, Number of sync requests + multi_rq, Counter, Number of multi transaction requests + reconfig_rq, Counter, Number of reconfig requests + close_rq, Counter, Number of close requests + setwatches_rq, Counter, Number of setwatches requests + checkwatches_rq, Counter, Number of checkwatches requests + removewatches_rq, Counter, Number of removewatches requests + check_rq, Counter, Number of check requests + +.. _config_network_filters_zookeeper_proxy_dynamic_metadata: + +Dynamic Metadata +---------------- + +The ZooKeeper filter emits the following dynamic metadata for each message parsed: + +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + , string, "The path associated with the request, response or event" + , string, "The opname for the request, response or event" + , string, "The string representation of the flags applied to the znode" + , string, "The size of the request message in bytes" + , string, "True if a watch is being set, false otherwise" + , string, "The version parameter, if any, given with the request" diff --git a/docs/root/configuration/overload_manager/overload_manager.rst b/docs/root/configuration/overload_manager/overload_manager.rst index ea2e0f42b71d0..06d298a8f22ac 100644 --- a/docs/root/configuration/overload_manager/overload_manager.rst +++ b/docs/root/configuration/overload_manager/overload_manager.rst @@ -51,6 +51,7 @@ The following overload actions are supported: envoy.overload_actions.stop_accepting_requests, Envoy will immediately respond with a 503 response code to new requests envoy.overload_actions.disable_http_keepalive, Envoy will disable keepalive on HTTP/1.x responses envoy.overload_actions.stop_accepting_connections, Envoy will stop accepting new network connections on its configured listeners + envoy.overload_actions.shrink_heap, Envoy will periodically try to shrink the heap by releasing free memory to the system Statistics ---------- diff --git a/docs/root/configuration/overview/v2_overview.rst b/docs/root/configuration/overview/v2_overview.rst index f7036bc691b3b..30b6066a98206 100644 --- a/docs/root/configuration/overview/v2_overview.rst +++ b/docs/root/configuration/overview/v2_overview.rst @@ -8,19 +8,19 @@ The Envoy v2 APIs are defined as `proto3 `_ in the `data plane API repository `_. They support -* Streaming delivery of `xDS `_ - API updates via gRPC. This reduces resource requirements and can lower the update latency. +* Streaming delivery of :repo:`xDS ` API updates via gRPC. This reduces + resource requirements and can lower the update latency. 
* A new REST-JSON API in which the JSON/YAML formats are derived mechanically via the `proto3 canonical JSON mapping `_. * Delivery of updates via the filesystem, REST-JSON or gRPC endpoints. * Advanced load balancing through an extended endpoint assignment API and load and resource utilization reporting to management servers. -* `Stronger consistency and ordering properties - `_ +* :repo:`Stronger consistency and ordering properties + ` when needed. The v2 APIs still maintain a baseline eventual consistency model. -See the `xDS protocol description `_ for +See the :repo:`xDS protocol description ` for further details on aspects of v2 message exchange between Envoy and the management server. .. _config_overview_v2_bootstrap: @@ -41,7 +41,7 @@ where the extension reflects the underlying v2 config representation. The :ref:`Bootstrap ` message is the root of the configuration. A key concept in the :ref:`Bootstrap ` -message is the distinction between static and dynamic resouces. Resources such +message is the distinction between static and dynamic resources. Resources such as a :ref:`Listener ` or :ref:`Cluster ` may be supplied either statically in :ref:`static_resources ` or have @@ -75,7 +75,8 @@ A minimal fully static bootstrap config is provided below: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager stat_prefix: ingress_http codec_type: AUTO route_config: @@ -109,7 +110,7 @@ Mostly static with dynamic EDS A bootstrap config that continues from the above example with :ref:`dynamic endpoint discovery ` via an :ref:`EDS` gRPC management server listening -on 127.0.0.3:5678 is provided below: +on 127.0.0.1:5678 is provided below: .. 
code-block:: yaml @@ -126,7 +127,8 @@ on 127.0.0.3:5678 is provided below: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager stat_prefix: ingress_http codec_type: AUTO route_config: @@ -156,6 +158,10 @@ on 127.0.0.3:5678 is provided below: type: STATIC lb_policy: ROUND_ROBIN http2_protocol_options: {} + upstream_connection_options: + # configure a TCP keep-alive to detect and reconnect to the admin + # server in the event of a TCP socket half open connection + tcp_keepalive: {} load_assignment: cluster_name: xds_cluster endpoints: @@ -170,6 +176,10 @@ Notice above that *xds_cluster* is defined to point Envoy at the management serv an otherwise completely dynamic configurations, some static resources need to be defined to point Envoy at its xDS management server(s). +It's important to set appropriate :ref:`TCP Keep-Alive options ` +in the `tcp_keepalive` block. This will help detect TCP half open connections to the xDS management +server and re-establish a full connection. + In the above example, the EDS management server could then return a proto encoding of a :ref:`DiscoveryResponse `: @@ -189,8 +199,8 @@ In the above example, the EDS management server could then return a proto encodi The versioning and type URL scheme that appear above are explained in more -detail in the `streaming gRPC subscription protocol -`_ +detail in the :repo:`streaming gRPC subscription protocol +` documentation. 
Dynamic @@ -228,6 +238,10 @@ below: type: STATIC lb_policy: ROUND_ROBIN http2_protocol_options: {} + upstream_connection_options: + # configure a TCP keep-alive to detect and reconnect to the admin + # server in the event of a TCP socket half open connection + tcp_keepalive: {} load_assignment: cluster_name: xds_cluster endpoints: @@ -253,7 +267,8 @@ The management server could respond to LDS requests with: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager stat_prefix: ingress_http codec_type: AUTO rds: @@ -317,17 +332,6 @@ The management server could respond to EDS requests with: address: 127.0.0.2 port_value: 1234 -Upgrading from v1 configuration -------------------------------- - -While new v2 bootstrap JSON/YAML can be written, it might be expedient to upgrade an existing -v1 JSON/YAML configuration to v2. To do this (in an Envoy source tree), -you can run: - -.. code-block:: console - - bazel run //tools:v1_to_bootstrap - .. _config_overview_v2_management_server: Management server @@ -337,7 +341,7 @@ A v2 xDS management server will implement the below endpoints as required for gRPC and/or REST serving. In both streaming gRPC and REST-JSON cases, a :ref:`DiscoveryRequest ` is sent and a :ref:`DiscoveryResponse ` received following the -`xDS protocol `_. +:repo:`xDS protocol `. .. _v2_grpc_streaming_endpoints: @@ -346,9 +350,8 @@ gRPC streaming endpoints .. http:post:: /envoy.api.v2.ClusterDiscoveryService/StreamClusters -See `cds.proto -`_ -for the service definition. This is used by Envoy as a client when +See :repo:`cds.proto ` for the service definition. This is used by Envoy +as a client when .. code-block:: yaml @@ -365,8 +368,8 @@ is set in the :ref:`dynamic_resources .. http:post:: /envoy.api.v2.EndpointDiscoveryService/StreamEndpoints -See `eds.proto -`_ +See :repo:`eds.proto +` for the service definition. 
This is used by Envoy as a client when .. code-block:: yaml @@ -384,8 +387,8 @@ is set in the :ref:`eds_cluster_config .. http:post:: /envoy.api.v2.ListenerDiscoveryService/StreamListeners -See `lds.proto -`_ +See :repo:`lds.proto +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -403,8 +406,8 @@ is set in the :ref:`dynamic_resources .. http:post:: /envoy.api.v2.RouteDiscoveryService/StreamRoutes -See `rds.proto -`_ +See :repo:`rds.proto +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -426,8 +429,8 @@ REST endpoints .. http:post:: /v2/discovery:clusters -See `cds.proto -`_ +See :repo:`cds.proto +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -443,8 +446,8 @@ is set in the :ref:`dynamic_resources .. http:post:: /v2/discovery:endpoints -See `eds.proto -`_ +See :repo:`eds.proto +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -460,8 +463,8 @@ is set in the :ref:`eds_cluster_config .. http:post:: /v2/discovery:listeners -See `lds.proto -`_ +See :repo:`lds.proto +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -477,8 +480,8 @@ is set in the :ref:`dynamic_resources .. http:post:: /v2/discovery:routes -See `rds.proto -`_ +See :repo:`rds.proto +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -507,7 +510,7 @@ the management of multiple streams and connections to distinct management servers. ADS will allow for hitless updates of configuration by appropriate sequencing. -For example, suppose *foo.com* was mappped to cluster *X*. We wish to change the +For example, suppose *foo.com* was mapped to cluster *X*. We wish to change the mapping in the route table to point *foo.com* at cluster *Y*. In order to do this, a CDS/EDS update must first be delivered containing both clusters *X* and *Y*. 
@@ -521,14 +524,14 @@ synchronization to correctly sequence the update. With ADS, the management server would deliver the CDS, EDS and then RDS updates on a single stream. ADS is only available for gRPC streaming (not REST) and is described more fully -in `this -`_ +in :repo:`this +` document. The gRPC endpoint is: -.. http:post:: /envoy.api.v2.AggregatedDiscoveryService/StreamAggregatedResources +.. http:post:: /envoy.service.discovery.v2.AggregatedDiscoveryService/StreamAggregatedResources -See `discovery.proto -`_ +See :repo:`discovery.proto +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -587,10 +590,10 @@ Management Server has a statistics tree rooted at *control_plane.* with the foll :header: Name, Type, Description :widths: 1, 1, 2 - connected_state, Gauge, A boolan (1 for connected and 0 for disconnected) that indicates the current connection state with management server + connected_state, Gauge, A boolean (1 for connected and 0 for disconnected) that indicates the current connection state with management server rate_limit_enforced, Counter, Total number of times rate limit was enforced for management server requests pending_requests, Gauge, Total number of pending requests when the rate limit was enforced - + .. _config_overview_v2_status: Status @@ -607,8 +610,8 @@ means that we will not break wire format compatibility. manner that does not break `backwards compatibility `_. Fields in the above protos may be later deprecated, subject to the -`breaking change policy -`_, +:repo:`breaking change policy +`, when their related functionality is no longer required. While frozen APIs have their wire format compatibility preserved, we reserve the right to change proto namespaces, file locations and nesting relationships, which may cause @@ -619,7 +622,7 @@ likely to be at least partially implemented in Envoy but may have wire format breaking changes made prior to freezing. 
Protos tagged *experimental*, have the same caveats as draft protos -and may have have major changes made prior to Envoy implementation and freezing. +and may have major changes made prior to Envoy implementation and freezing. The current open v2 API issues are tracked `here `_. diff --git a/docs/root/configuration/runtime.rst b/docs/root/configuration/runtime.rst index 262dafdd0d6bf..d367822214391 100644 --- a/docs/root/configuration/runtime.rst +++ b/docs/root/configuration/runtime.rst @@ -3,29 +3,67 @@ Runtime ======= -The :ref:`runtime configuration ` specifies the location of the local file -system tree that contains re-loadable configuration elements. Values can be viewed at the -:ref:`/runtime admin endpoint `. Values can be modified and -added at the :ref:`/runtime_modify admin endpoint `. If -runtime is not configured, an empty provider is used which has the effect of using all defaults -built into the code, except for any values added via `/runtime_modify`. +The :ref:`runtime configuration ` specifies a virtual file system tree that +contains re-loadable configuration elements. This virtual file system can be realized via a series +of local file system, static bootstrap configuration and admin console derived overlays. -.. attention:: +* :ref:`v2 API reference ` - Use the :ref:`/runtime_modify` endpoint with care. - Changes are effectively immediately. It is **critical** that the admin interface is :ref:`properly - secured `. +.. _config_virtual_filesystem: +Virtual file system +------------------- -* :ref:`v2 API reference ` +.. _config_runtime_layering: + +Layering +++++++++ + +The runtime can be viewed as virtual file system consisting of multiple layers: + +1. :ref:`Static bootstrap configuration ` +2. :ref:`Local disk file system ` +3. :ref:`Local disk file system *override_subdirectory* ` +4. :ref:`Admin console overrides ` + +with values in higher layers overriding corresponding values in lower layers. + +.. 
_config_runtime_file_system: File system layout ------------------- +++++++++++++++++++ Various sections of the configuration guide describe the runtime settings that are available. For example, :ref:`here ` are the runtime settings for upstream clusters. +Each '.' in a runtime key indicates a new directory in the hierarchy. +The terminal portion of a path is the file. The contents of the file constitute the runtime value. +When reading numeric values from a file, spaces and new lines will be ignored. + +*numerator* or *denominator* are reserved keywords and may not appear in any directory. + +.. _config_runtime_bootstrap: + +Static bootstrap +++++++++++++++++ + +A static base runtime may be specified in the :ref:`bootstrap configuration +` via a :ref:`protobuf JSON representation +`. + +.. _config_runtime_local_disk: + +Local disk file system +++++++++++++++++++++++ + +When the :ref:`runtime virtual file system ` is realized on a local +disk, it is rooted at *symlink_root* + +*subdirectory*. For example, the *health_check.min_interval* key would have the following full +file system path (using the symbolic link): + +``/srv/runtime/current/envoy/health_check/min_interval`` + Assume that the folder ``/srv/runtime/v1`` points to the actual file system path where global runtime configurations are stored. The following would be a typical configuration setting for runtime: @@ -36,15 +74,6 @@ runtime: Where ``/srv/runtime/current`` is a symbolic link to ``/srv/runtime/v1``. -Each '.' in a runtime key indicates a new directory in the hierarchy, rooted at *symlink_root* + -*subdirectory*. For example, the *health_check.min_interval* key would have the following full -file system path (using the symbolic link): - -``/srv/runtime/current/envoy/health_check/min_interval`` - -The terminal portion of a path is the file. The contents of the file constitute the runtime value. -When reading numeric values from a file, spaces and new lines will be ignored.
- The *override_subdirectory* is used along with the :option:`--service-cluster` CLI option. Assume that :option:`--service-cluster` has been set to ``my-cluster``. Envoy will first look for the *health_check.min_interval* key in the following full file system path: @@ -54,6 +83,60 @@ that :option:`--service-cluster` has been set to ``my-cluster``. Envoy will firs If found, the value will override any value found in the primary lookup path. This allows the user to customize the runtime values for individual clusters on top of global defaults. +.. _config_runtime_symbolic_link_swap: + +Updating runtime values via symbolic link swap +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +There are two steps to update any runtime value. First, create a hard copy of the entire runtime +tree and update the desired runtime values. Second, atomically swap the symbolic link root from the +old tree to the new runtime tree, using the equivalent of the following command: + +.. code-block:: console + + /srv/runtime:~$ ln -s /srv/runtime/v2 new && mv -Tf new current + +It's beyond the scope of this document how the file system data is deployed, garbage collected, etc. + +.. _config_runtime_admin: + +Admin console ++++++++++++++ + +Values can be viewed at the +:ref:`/runtime admin endpoint `. Values can be modified and +added at the :ref:`/runtime_modify admin endpoint `. If +runtime is not configured, an empty provider is used which has the effect of using all defaults +built into the code, except for any values added via `/runtime_modify`. + +.. attention:: + + Use the :ref:`/runtime_modify` endpoint with care. + Changes are effective immediately. It is **critical** that the admin interface is :ref:`properly + secured `. + +..
_config_runtime_proto_json: + +Protobuf and JSON representation +-------------------------------- + +The runtime :ref:`file system ` can be represented inside a proto3 +message as a `google.protobuf.Struct +`_ +modeling a JSON object with the following rules: + +* Dot separators map to tree edges. +* Scalar leaves (integer, strings, booleans) are represented with their respective JSON type. +* :ref:`FractionalPercent ` is represented via its + `canonical JSON encoding `_. + +An example representation of a setting for the *health_check.min_interval* key in YAML is: + +.. code-block:: yaml + + health_check: + min_interval: 5 + .. _config_runtime_comments: Comments @@ -64,20 +147,31 @@ Lines starting with ``#`` as the first character are treated as comments. Comments can be used to provide context on an existing value. Comments are also useful in an otherwise empty file to keep a placeholder for deployment in a time of need. -.. _config_runtime_symbolic_link_swap: +Using runtime overrides for deprecated features +----------------------------------------------- -Updating runtime values via symbolic link swap ----------------------------------------------- +The Envoy runtime is also a part of the Envoy feature deprecation process. -There are two steps to update any runtime value. First, create a hard copy of the entire runtime -tree and update the desired runtime values. Second, atomically swap the symbolic link root from the -old tree to the new runtime tree, using the equivalent of the following command: +As described in the Envoy :repo:`breaking change policy `, +feature deprecation in Envoy is in 3 phases: warn-by-default, fail-by-default, and code removal. -.. code-block:: console +In the first phase, Envoy logs a warning to the warning log that the feature is deprecated and +increments the :ref:`deprecated_feature_use ` runtime stat.
+Users are encouraged to go to :ref:`deprecated ` to see how to +migrate to the new code path and make sure it is suitable for their use case. - /srv/runtime:~$ ln -s /srv/runtime/v2 new && mv -Tf new current +In the second phase the message and filename will be added to +:repo:`runtime_features.cc ` +and use of that configuration field will cause the config to be rejected by default. +This fail-by-default mode can be overridden in runtime configuration by setting +envoy.deprecated_features.filename.proto:fieldname to true. For example, for a deprecated field +``Foo.Bar.Eep`` in ``baz.proto`` set ``envoy.deprecated_features.baz.proto:Eep`` to +``true``. Use of this override is **strongly discouraged**. +Fatal-by-default configuration indicates that the removal of the old code paths is imminent. It is +far better for both Envoy users and for Envoy contributors if any bugs or feature gaps with the new +code paths are flushed out ahead of time, rather than after the code is removed! -It's beyond the scope of this document how the file system data is deployed, garbage collected, etc. +.. _runtime_stats: Statistics ---------- @@ -92,4 +186,5 @@ The file system runtime provider emits some statistics in the *runtime.* namespa override_dir_not_exists, Counter, Total number of loads that did not use an override directory override_dir_exists, Counter, Total number of loads that did use an override directory load_success, Counter, Total number of load attempts that were successful + deprecated_feature_use, Counter, Total number of times deprecated features were used. 
num_keys, Gauge, Number of keys currently loaded diff --git a/docs/root/configuration/secret.rst b/docs/root/configuration/secret.rst index c1d8566ebf083..bf42233583fce 100644 --- a/docs/root/configuration/secret.rst +++ b/docs/root/configuration/secret.rst @@ -13,15 +13,15 @@ If a listener server certificate needs to be fetched by SDS remotely, it will NO Upstream clusters are handled in a similar way, if a cluster client certificate needs to be fetched by SDS remotely, it will NOT be marked as active and it will not be used before the certificates are fetched. If Envoy fails to fetch the certificates due to connection failures, or bad response data, the cluster will be marked as active, it can be used to handle the requests, but the requests routed to that cluster will be rejected. -If a static cluster is using SDS, and it needs to define a SDS cluster (unless Google gRPC is used which doens't need a cluster), the SDS cluster has to be defined before the static clusters using it. +If a static cluster is using SDS, and it needs to define a SDS cluster (unless Google gRPC is used which doesn't need a cluster), the SDS cluster has to be defined before the static clusters using it. -The connection bewteeen Envoy proxy and SDS server has to be secure. One option is to run the SDS server on the same host and use Unix Domain Socket for the connection. Otherwise it requires mTLS between the proxy and SDS server. In this case, the client certificates for the SDS connection must be statically configured. +The connection between Envoy proxy and SDS server has to be secure. One option is to run the SDS server on the same host and use Unix Domain Socket for the connection. Otherwise it requires mTLS between the proxy and SDS server. In this case, the client certificates for the SDS connection must be statically configured. SDS server ---------- -A SDS server needs to implement the gRPC service `SecretDiscoveryService `_. 
-It follows the same protocol as other `xDS `_ +A SDS server needs to implement the gRPC service :repo:`SecretDiscoveryService `. +It follows the same protocol as other :repo:`xDS `. SDS Configuration ----------------- @@ -59,8 +59,8 @@ This example show how to configure secrets in the static_resource: E0:F3:C8:CE:5E:2E:A3:05:F0:70:1F:F5:12:E3:6E:2E:97:92:82:84:A2:28:BC:F7:73:32:D3:39:30:A1:B6:FD clusters: - connect_timeout: 0.25s - hosts: - - name: local_service_tls + load_assignment: + cluster_name: local_service_tls ... tls_context: common_tls_context: @@ -89,10 +89,15 @@ This example shows how to configure secrets fetched from remote SDS servers: clusters: - name: sds_server_mtls http2_protocol_options: {} - hosts: - socket_address: - address: 127.0.0.1 - port_value: 8234 + load_assignment: + cluster_name: sds_server_mtls + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8234 tls_context: common_tls_context: - tls_certificate: @@ -102,13 +107,18 @@ This example shows how to configure secrets fetched from remote SDS servers: filename: certs/sds_key.pem - name: sds_server_uds http2_protocol_options: {} - hosts: - - pipe: - path: /tmp/uds_path + load_assignment: + cluster_name: sds_server_uds + endpoints: + - lb_endpoints: + - endpoint: + address: + pipe: + path: /tmp/uds_path - name: example_cluster connect_timeout: 0.25s - hosts: - - name: local_service_tls + load_assignment: + cluster_name: local_service_tls ... tls_context: common_tls_context: diff --git a/docs/root/configuration/statistics.rst b/docs/root/configuration/statistics.rst index cce87dce6cb4e..5829244ed82f9 100644 --- a/docs/root/configuration/statistics.rst +++ b/docs/root/configuration/statistics.rst @@ -3,14 +3,6 @@ Statistics ========== -A few statistics are emitted to report statistics system behavior: - -.. 
csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - stats.overflow, Counter, Total number of times Envoy cannot allocate a statistic due to a shortage of shared memory - Server ------ diff --git a/docs/root/configuration/tools/router_check.rst b/docs/root/configuration/tools/router_check.rst index e064da563af33..fbbf2e8dda0bb 100644 --- a/docs/root/configuration/tools/router_check.rst +++ b/docs/root/configuration/tools/router_check.rst @@ -137,7 +137,7 @@ input validate *(required, object)* The validate object specifies the returned route parameters to match. At least one - test parameter must be specificed. Use "" (empty string) to indicate that no return value is expected. + test parameter must be specified. Use "" (empty string) to indicate that no return value is expected. For example, to test that no cluster match is expected use {"cluster_name": ""}. cluster_name @@ -161,7 +161,7 @@ validate header_fields *(optional, array)* Match the listed header fields. Examples header fields include the ":path", "cookie", and "date" fields. The header fields are checked after all other test cases. Thus, the header fields checked - will be those of the redirected or rewriten routes when applicable. + will be those of the redirected or rewritten routes when applicable. field *(required, string)* The name of the header field to match. 
diff --git a/docs/root/configuration/well_known_dynamic_metadata.rst b/docs/root/configuration/well_known_dynamic_metadata.rst index dd11866a42a02..73215617e46db 100644 --- a/docs/root/configuration/well_known_dynamic_metadata.rst +++ b/docs/root/configuration/well_known_dynamic_metadata.rst @@ -17,3 +17,4 @@ The following Envoy filters emit dynamic metadata that other filters can leverag * :ref:`MySQL Proxy Filter ` * :ref:`Role Based Access Control (RBAC) Filter ` * :ref:`Role Based Access Control (RBAC) Network Filter ` +* :ref:`ZooKeeper Proxy Filter ` diff --git a/docs/root/extending/extending.rst b/docs/root/extending/extending.rst index 8a6f4e22ced36..a44e367851496 100644 --- a/docs/root/extending/extending.rst +++ b/docs/root/extending/extending.rst @@ -3,8 +3,24 @@ Extending Envoy for custom use cases ==================================== -The Envoy architecture makes it fairly easily extensible via both :ref:`network filters -` and :ref:`HTTP filters `. +The Envoy architecture makes it fairly easily extensible via a variety of different extension +types including: + +* :ref:`Access loggers ` +* :ref:`Clusters ` +* :ref:`Listener filters ` +* :ref:`Network filters ` +* :ref:`HTTP filters ` +* :ref:`gRPC credential providers ` +* :ref:`Health checkers ` +* :ref:`Resource monitors ` +* :ref:`Retry implementations ` +* :ref:`Stat sinks ` +* :ref:`Tracers ` +* Transport sockets + +As of this writing there is no high level extension developer documentation. The +:repo:`existing extensions ` are a good way to learn what is possible. An example of how to add a network filter and structure the repository and build dependencies can be found at `envoy-filter-example `_.
diff --git a/docs/root/faq/configure_flow_control.rst b/docs/root/faq/configure_flow_control.rst new file mode 100644 index 0000000000000..f1a94ca2e4099 --- /dev/null +++ b/docs/root/faq/configure_flow_control.rst @@ -0,0 +1,65 @@ +How can I configure flow control +================================ + +Flow control may cause problems where Envoy is using non-streaming L7 filters, and request or +response bodies exceed the L7 buffer limits. For requests where the body must be buffered and +exceeds the configured limits, Envoy will serve a 413 to the user and increment the +:ref:`downstream_rq_too_large ` metric. On the response path if the +response body must be buffered and exceeds the limit, Envoy will increment the +:ref:`rs_too_large ` metric and either disconnect mid-response +(if headers have already been sent downstream) or send a 500 response. + +There are three knobs for configuring Envoy flow control: +:ref:`listener limits `, +:ref:`cluster limits ` and +:ref:`http2 stream limits ` + +The listener limits apply to how much raw data will be read per read() call from +downstream, as well as how much data may be buffered in userspace between Envoy +and downstream. + +The listener limits are also propagated to the HttpConnectionManager, and applied on a per-stream +basis to HTTP/1.1 L7 buffers described below. As such they limit the size of HTTP/1 requests and +response bodies that can be buffered. For HTTP/2, as many streams can be multiplexed over one TCP +connection, the L7 and L4 buffer limits can be tuned separately, and the configuration option +:ref:`http2 stream limits ` +is applied to all of the L7 buffers. Note that for both HTTP/1 and +HTTP/2 Envoy can and will proxy arbitrarily large bodies on routes where all L7 filters are +streaming, but many filters such as the transcoder or buffer filters require the full HTTP body to +be buffered, so limit the request and response size based on the listener limit.
+ +The cluster limits affect how much raw data will be read per read() call from upstream, as +well as how much data may be buffered in userspace between Envoy and upstream. + +The following code block shows how to adjust all three fields mentioned above, though generally +the only one which needs to be amended is the listener +:ref:`per_connection_buffer_limit_bytes ` + +.. code-block:: yaml + + staticResources: + listeners: + name: http + address: + socketAddress: + address: '::1' + portValue: 0 + filterChains: + filters: + name: envoy.http_connection_manager + config: + http2_protocol_options: + initial_stream_window_size: 65535 + route_config: {} + codec_type: HTTP2 + http_filters: [] + stat_prefix: config_test + perConnectionBufferLimitBytes: 1024 + clusters: + name: cluster_0 + connectTimeout: 5s + perConnectionBufferLimitBytes: 1024 + hosts: + socketAddress: + address: '::1' + portValue: 46685 diff --git a/docs/root/faq/overview.rst b/docs/root/faq/overview.rst index 7b10c6416117a..0fe96c0fd675b 100644 --- a/docs/root/faq/overview.rst +++ b/docs/root/faq/overview.rst @@ -14,4 +14,5 @@ FAQ lb_panic_threshold concurrency_lb disable_circuit_breaking + configure_flow_control transient_failures diff --git a/docs/root/faq/sni.rst b/docs/root/faq/sni.rst index cb36a587857c9..61ef2619db037 100644 --- a/docs/root/faq/sni.rst +++ b/docs/root/faq/sni.rst @@ -19,7 +19,7 @@ The following is a YAML example of the above requirement. socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: server_names: ["example.com", "www.example.com"] @@ -30,7 +30,8 @@ The following is a YAML example of the above requirement. 
private_key: { filename: "example_com_key.pem" } filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager stat_prefix: ingress_http route_config: virtual_hosts: @@ -48,7 +49,8 @@ The following is a YAML example of the above requirement. private_key: { filename: "api_example_com_key.pem" } filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager stat_prefix: ingress_http route_config: virtual_hosts: diff --git a/docs/root/index.rst b/docs/root/index.rst index 2e824f0c135a4..354b3578c8ed2 100644 --- a/docs/root/index.rst +++ b/docs/root/index.rst @@ -18,5 +18,5 @@ Envoy documentation configuration/configuration operations/operations extending/extending - api-v2/api + api/api faq/overview diff --git a/docs/root/install/building.rst b/docs/root/install/building.rst index c0c0e0084886b..143f236c6b3e2 100644 --- a/docs/root/install/building.rst +++ b/docs/root/install/building.rst @@ -6,7 +6,7 @@ Building The Envoy build system uses Bazel. In order to ease initial building and for a quick start, we provide an Ubuntu 16 based docker container that has everything needed inside of it to build -and *statically link* envoy, see :repo:`ci/README.md`. +and *statically link* Envoy, see :repo:`ci/README.md`. In order to build manually, follow the instructions at :repo:`bazel/README.md`. @@ -20,8 +20,7 @@ recent Linux including Ubuntu 16 LTS. Building Envoy has the following requirements: -* GCC 5+ (for C++14 support). -* These :repo:`pre-built ` third party dependencies. +* GCC 7+ or Clang/LLVM 7+ (for C++14 support). * These :repo:`Bazel native ` dependencies. Please see the linked :repo:`CI ` and :repo:`Bazel ` documentation @@ -32,8 +31,8 @@ for more information on performing manual builds. 
Pre-built binaries ------------------ -On every master commit we create a set of lightweight Docker images that contain the Envoy -binary. We also tag the docker images with release versions when we do official releases. +We build and tag Docker images with release versions when we do official releases. These images can +be found in the following repositories: * `envoyproxy/envoy `_: Release binary with symbols stripped on top of an Ubuntu Xenial base. @@ -42,6 +41,33 @@ binary. We also tag the docker images with release versions when we do official * `envoyproxy/envoy-alpine-debug `_: Release binary with debug symbols on top of a **glibc** alpine base. +In the above repositories, the *latest* tag points to the latest official release. + +.. note:: + + The above repositories used to contain the dev images described below. They remain to avoid + breaking existing users. New dev images are added to the repositories described in the following + section. + +On every master commit we additionally create a set of development Docker images. These images can +be found in the following repositories: + +* `envoyproxy/envoy-dev `_: Release binary with + symbols stripped on top of an Ubuntu Xenial base. +* `envoyproxy/envoy-alpine-dev `_: Release + binary with symbols stripped on top of a **glibc** alpine base. +* `envoyproxy/envoy-alpine-debug-dev `_: + Release binary with debug symbols on top of a **glibc** alpine base. + +In the above *dev* repositories, the *latest* tag points to the last Envoy SHA in master that passed +tests. + +.. note:: + + The Envoy project considers master to be release candidate quality at all times, and many + organizations track and deploy master in production. We encourage you to do the same so that + issues can be reported as early as possible in the development process. + We will consider producing additional binary types depending on community interest in helping with CI, packaging, etc. Please open an `issue `_ in GitHub if desired. 
@@ -57,6 +83,3 @@ Envoy binary, and putting the binary in an Ubuntu container. :maxdepth: 1 sandboxes/local_docker_build - - - diff --git a/docs/root/install/ref_configs.rst b/docs/root/install/ref_configs.rst index 4ae48c831f487..bc14ae225e242 100644 --- a/docs/root/install/ref_configs.rst +++ b/docs/root/install/ref_configs.rst @@ -46,8 +46,8 @@ A few notes about the example configurations: to be running at `discovery.yourcompany.net`. * DNS for `yourcompany.net` is assumed to be setup for various things. Search the configuration templates for different instances of this. -* Tracing is configured for `LightStep `_. To - disable this or enable `Zipkin `_ or `Datadog `_ tracing, delete or +* Tracing is configured for `LightStep `_. To + disable this or enable `Zipkin `_ or `Datadog `_ tracing, delete or change the :ref:`tracing configuration ` accordingly. * The configuration demonstrates the use of a :ref:`global rate limiting service `. To disable this delete the :ref:`rate limit configuration diff --git a/docs/root/install/sandboxes/local_docker_build.rst b/docs/root/install/sandboxes/local_docker_build.rst index 578c275a29dc0..d5dda8c03191a 100644 --- a/docs/root/install/sandboxes/local_docker_build.rst +++ b/docs/root/install/sandboxes/local_docker_build.rst @@ -19,7 +19,7 @@ That command will take some time to run because it is compiling an Envoy binary For more information on building and different build targets, please refer to :repo:`ci/README.md`. 
-**Step 2: Build image with only envoy binary** +**Step 2: Build image with only Envoy binary** In this step we'll build an image that only has the Envoy binary, and none of the software used to build it.:: diff --git a/docs/root/install/tools/schema_validator_check_tool.rst b/docs/root/install/tools/schema_validator_check_tool.rst index dc68b7f3990d9..9684962ec7851 100644 --- a/docs/root/install/tools/schema_validator_check_tool.rst +++ b/docs/root/install/tools/schema_validator_check_tool.rst @@ -3,24 +3,26 @@ Schema Validator check tool =========================== -The schema validator tool validates that the passed in JSON conforms to a schema in -the configuration. To validate the entire config, please refer to the +The schema validator tool validates that the passed in configuration conforms to +a given schema. The configuration may be JSON or YAML. To validate the entire +config, please refer to the :ref:`config load check tool`. Currently, only :ref:`route config` schema validation is supported. Input The tool expects two inputs: - 1. The schema type to check the passed in JSON against. The supported type is: + 1. The schema type to check the passed in configuration against. The supported type is: * `route` - for :ref:`route configuration` validation. - 2. The path to the JSON. + 2. The path to the configuration file. Output - If the JSON conforms to the schema, the tool will exit with status EXIT_SUCCESS. If the JSON does - not conform to the schema, an error message is outputted detailing what doesn't conform to the - schema. The tool will exit with status EXIT_FAILURE. + If the configuration conforms to the schema, the tool will exit with status + EXIT_SUCCESS. If the configuration does not conform to the schema, an error + message is outputted detailing what doesn't conform to the schema. The tool + will exit with status EXIT_FAILURE. Building The tool can be built locally using Bazel. 
:: @@ -30,4 +32,4 @@ Building Running The tool takes a path as described above. :: - bazel-bin/test/tools/schema_validator/schema_validator_tool --schema-type SCHEMA_TYPE --json-path PATH + bazel-bin/test/tools/schema_validator/schema_validator_tool --schema-type SCHEMA_TYPE --config-path PATH diff --git a/docs/root/intro/arch_overview/circuit_breaking.rst b/docs/root/intro/arch_overview/circuit_breaking.rst index a92bd622ee573..57dc097dba90d 100644 --- a/docs/root/intro/arch_overview/circuit_breaking.rst +++ b/docs/root/intro/arch_overview/circuit_breaking.rst @@ -9,6 +9,8 @@ mesh is that Envoy enforces circuit breaking limits at the network level as oppo configure and code each application independently. Envoy supports various types of fully distributed (not coordinated) circuit breaking: +.. _arch_overview_circuit_break_cluster_maximum_connections: + * **Cluster maximum connections**: The maximum number of connections that Envoy will establish to all hosts in an upstream cluster. In practice this is only applicable to HTTP/1.1 clusters since HTTP/2 uses a single connection to each host. If this circuit breaker overflows the :ref:`upstream_cx_overflow @@ -34,10 +36,29 @@ configure and code each application independently. Envoy supports various types :ref:`upstream_rq_retry_overflow ` counter for the cluster will increment. + .. _arch_overview_circuit_break_cluster_maximum_connection_pools: + +* **Cluster maximum concurrent connection pools**: The maximum number of connection pools that can be + concurrently instantiated. Some features, such as the + :ref:`Original Src Listener Filter `, can + create an unbounded number of connection pools. When a cluster has exhausted its concurrent + connection pools, it will attempt to reclaim an idle one. If it cannot, then the circuit breaker + will overflow. This differs from + :ref:`Cluster maximum connections ` in that + connection pools never time out, whereas connections typically will. 
Connections automatically + clean up; connection pools do not. Note that in order for a connection pool to function it needs + at least one upstream connection, so this value should likely be no greater than + :ref:`Cluster maximum connections `. + If this circuit breaker overflows the + :ref:`upstream_cx_pool_overflow ` counter for the cluster + will increment. + + Each circuit breaking limit is :ref:`configurable ` and tracked on a per upstream cluster and per priority basis. This allows different components of the distributed system to be tuned independently and have different limits. The live state of these -circuit breakers can be observed via :ref:`statistics `. +circuit breakers, including the number of resources remaining until a circuit breaker opens, can +be observed via :ref:`statistics `. Note that circuit breaking will cause the :ref:`x-envoy-overloaded ` header to be set by the router filter in the diff --git a/docs/root/intro/arch_overview/cluster_manager.rst b/docs/root/intro/arch_overview/cluster_manager.rst index 71739a4a302c8..8550d3a0655ba 100644 --- a/docs/root/intro/arch_overview/cluster_manager.rst +++ b/docs/root/intro/arch_overview/cluster_manager.rst @@ -25,6 +25,8 @@ distribution. * Cluster manager :ref:`configuration `. * CDS :ref:`configuration `. +.. _arch_overview_cluster_warming: + Cluster warming --------------- diff --git a/docs/root/intro/arch_overview/grpc.rst b/docs/root/intro/arch_overview/grpc.rst index 4b419395e36ed..1277ac6e983fd 100644 --- a/docs/root/intro/arch_overview/grpc.rst +++ b/docs/root/intro/arch_overview/grpc.rst @@ -3,7 +3,7 @@ gRPC ==== -`gRPC `_ is an RPC framework from Google. It uses protocol buffers as the +`gRPC `_ is an RPC framework from Google. It uses protocol buffers as the underlying serialization/IDL format. At the transport layer it uses HTTP/2 for request/response multiplexing. 
Envoy has first class support for gRPC both at the transport layer as well as at the application layer: @@ -31,8 +31,8 @@ Envoy supports two gRPC bridges: * :ref:`grpc_http1_bridge filter ` which allows gRPC requests to be sent to Envoy over HTTP/1.1. Envoy then translates the requests to HTTP/2 for transport to the target server. The response is translated back to HTTP/1.1. When installed, the bridge filter gathers per RPC statistics in addition to the standard array of global HTTP statistics. -* :ref:`grpc_http1_reverse_bridge filter ` which allows gRPC requests to be sent to Envoy and - then translated to HTTP/1.1 when sent to the upstream. The response is then converted back into gRPC when sent to the downstream. +* :ref:`grpc_http1_reverse_bridge filter ` which allows gRPC requests to be sent to Envoy + and then translated to HTTP/1.1 when sent to the upstream. The response is then converted back into gRPC when sent to the downstream. This filter can also optionally manage the gRPC frame header, allowing the upstream to not have to be gRPC aware at all. .. _arch_overview_grpc_services: @@ -40,9 +40,9 @@ Envoy supports two gRPC bridges: gRPC services ------------- -In addition to proxying gRPC on the data plane, Envoy make use of gRPC for its +In addition to proxying gRPC on the data plane, Envoy makes use of gRPC for its control plane, where it :ref:`fetches configuration from management server(s) -` and also in filters, for example for :ref:`rate limiting +` and in filters, such as for :ref:`rate limiting ` or authorization checks. We refer to these as *gRPC services*. diff --git a/docs/root/intro/arch_overview/health_checking.rst b/docs/root/intro/arch_overview/health_checking.rst index e039798c0163a..a79470fdc396d 100644 --- a/docs/root/intro/arch_overview/health_checking.rst +++ b/docs/root/intro/arch_overview/health_checking.rst @@ -11,9 +11,11 @@ even when using the other service discovery types. 
Envoy supports three differen checking along with various settings (check interval, failures required before marking a host unhealthy, successes required before marking a host healthy, etc.): -* **HTTP**: During HTTP health checking Envoy will send an HTTP request to the upstream host. It - expects a 200 response if the host is healthy. The upstream host can return 503 if it wants to - immediately notify downstream hosts to no longer forward traffic to it. +* **HTTP**: During HTTP health checking Envoy will send an HTTP request to the upstream host. By + default, it expects a 200 response if the host is healthy. Expected response codes are + :ref:`configurable `. The + upstream host can return 503 if it wants to immediately notify downstream hosts to no longer + forward traffic to it. * **L3/L4**: During L3/L4 health checking, Envoy will send a configurable byte buffer to the upstream host. It expects the byte buffer to be echoed in the response if the host is to be considered healthy. Envoy also supports connect only L3/L4 health checking. @@ -91,9 +93,10 @@ operation: Envoy will respond with a 200 or a 503 depending on the current draining state of the server. * **No pass through, computed from upstream cluster health**: In this mode, the health checking filter will return a 200 or a 503 depending on whether at least a :ref:`specified percentage - ` of the - servers are healthy in one or more upstream clusters. (If the Envoy server is in a draining - state, though, it will respond with a 503 regardless of the upstream cluster health.) + ` + of the servers are available (healthy + degraded) in one or more upstream clusters. (If the Envoy + server is in a draining state, though, it will respond with a 503 regardless of the upstream + cluster health.) * **Pass through**: In this mode, Envoy will pass every health check request to the local service. The service is expected to return a 200 or a 503 depending on its health state. 
* **Pass through with caching**: In this mode, Envoy will pass health check requests to the local diff --git a/docs/root/intro/arch_overview/http_connection_management.rst b/docs/root/intro/arch_overview/http_connection_management.rst index 713594d5d9645..68bdeacb33e20 100644 --- a/docs/root/intro/arch_overview/http_connection_management.rst +++ b/docs/root/intro/arch_overview/http_connection_management.rst @@ -48,31 +48,31 @@ table `. The route table can be specified in one of Retry plugin configuration -------------------------- -Normally during retries, hosts selection follows the same process as the original request. To modify -this behavior retry plugins can be used, which fall into two categories: +Normally during retries, host selection follows the same process as the original request. Retry plugins +can be used to modify this behavior, and they fall into two categories: * :ref:`Host Predicates `: - These predicates can be used to "reject" a host, which will cause host selection to be reattempted. - Any number of these predicates can be specified, and the host will be rejected if any of the predicates reject the host. + These predicates can be used to "reject" a host, which will cause host selection to be reattempted. + Any number of these predicates can be specified, and the host will be rejected if any of the predicates reject the host. Envoy supports the following built-in host predicates * *envoy.retry_host_predicates.previous_hosts*: This will keep track of previously attempted hosts, and rejects hosts that have already been attempted. - + * :ref:`Priority Predicates`: These predicates can be used to adjust the priority load used when selecting a priority for a retry attempt. Only one such predicate may be specified. 
Envoy supports the following built-in priority predicates - * *envoy.retry_priority.previous_priorities*: This will keep track of previously attempted priorities, + * *envoy.retry_priority.previous_priorities*: This will keep track of previously attempted priorities, and adjust the priority load such that other priorities will be targeted in subsequent retry attempts. Host selection will continue until either the configured predicates accept the host or a configurable -:ref:`max attempts ` has been reached. +:ref:`max attempts ` has been reached. -These plugins can be combined to affect both host selection and priority load. Envoy can also be extended +These plugins can be combined to affect both host selection and priority load. Envoy can also be extended with custom retry plugins similar to how custom filters can be added. @@ -104,7 +104,7 @@ To configure retries to attempt other priorities during retries, the built-in config: update_frequency: 2 -This will target priorites in subsequent retry attempts that haven't been already used. The ``update_frequency`` parameter decides how +This will target priorities in subsequent retry attempts that haven't been already used. The ``update_frequency`` parameter decides how often the priority load should be recalculated. These plugins can be combined, which will exclude both previously attempted hosts as well as @@ -152,7 +152,7 @@ upstream will be modified by: 2. Replacing the Authority/Host, Scheme, and Path headers with the values from the Location header. The altered request headers will then have a new route selected, be sent through a new filter chain, -and then shipped upstream with all of the normal Envoy request sanitization taking place. +and then shipped upstream with all of the normal Envoy request sanitization taking place. .. 
warning:: Note that HTTP connection manager sanitization such as clearing untrusted headers will only be diff --git a/docs/root/intro/arch_overview/init.rst b/docs/root/intro/arch_overview/init.rst index af0320a57f322..2e05c5a750564 100644 --- a/docs/root/intro/arch_overview/init.rst +++ b/docs/root/intro/arch_overview/init.rst @@ -1,3 +1,5 @@ +.. _arch_overview_initialization: + Initialization ============== @@ -22,3 +24,6 @@ accepting new connections. * After all of the previous steps have taken place, the listeners start accepting new connections. This flow ensures that during hot restart the new process is fully capable of accepting and processing new connections before the draining of the old process begins. + +All mentioned "waiting for one response" periods can be limited by setting corresponding +:ref:`initial_fetch_timeout `. diff --git a/docs/root/intro/arch_overview/ip_transparency.rst b/docs/root/intro/arch_overview/ip_transparency.rst index 7a270e940c726..48b442eca7dda 100644 --- a/docs/root/intro/arch_overview/ip_transparency.rst +++ b/docs/root/intro/arch_overview/ip_transparency.rst @@ -13,7 +13,7 @@ connection will be different from that of any proxied connections. Sometimes the upstream server or network may need to know the original IP address of the connection, called the *downstream remote address*, for many reasons. Some examples include: -* the the IP address being used to form part of an identity, +* the IP address being used to form part of an identity, * the IP address being used to enforce network policy, or * the IP address being included in an audit. diff --git a/docs/root/intro/arch_overview/load_balancing/load_balancers.rst b/docs/root/intro/arch_overview/load_balancing/load_balancers.rst index f2917b0a80757..df762656fea6a 100644 --- a/docs/root/intro/arch_overview/load_balancing/load_balancers.rst +++ b/docs/root/intro/arch_overview/load_balancing/load_balancers.rst @@ -34,7 +34,7 @@ weight greater than 1. 
* *all weights 1*: An O(1) algorithm which selects N random available hosts as specified in the :ref:`configuration ` (2 by default) and picks the host which has the fewest active requests (`Research - `_ has shown that this + `_ has shown that this approach is nearly as good as an O(N) full scan). This is also known as P2C (power of two choices). The P2C load balancer has the property that a host with the highest number of active requests in the cluster will never receive new requests. It will be allowed to drain until it is @@ -56,23 +56,30 @@ weight greater than 1. Ring hash ^^^^^^^^^ -The ring/modulo hash load balancer implements consistent hashing to upstream hosts. The algorithm is -based on mapping all hosts onto a circle such that the addition or removal of a host from the host -set changes only affect 1/N requests. This technique is also commonly known as `"ketama" -`_ hashing. A consistent hashing load balancer is only effective -when protocol routing is used that specifies a value to hash on. The minimum ring size governs the -replication factor for each host in the ring. For example, if the minimum ring size is 1024 and -there are 16 hosts, each host will be replicated 64 times. The ring hash load balancer does not -currently support weighting. +The ring/modulo hash load balancer implements consistent hashing to upstream hosts. Each host is +mapped onto a circle (the "ring") by hashing its address; each request is then routed to a host by +hashing some property of the request, and finding the nearest corresponding host clockwise around +the ring. This technique is also commonly known as `"Ketama" `_ +hashing, and like all hash-based load balancers, it is only effective when protocol routing is used +that specifies a value to hash on. + +Each host is hashed and placed on the ring some number of times proportional to its weight. 
For +example, if host A has a weight of 1 and host B has a weight of 2, then there might be three entries +on the ring: one for host A and two for host B. This doesn't actually provide the desired 2:1 +partitioning of the circle, however, since the computed hashes could be coincidentally very close to +one another; so it is necessary to multiply the number of hashes per host---for example inserting +100 entries on the ring for host A and 200 entries for host B---to better approximate the desired +distribution. Best practice is to explicitly set +:ref:`minimum_ring_size` and +:ref:`maximum_ring_size`, and monitor +the :ref:`min_hashes_per_host and max_hashes_per_host +gauges` to ensure good distribution. With the +ring partitioned appropriately, the addition or removal of one host from a set of N hosts will +affect only 1/N requests. When priority based load balancing is in use, the priority level is also chosen by hash, so the endpoint selected will still be consistent when the set of backends is stable. -.. note:: - - The ring hash load balancer does not support :ref:`locality weighted load - balancing `. - .. _arch_overview_load_balancing_types_maglev: Maglev @@ -86,6 +93,17 @@ any place in which consistent hashing is desired. Like the ring hash load balanc hashing load balancer is only effective when protocol routing is used that specifies a value to hash on. +The table construction algorithm places each host in the table some number of times proportional +to its weight, until the table is completely filled. For example, if host A has a weight of 1 and +host B has a weight of 2, then host A will have 21,846 entries and host B will have 43,691 entries +(totaling 65,537 entries). The algorithm attempts to place each host in the table at least once, +regardless of the configured host and locality weights, so in some extreme cases the actual +proportions may differ from the configured weights. 
For example, if the total number of hosts is +larger than the fixed table size, then some hosts will get 1 entry each and the rest will get 0, +regardless of weight. Best practice is to monitor the :ref:`min_entries_per_host and +max_entries_per_host gauges ` to ensure no hosts +are underrepresented or missing. + In general, when compared to the ring hash ("ketama") algorithm, Maglev has substantially faster table lookup build times as well as host selection times (approximately 10x and 5x respectively when using a large ring size of 256K entries). The downside of Maglev is that it is not as stable diff --git a/docs/root/intro/arch_overview/load_balancing/locality_weight.rst b/docs/root/intro/arch_overview/load_balancing/locality_weight.rst index c0ebce13595fe..1003d98418ed8 100644 --- a/docs/root/intro/arch_overview/load_balancing/locality_weight.rst +++ b/docs/root/intro/arch_overview/load_balancing/locality_weight.rst @@ -19,7 +19,7 @@ this. As with :ref:`priority levels `, we assume an :ref:`over-provision factor ` (default value 1.4), which means we do not perform any weight -adjustment when only a small number of endpoints in a locality are unavilable. +adjustment when only a small number of endpoints in a locality are unavailable. Assume a simple set-up with 2 localities X and Y, where X has a locality weight of 1 and Y has a locality weight of 2, L=Y 100% available, diff --git a/docs/root/intro/arch_overview/load_balancing/original_dst.rst b/docs/root/intro/arch_overview/load_balancing/original_dst.rst index 2fb3f6d0c7c55..212a7e9e645b7 100644 --- a/docs/root/intro/arch_overview/load_balancing/original_dst.rst +++ b/docs/root/intro/arch_overview/load_balancing/original_dst.rst @@ -17,7 +17,7 @@ be used with original destination clusters. Original destination host request header ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Envoy can also pick up the original destination from a HTTP header called -:ref:`x-envoy-orignal-dst-host `. 
+:ref:`x-envoy-original-dst-host `. Please note that fully resolved IP address should be passed in this header. For example if a request has to be routed to a host with IP address 10.195.16.237 at port 8888, the request header value should be set as ``10.195.16.237:8888``. diff --git a/docs/root/intro/arch_overview/load_balancing/overview.rst b/docs/root/intro/arch_overview/load_balancing/overview.rst index ebff9b9b46c9f..7897c19c89a29 100644 --- a/docs/root/intro/arch_overview/load_balancing/overview.rst +++ b/docs/root/intro/arch_overview/load_balancing/overview.rst @@ -16,7 +16,7 @@ load balancing and distributed load balancing. Distributed Load Balancing -------------------------- -Distributed load balancing refers to having Envoy itself deteremine how load should be distributed +Distributed load balancing refers to having Envoy itself determine how load should be distributed to the endpoints based on knowing the location of the upstream hosts. Examples diff --git a/docs/root/intro/arch_overview/load_balancing/panic_threshold.rst b/docs/root/intro/arch_overview/load_balancing/panic_threshold.rst index b2214507ba076..21f0f78ac4e31 100644 --- a/docs/root/intro/arch_overview/load_balancing/panic_threshold.rst +++ b/docs/root/intro/arch_overview/load_balancing/panic_threshold.rst @@ -18,7 +18,7 @@ finding enough available hosts in lower priorities, Envoy will disregard panic t mathematical terms, if normalized total availability across all priority levels is 100%, Envoy disregards panic thresholds and continues to distribute traffic load across priorities according to the algorithm described :ref:`here `. -However, when normalized total availablility drops below 100%, Envoy assumes that there are not enough +However, when normalized total availability drops below 100%, Envoy assumes that there are not enough available hosts across all priority levels. 
It continues to distribute traffic load across priorities, but if a given priority level's availability is below the panic threshold, traffic will go to all hosts in that priority level regardless of their availability. diff --git a/docs/root/intro/arch_overview/redis.rst b/docs/root/intro/arch_overview/redis.rst index d67cc710c2233..866f2ad01ad9a 100644 --- a/docs/root/intro/arch_overview/redis.rst +++ b/docs/root/intro/arch_overview/redis.rst @@ -8,7 +8,9 @@ In this mode, the goals of Envoy are to maintain availability and partition tole over consistency. This is the key point when comparing Envoy to `Redis Cluster `_. Envoy is designed as a best-effort cache, meaning that it will not try to reconcile inconsistent data or keep a globally consistent -view of cluster membership. +view of cluster membership. It also supports routing commands from different workloads to +different upstream clusters based on their access patterns, eviction, or isolation +requirements. The Redis project offers a thorough reference on partitioning as it relates to Redis. See "`Partitioning: how to split data among multiple Redis instances @@ -21,6 +23,8 @@ The Redis project offers a thorough reference on partitioning as it relates to R * Ketama distribution. * Detailed command statistics. * Active and passive healthchecking. +* Hash tagging. +* Prefix routing. **Planned future enhancements**: @@ -30,7 +34,6 @@ The Redis project offers a thorough reference on partitioning as it relates to R * Replication. * Built-in retry. * Tracing. -* Hash tagging. .. _arch_overview_redis_configuration: @@ -54,6 +57,28 @@ If passive healthchecking is desired, also configure For the purposes of passive healthchecking, connect timeouts, command timeouts, and connection close map to 5xx. All other responses from Redis are counted as a success. +Redis Cluster Support (Experimental) +---------------------------------------- + +Envoy currently offers experimental support for `Redis Cluster `_. 
+ +When using Envoy as a sidecar proxy for a Redis Cluster, the service can use a non-cluster Redis client +implemented in any language to connect to the proxy as if it's a single node Redis instance. +The Envoy proxy will keep track of the cluster topology and send commands to the correct Redis node in the +cluster according to the `spec `_. Advanced features such as reading +from replicas can also be added to the Envoy proxy instead of updating Redis clients in each language. + +Envoy proxy tracks the topology of the cluster by sending periodic +`cluster slots `_ commands to a random node in the cluster, and maintains the +following information: + +* List of known nodes. +* The masters for each shard. +* Nodes entering or leaving the cluster. + +For topology configuration details, see the Redis Cluster +:ref:`v2 API reference `. + Supported commands ------------------ @@ -148,6 +173,8 @@ For details on each command's usage see the official ZREVRANGEBYLEX, Sorted Set ZREVRANGEBYSCORE, Sorted Set ZREVRANK, Sorted Set + ZPOPMIN, Sorted Set + ZPOPMAX, Sorted Set ZSCAN, Sorted Set ZSCORE, Sorted Set APPEND, String diff --git a/docs/root/intro/arch_overview/service_discovery.rst b/docs/root/intro/arch_overview/service_discovery.rst index f950b82b667a2..4d00f638dbdc8 100644 --- a/docs/root/intro/arch_overview/service_discovery.rst +++ b/docs/root/intro/arch_overview/service_discovery.rst @@ -96,6 +96,14 @@ The Envoy project provides reference gRPC implementations of EDS and in both `Java `_ and `Go `_. +.. _arch_overview_service_discovery_types_custom: + +Custom cluster +^^^^^^^^^^^^^^ + +Envoy also supports a custom cluster discovery mechanism. Custom clusters are specified using +:ref:`cluster_type field ` on the cluster configuration. + Generally active health checking is used in conjunction with the eventually consistent service discovery service data to making load balancing and routing decisions. This is discussed further in the following section. 
diff --git a/docs/root/intro/arch_overview/ssl.rst b/docs/root/intro/arch_overview/ssl.rst index 3763ecf85b670..e73d14dd3ef35 100644 --- a/docs/root/intro/arch_overview/ssl.rst +++ b/docs/root/intro/arch_overview/ssl.rst @@ -86,7 +86,15 @@ Example configuration connect_timeout: 0.25s type: STATIC lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: 127.0.0.2, port_value: 1234 }}] + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.2 + port_value: 1234 tls_context: common_tls_context: tls_certificates: @@ -150,3 +158,23 @@ infrastructure. Client TLS authentication filter :ref:`configuration reference `. + +.. _arch_overview_ssl_trouble_shooting: + +Trouble shooting +---------------- + +When Envoy originates TLS when making connections to upstream clusters, any errors will be logged into +:ref:`UPSTREAM_TRANSPORT_FAILURE_REASON` field or +:ref:`AccessLogCommon.upstream_transport_failure_reason` field. +Common errors are: + +* ``Secret is not supplied by SDS``: Envoy is still waiting for SDS to deliver key/cert or root CA. +* ``SSLV3_ALERT_CERTIFICATE_EXPIRED``: Peer certificate is expired and not allowed in config. +* ``SSLV3_ALERT_CERTIFICATE_UNKNOWN``: Peer certificate is not in config specified SPKI. +* ``SSLV3_ALERT_HANDSHAKE_FAILURE``: Handshake failed, usually because the upstream requires a client certificate that was not presented. +* ``TLSV1_ALERT_PROTOCOL_VERSION``: TLS protocol version mismatch. +* ``TLSV1_ALERT_UNKNOWN_CA``: Peer certificate CA is not in trusted CA. + +A more detailed list of errors that can be raised by BoringSSL can be found +`here `_ diff --git a/docs/root/intro/arch_overview/tracing.rst b/docs/root/intro/arch_overview/tracing.rst index 91407ae4d01c3..47c635d5cb432 100644 --- a/docs/root/intro/arch_overview/tracing.rst +++ b/docs/root/intro/arch_overview/tracing.rst @@ -12,14 +12,18 @@ sources of latency. 
Envoy supports three features related to system wide tracing * **Request ID generation**: Envoy will generate UUIDs when needed and populate the :ref:`config_http_conn_man_headers_x-request-id` HTTP header. Applications can forward the x-request-id header for unified logging as well as tracing. -* **External trace service integration**: Envoy supports pluggable external trace visualization - providers. Currently Envoy supports `LightStep `_, `Zipkin `_ - or any Zipkin compatible backends (e.g. `Jaeger `_), and - `Datadog `_. - However, support for other tracing providers would not be difficult to add. * **Client trace ID joining**: The :ref:`config_http_conn_man_headers_x-client-trace-id` header can be used to join untrusted request IDs to the trusted internal :ref:`config_http_conn_man_headers_x-request-id`. +* **External trace service integration**: Envoy supports pluggable external trace visualization + providers, that are divided into two subgroups: + + - External tracers which are part of the Envoy code base, like `LightStep `_, + `Zipkin `_ or any Zipkin compatible backends (e.g. `Jaeger `_), and + `Datadog `_. + - External tracers which come as a third party plugin, like `Instana `_. + +Support for other tracing providers would not be difficult to add. 
How to initiate a trace ----------------------- diff --git a/docs/root/intro/arch_overview/websocket.rst b/docs/root/intro/arch_overview/websocket.rst index e2776e600b00a..e854eb53bb271 100644 --- a/docs/root/intro/arch_overview/websocket.rst +++ b/docs/root/intro/arch_overview/websocket.rst @@ -32,23 +32,23 @@ laid out below, but custom filter chains can only be configured on a per-HttpCon | F | F | F | +-----------------------+-------------------------+-------------------+ -Note that the statistics for upgrades are all bundled together so websocket +Note that the statistics for upgrades are all bundled together so WebSocket :ref:`statistics ` are tracked by stats such as downstream_cx_upgrades_total and downstream_cx_upgrades_active Handling H2 hops ^^^^^^^^^^^^^^^^ -Envoy currently has an alpha implementation of tunneling websockets over H2 streams for deployments -that prefer a uniform H2 mesh throughout, for example, for a deployment of the form: +Envoy supports tunneling WebSockets over H2 streams for deployments that prefer a uniform +H2 mesh throughout; this enables, for example, a deployment of the form: [Client] ---- HTTP/1.1 ---- [Front Envoy] ---- HTTP/2 ---- [Sidecar Envoy ---- H1 ---- App] -In this case, if a client is for example using WebSocket, we want the Websocket to arive at the +In this case, if a client is for example using WebSocket, we want the Websocket to arrive at the upstream server functionally intact, which means it needs to traverse the HTTP/2 hop. This is accomplished via -`extended CONNECT `_ support. The +`extended CONNECT `_ support. The WebSocket request will be transformed into an HTTP/2 CONNECT stream, with :protocol header indicating the original upgrade, traverse the HTTP/2 hop, and be downgraded back into an HTTP/1 WebSocket Upgrade. 
This same Upgrade-CONNECT-Upgrade transformation will be performed on any diff --git a/docs/root/intro/comparison.rst b/docs/root/intro/comparison.rst index 046d873bc2100..ccab990b93013 100644 --- a/docs/root/intro/comparison.rst +++ b/docs/root/intro/comparison.rst @@ -27,8 +27,8 @@ proxy: run a mix of nginx and haproxy. A single proxy solution at every hop is substantially simpler from an operations perspective. -`haproxy `_ ------------------------------------- +`haproxy `_ +------------------------------------- haproxy is the canonical modern software load balancer. It also supports basic HTTP reverse proxy features. Envoy provides the following main advantages over haproxy as a load balancer: @@ -60,8 +60,8 @@ well as basic HTTP L7 request routing to multiple backend clusters. The feature compared to Envoy and performance and stability are unknown, but it's clear that AWS will continue to invest in this area in the future. -`SmartStack `_ ---------------------------------------------------------------------------- +`SmartStack `_ +---------------------------------------------------------------------------- SmartStack is an interesting solution which provides additional service discovery and health checking support on top of haproxy. At a high level, SmartStack has most of the same goals as @@ -94,8 +94,8 @@ proxygen to obtain high performance as an HTTP library/proxy. Beyond that howeve are not really comparable as Envoy is a complete self contained server with a large feature set versus a library that must be built into something by each project individually. -`gRPC `_ ------------------------------ +`gRPC `_ +------------------------------ gRPC is a new multi-platform message passing system out of Google. It uses an IDL to describe an RPC library and then implements application specific runtimes for a variety of different languages. 
The diff --git a/docs/root/intro/deprecated.rst b/docs/root/intro/deprecated.rst new file mode 100644 index 0000000000000..db86c98be20ad --- /dev/null +++ b/docs/root/intro/deprecated.rst @@ -0,0 +1,126 @@ +.. _deprecated: + +Deprecated +---------- + +As of release 1.3.0, Envoy will follow a +`Breaking Change Policy `_. + +The following features have been DEPRECATED and will be removed in the specified release cycle. +A logged warning is expected for each deprecated item that is in deprecation window. +Deprecated items below are listed in chronological order. + +Version 1.11.0 (Pending) +======================== +* The --max-stats and --max-obj-name-len flags no longer has any effect. +* Use of :ref:`cluster ` in :ref:`redis_proxy.proto ` is deprecated. Set a :ref:`catch_all_cluster ` instead. + +Version 1.10.0 (Apr 5, 2019) +============================ +* Use of `use_alpha` in :ref:`Ext-Authz Authorization Service ` is deprecated. It should be used for a short time, and only when transitioning from alpha to V2 release version. +* Use of `enabled` in `CorsPolicy`, found in + :ref:`route.proto `. + Set the `filter_enabled` field instead. +* Use of the `type` field in the `FaultDelay` message (found in + :ref:`fault.proto `) + has been deprecated. It was never used and setting it has no effect. It will be removed in the + following release. + +Version 1.9.0 (Dec 20, 2018) +============================ +* Order of execution of the network write filter chain has been reversed. Prior to this release cycle it was incorrect, see `#4599 `_. In the 1.9.0 release cycle we introduced `bugfix_reverse_write_filter_order` in `lds.proto `_ to temporarily support both old and new behaviors. Note this boolean field is deprecated. +* Order of execution of the HTTP encoder filter chain has been reversed. Prior to this release cycle it was incorrect, see `#4599 `_. 
In the 1.9.0 release cycle we introduced `bugfix_reverse_encode_order` in `http_connection_manager.proto `_ to temporarily support both old and new behaviors. Note this boolean field is deprecated. +* Use of the v1 REST_LEGACY ApiConfigSource is deprecated. +* Use of std::hash in the ring hash load balancer is deprecated. +* Use of `rate_limit_service` configuration in the `bootstrap configuration `_ is deprecated. +* Use of `runtime_key` in `RequestMirrorPolicy`, found in + `route.proto `_ + is deprecated. Set the `runtime_fraction` field instead. +* Use of buffer filter `max_request_time` is deprecated in favor of the request timeout found in `HttpConnectionManager `_ + +Version 1.8.0 (Oct 4, 2018) +============================== +* Use of the v1 API (including `*.deprecated_v1` fields in the v2 API) is deprecated. + See envoy-announce `email `_. +* Use of the legacy + `ratelimit.proto `_ + is deprecated, in favor of the proto defined in + `date-plane-api `_ + Prior to 1.8.0, Envoy can use either proto to send client requests to a ratelimit server with the use of the + `use_data_plane_proto` boolean flag in the `ratelimit configuration `_. + However, when using the deprecated client a warning is logged. +* Use of the --v2-config-only flag. +* Use of both `use_websocket` and `websocket_config` in + `route.proto `_ + is deprecated. Please use the new `upgrade_configs` in the + `HttpConnectionManager `_ + instead. +* Use of the integer `percent` field in `FaultDelay `_ + and in `FaultAbort `_ is deprecated in favor + of the new `FractionalPercent` based `percentage` field. +* Setting hosts via `hosts` field in `Cluster` is deprecated. Use `load_assignment` instead. +* Use of `response_headers_to_*` and `request_headers_to_add` are deprecated at the `RouteAction` + level. Please use the configuration options at the `Route` level. +* Use of `runtime` in `RouteMatch`, found in + `route.proto `_. + Set the `runtime_fraction` field instead. 
+* Use of the string `user` field in `Authenticated` in `rbac.proto `_ + is deprecated in favor of the new `StringMatcher` based `principal_name` field. + +Version 1.7.0 (Jun 21, 2018) +=============================== +* Admin mutations should be sent as POSTs rather than GETs. HTTP GETs will result in an error + status code and will not have their intended effect. Prior to 1.7, GETs can be used for + admin mutations, but a warning is logged. +* Rate limit service configuration via the `cluster_name` field is deprecated. Use `grpc_service` + instead. +* gRPC service configuration via the `cluster_names` field in `ApiConfigSource` is deprecated. Use + `grpc_services` instead. Prior to 1.7, a warning is logged. +* Redis health checker configuration via the `redis_health_check` field in `HealthCheck` is + deprecated. Use `custom_health_check` with name `envoy.health_checkers.redis` instead. Prior + to 1.7, `redis_health_check` can be used, but warning is logged. +* `SAN` is replaced by `URI` in the `x-forwarded-client-cert` header. +* The `endpoint` field in the http health check filter is deprecated in favor of the `headers` + field where one can specify HeaderMatch objects to match on. +* The `sni_domains` field in the filter chain match was deprecated/renamed to `server_names`. + +Version 1.6.0 (March 20, 2018) +================================= +* DOWNSTREAM_ADDRESS log formatter is deprecated. Use DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT + instead. +* CLIENT_IP header formatter is deprecated. Use DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT instead. +* 'use_original_dst' field in the v2 LDS API is deprecated. Use listener filters and filter chain + matching instead. +* `value` and `regex` fields in the `HeaderMatcher` message is deprecated. Use the `exact_match` + or `regex_match` oneof instead. + +Version 1.5.0 (Dec 4, 2017) +============================== +* The outlier detection `ejections_total` stats counter has been deprecated and not replaced. 
Monitor + the individual `ejections_detected_*` counters for the detectors of interest, or + `ejections_enforced_total` for the total number of ejections that actually occurred. +* The outlier detection `ejections_consecutive_5xx` stats counter has been deprecated in favour of + `ejections_detected_consecutive_5xx` and `ejections_enforced_consecutive_5xx`. +* The outlier detection `ejections_success_rate` stats counter has been deprecated in favour of + `ejections_detected_success_rate` and `ejections_enforced_success_rate`. + +Version 1.4.0 (Aug 24, 2017) +============================ +* Config option `statsd_local_udp_port` has been deprecated and has been replaced with + `statsd_udp_ip_address`. +* `HttpFilterConfigFactory` filter API has been deprecated in favor of `NamedHttpFilterConfigFactory`. +* Config option `http_codec_options` has been deprecated and has been replaced with `http2_settings`. +* The following log macros have been deprecated: `log_trace`, `log_debug`, `conn_log`, + `conn_log_info`, `conn_log_debug`, `conn_log_trace`, `stream_log`, `stream_log_info`, + `stream_log_debug`, `stream_log_trace`. For replacements, please see + `logger.h `_. +* The connectionId() and ssl() callbacks of StreamFilterCallbacks have been deprecated and + replaced with a more general connection() callback, which, when not returning a nullptr, can be + used to get the connection id and SSL connection from the returned Connection object pointer. +* The protobuf stub gRPC support via `Grpc::RpcChannelImpl` is now replaced with `Grpc::AsyncClientImpl`. + This no longer uses `protoc` generated stubs but instead utilizes C++ template generation of the + RPC stubs. `Grpc::AsyncClientImpl` supports streaming, in addition to the previous unary, RPCs. +* The direction of network and HTTP filters in the configuration will be ignored from 1.4.0 and + later removed from the configuration in the v2 APIs. Filter direction is now implied at the C++ type + level. 
The `type()` methods on the `NamedNetworkFilterConfigFactory` and + `NamedHttpFilterConfigFactory` interfaces have been removed to reflect this. diff --git a/docs/root/intro/intro.rst b/docs/root/intro/intro.rst index 014f89650d955..472683b2f9665 100644 --- a/docs/root/intro/intro.rst +++ b/docs/root/intro/intro.rst @@ -12,3 +12,5 @@ Introduction comparison getting_help version_history + deprecated + \ No newline at end of file diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 08ed0c9fa4d00..e1ac8a33084cf 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -1,45 +1,143 @@ Version history --------------- -1.10.0 (pending) +1.11.0 (Pending) ================ +* access log: added a new field for response code details in :ref:`file access logger` and :ref:`gRPC access logger`. +* api: track and report requests issued since last load report. +* build: releases are built with Clang and linked with LLD. +* dubbo_proxy: support the :ref:`Dubbo proxy filter `. +* eds: added support to specify max time for which endpoints can be used :ref:`gRPC filter `. +* event: added :ref:`loop duration and poll delay statistics `. +* ext_authz: added a `x-envoy-auth-partial-body` metadata header set to `false|true` indicating if there is a partial body sent in the authorization request message. +* ext_authz: added option to `ext_authz` that allows the filter clearing route cache. +* health check: added :ref:`initial jitter ` to add jitter to the first health check in order to prevent thundering herd on Envoy startup. +* hot restart: stats are no longer shared between hot restart parent/child via shared memory, but rather by RPC. Hot restart version incremented to 11. +* http: fixed a bug where large unbufferable responses were not tracked in stats and logs correctly. +* http: fixed a crashing bug where gRPC local replies would cause segfaults when upstream access logging was on. 
+* http: mitigated a race condition with the :ref:`delayed_close_timeout` where it could trigger while actively flushing a pending write buffer for a downstream connection. +* jwt_authn: make filter's parsing of JWT more flexible, allowing syntax like ``jwt=eyJhbGciOiJS...ZFnFIw,extra=7,realm=123`` +* redis: add support for Redis cluster custom cluster type. +* redis: added :ref:`prefix routing ` to enable routing commands based on their key's prefix to different upstream. +* redis: add support for zpopmax and zpopmin commands. +* redis: added + :ref:`max_buffer_size_before_flush ` to batch commands together until the encoder buffer hits a certain size, and + :ref:`buffer_flush_timeout ` to control how quickly the buffer is flushed if it is not full. +* router: add support for configuring a :ref:`grpc timeout offset ` on incoming requests. +* router: added ability to control retry back-off intervals via :ref:`retry policy `. +* router: per try timeouts will no longer start before the downstream request has been received + in full by the router. This ensures that the per try timeout does not account for slow + downstreams and that will not start before the global timeout. +* runtime: added support for statically :ref:`specifying the runtime in the bootstrap configuration + `. +* server: ``--define manual_stamp=manual_stamp`` was added to allow server stamping outside of binary rules. + more info in the `bazel docs `_. +* upstream: added :ref:`upstream_cx_pool_overflow ` for the connection pool circuit breaker. +* upstream: an EDS management server can now force removal of a host that is still passing active + health checking by first marking the host as failed via EDS health check and subsequently removing + it in a future update. This is a mechanism to work around a race condition in which an EDS + implementation may remove a host before it has stopped passing active HC, thus causing the host + to become stranded until a future update. 
+* upstream: added :ref:`an option ` + that allows ignoring new hosts for the purpose of load balancing calculations until they have + been health checked for the first time. +* upstream: added runtime error checking to prevent setting dns type to STRICT_DNS or LOGICAL_DNS when custom resolver name is specified. +* grpc-json: added support for :ref:`auto mapping + `. + +1.10.0 (Apr 5, 2019) +==================== * access log: added a new flag for upstream retry count exceeded. +* access log: added a :ref:`gRPC filter ` to allow filtering on gRPC status. +* access log: added a new flag for stream idle timeout. +* access log: added a new field for upstream transport failure reason in :ref:`file access logger` and + :ref:`gRPC access logger` for HTTP access logs. +* access log: added new fields for downstream x509 information (URI sans and subject) to file and gRPC access logger. * admin: the admin server can now be accessed via HTTP/2 (prior knowledge). +* admin: changed HTTP response status code from 400 to 405 when attempting to GET a POST-only route (such as /quitquitquit). * buffer: fix vulnerabilities when allocation fails. +* build: releases are built with GCC-7 and linked with LLD. +* build: dev docker images :ref:`have been split ` from tagged images for easier + discoverability in Docker Hub. Additionally, we now build images for point releases. * config: added support of using google.protobuf.Any in opaque configs for extensions. * config: logging warnings when deprecated fields are in use. * config: removed deprecated --v2-config-only from command line config. * config: removed deprecated_v1 sds_config from :ref:`Bootstrap config `. +* config: removed the deprecated_v1 config option from :ref:`ring hash `. * config: removed REST_LEGACY as a valid :ref:`ApiType `. +* config: finish cluster warming only when a named response i.e. ClusterLoadAssignment associated to the cluster being warmed comes in the EDS response. 
This is a behavioural change from the current implementation where warming of cluster completes on missing load assignments also. +* config: use Envoy cpuset size to set the default number or worker threads if :option:`--cpuset-threads` is enabled. +* config: added support for :ref:`initial_fetch_timeout `. The timeout is disabled by default. * cors: added :ref:`filter_enabled & shadow_enabled RuntimeFractionalPercent flags ` to filter. -* ext_authz: migrated from V2alpha to V2 and improved docs. +* csrf: added :ref:`CSRF filter `. +* ext_authz: added support for buffering request body. +* ext_authz: migrated from v2alpha to v2 and improved docs. +* ext_authz: added a configurable option to make the gRPC service cross-compatible with V2Alpha. Note that this feature is already deprecated. It should be used for a short time, and only when transitioning from alpha to V2 release version. +* ext_authz: migrated from v2alpha to v2 and improved the documentation. * ext_authz: authorization request and response configuration has been separated into two distinct objects: :ref:`authorization request ` and :ref:`authorization response `. In addition, :ref:`client headers ` and :ref:`upstream headers ` replaces the previous *allowed_authorization_headers* object. All the control header lists now support :ref:`string matcher ` instead of standard string. +* fault: added the :ref:`max_active_faults + ` setting, as well as + :ref:`statistics ` for the number of active faults + and the number of faults the overflowed. +* fault: added :ref:`response rate limit + ` fault injection. +* fault: added :ref:`HTTP header fault configuration + ` to the HTTP fault filter. * governance: extending Envoy deprecation policy from 1 release (0-3 months) to 2 releases (3-6 months). +* health check: expected response codes in http health checks are now :ref:`configurable `. * http: added new grpc_http1_reverse_bridge filter for converting gRPC requests into HTTP/1.1 requests. 
* http: fixed a bug where Content-Length:0 was added to HTTP/1 204 responses. -* outlier_detection: added support for :ref:`outlier detection event protobuf-based logging `. -* mysql: added a MySQL proxy filter that is capable of parsing SQL queries over MySQL wire protocol. Refer to ::ref:`MySQL proxy` for more details. * http: added :ref:`max request headers size `. The default behaviour is unchanged. +* http: added modifyDecodingBuffer/modifyEncodingBuffer to allow modifying the buffered request/response data. +* http: added encodeComplete/decodeComplete. These are invoked at the end of the stream, after all data has been encoded/decoded respectively. Default implementation is a no-op. +* outlier_detection: added support for :ref:`outlier detection event protobuf-based logging `. +* mysql: added a MySQL proxy filter that is capable of parsing SQL queries over MySQL wire protocol. Refer to :ref:`MySQL proxy` for more details. +* performance: new buffer implementation (disabled by default; to test it, add "--use-libevent-buffers 0" to the command-line arguments when starting Envoy). +* jwt_authn: added :ref:`filter_state_rules ` to allow specifying requirements from filterState by other filters. +* ratelimit: removed deprecated rate limit configuration from bootstrap. * redis: added :ref:`hashtagging ` to guarantee a given key's upstream. * redis: added :ref:`latency stats ` for commands. * redis: added :ref:`success and error stats ` for commands. * redis: migrate hash function for host selection to `MurmurHash2 `_ from std::hash. MurmurHash2 is compatible with std::hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled on Linux and not macOS. +* redis: added :ref:`latency_in_micros ` to specify the redis commands stats time unit in microseconds. * router: added ability to configure a :ref:`retry policy ` at the virtual host level. +* router: added reset reason to response body when upstream reset happens. 
After this change, the response body will be of the form `upstream connect error or disconnect/reset before headers. reset reason:` +* router: added :ref:`rq_reset_after_downstream_response_started ` counter stat to router stats. +* router: added per-route configuration of :ref:`internal redirects `. +* router: removed deprecated route-action level headers_to_add/remove. +* router: made :ref: `max retries header ` take precedence over the number of retries in route and virtual host retry policies. +* router: added support for prefix wildcards in :ref:`virtual host domains` * stats: added support for histograms in prometheus * stats: added usedonly flag to prometheus stats to only output metrics which have been updated at least once. +* stats: added gauges tracking remaining resources before circuit breakers open. * tap: added new alpha :ref:`HTTP tap filter `. * tls: enabled TLS 1.3 on the server-side (non-FIPS builds). -* router: added per-route configuration of :ref:`internal redirects `. * upstream: add hash_function to specify the hash function for :ref:`ring hash` as either xxHash or `murmurHash2 `_. MurmurHash2 is compatible with std::hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled on Linux and not macOS. * upstream: added :ref:`degraded health value` which allows routing to certain hosts only when there are insufficient healthy hosts available. +* upstream: add cluster factory to allow creating and registering :ref:`custom cluster type`. +* upstream: added a :ref:`circuit breaker ` to limit the number of concurrent connection pools in use. +* tracing: added :ref:`verbose ` to support logging annotations on spans. +* upstream: added support for host weighting and :ref:`locality weighting ` in the :ref:`ring hash load balancer `, and added a :ref:`maximum_ring_size` config parameter to strictly bound the ring size. +* zookeeper: added a ZooKeeper proxy filter that parses ZooKeeper messages (requests/responses/events). 
+ Refer to :ref:`ZooKeeper proxy` for more details. +* upstream: added configuration option to select any host when the fallback policy fails. +* upstream: stopped incrementing upstream_rq_total for HTTP/1 conn pool when request is circuit broken. + +1.9.1 (Apr 2, 2019) +=================== +* http: fixed CVE-2019-9900 by rejecting HTTP/1.x headers with embedded NUL characters. +* http: fixed CVE-2019-9901 by normalizing HTTP paths prior to routing or L7 data plane processing. + This defaults off and is configurable via either HTTP connection manager :ref:`normalize_path + ` + or the :ref:`runtime `. 1.9.0 (Dec 20, 2018) ==================== @@ -230,7 +328,7 @@ Version history `. * upstream: added configuration option to the subset load balancer to take locality weights into account when selecting a host from a subset. -* upstream: require opt-in to use the :ref:`x-envoy-orignal-dst-host ` header +* upstream: require opt-in to use the :ref:`x-envoy-original-dst-host ` header for overriding destination address when using the :ref:`Original Destination ` load balancing policy. @@ -536,7 +634,7 @@ Version history * mongo filter: added :ref:`fault injection `. * mongo filter: added :ref:`"drain close" ` support. * outlier detection: added :ref:`HTTP gateway failure type `. - See `DEPRECATED.md `_ + See :ref:`deprecated log ` for outlier detection stats deprecations in this release. * redis: the :ref:`redis proxy filter ` is now considered production ready. @@ -560,7 +658,7 @@ Version history * runtime: added :ref:`comment capability `. * server: change default log level (:option:`-l`) to `info`. * stats: maximum stat/name sizes and maximum number of stats are now variable via the - :option:`--max-obj-name-len` and :option:`--max-stats` options. + `--max-obj-name-len` and `--max-stats` options. * tcp proxy: added :ref:`access logging `. * tcp proxy: added :ref:`configurable connect retries `. @@ -613,7 +711,7 @@ Version history * UDP `statsd_ip_address` option added. 
* Per-cluster DNS resolvers added. * :ref:`Fault filter ` enhancements and fixes. -* Several features are :repo:`deprecated as of the 1.4.0 release `. They +* Several features are :ref:`deprecated as of the 1.4.0 release `. They will be removed at the beginning of the 1.5.0 release cycle. We explicitly call out that the `HttpFilterConfigFactory` filter API has been deprecated in favor of `NamedHttpFilterConfigFactory`. diff --git a/docs/root/intro/what_is_envoy.rst b/docs/root/intro/what_is_envoy.rst index 7e97ec9248a15..1e75ab089c8f9 100644 --- a/docs/root/intro/what_is_envoy.rst +++ b/docs/root/intro/what_is_envoy.rst @@ -60,7 +60,7 @@ requests based on path, authority, content type, :ref:`runtime `_ is an RPC framework from Google that uses HTTP/2 +**gRPC support:** `gRPC `_ is an RPC framework from Google that uses HTTP/2 as the underlying multiplexed transport. Envoy :ref:`supports ` all of the HTTP/2 features required to be used as the routing and load balancing substrate for gRPC requests and responses. The two systems are very complementary. diff --git a/docs/root/operations/admin.rst b/docs/root/operations/admin.rst index d1e46de993ccf..1d67ce8b05924 100644 --- a/docs/root/operations/admin.rst +++ b/docs/root/operations/admin.rst @@ -25,6 +25,7 @@ modify different aspects of the server: admin: access_log_path: /tmp/admin_access.log + profile_path: /tmp/envoy.prof address: socket_address: { address: 127.0.0.1, port_value: 9901 } @@ -60,7 +61,7 @@ modify different aspects of the server: Cluster manager information - ``version_info`` string -- the version info string of the last loaded :ref:`CDS` update. - If envoy does not have :ref:`CDS` setup, the + If Envoy does not have :ref:`CDS` setup, the output will read ``version_info::static``. Cluster wide information @@ -135,7 +136,11 @@ modify different aspects of the server: .. http:post:: /cpuprofiler - Enable or disable the CPU profiler. Requires compiling with gperftools. 
+ Enable or disable the CPU profiler. Requires compiling with gperftools. The output file can be configured by admin.profile_path. + +.. http:post:: /heapprofiler + + Enable or disable the Heap profiler. Requires compiling with gperftools. The output file can be configured by admin.profile_path. .. _operations_admin_interface_healthcheck_fail: @@ -206,14 +211,13 @@ modify different aspects of the server: "service_node": "", "service_zone": "", "mode": "Serve", - "max_stats": "16384", - "max_obj_name_len": "60", "disable_hot_restart": false, "enable_mutex_tracing": false, "restart_epoch": 0, "file_flush_interval": "10s", "drain_time": "600s", - "parent_shutdown_time": "900s" + "parent_shutdown_time": "900s", + "cpuset_threads": false }, "uptime_current_epoch": "6s", "uptime_all_epochs": "6s" @@ -230,7 +234,7 @@ explanation of the output. Histograms will output the computed quantiles i.e P0,P25,P50,P75,P90,P99,P99.9 and P100. The output for each quantile will be in the form of (interval,cumulative) where interval value represents the summary since last flush interval and cumulative value represents the - summary since the start of envoy instance. "No recorded values" in the histogram output indicates + summary since the start of Envoy instance. "No recorded values" in the histogram output indicates that it has not been updated with a value. See :ref:`here ` for more information. @@ -374,7 +378,7 @@ explanation of the output. This endpoint is intended to be used as the stream source for `Hystrix dashboard `_. - a GET to this endpoint will trriger a stream of statistics from envoy in + a GET to this endpoint will trigger a stream of statistics from Envoy in `text/event-stream `_ format, as expected by the Hystrix dashboard. 
diff --git a/docs/root/operations/cli.rst b/docs/root/operations/cli.rst index 6e2b45d10ba22..2f9e3f976ed8b 100644 --- a/docs/root/operations/cli.rst +++ b/docs/root/operations/cli.rst @@ -76,6 +76,14 @@ following are the command line options that Envoy supports. `connection` component to run at `trace` level, you should pass ``upstream:debug,connection:trace`` to this flag. See ``ALL_LOGGER_IDS`` in :repo:`/source/common/common/logger.h` for a list of components. +.. option:: --cpuset-threads + + *(optional)* This flag is used to control the number of worker threads if :option:`--concurrency` is + not set. If enabled, the assigned cpuset size is used to determine the number of worker threads on + Linux-based systems. Otherwise the number of worker threads is set to the number of hardware threads + on the machine. You can read more about cpusets in the + `kernel documentation `_. + .. option:: --log-path *(optional)* The output file path where logs should be written. This file will be re-opened @@ -206,24 +214,12 @@ following are the command line options that Envoy supports. during a hot restart. See the :ref:`hot restart overview ` for more information. Defaults to 900 seconds (15 minutes). -.. option:: --max-obj-name-len - - *(optional)* The maximum name length (in bytes) of the name field in a cluster/route_config/listener. - This setting is typically used in scenarios where the cluster names are auto generated, and often exceed - the built-in limit of 60 characters. Defaults to 60, and it's not valid to set to less than 60. - .. attention:: - This setting affects the output of :option:`--hot-restart-version`. If you started envoy with this + This setting affects the output of :option:`--hot-restart-version`. If you started Envoy with this option set to a non default value, you should use the same option (and same value) for subsequent hot restarts. -.. option:: --max-stats - - *(optional)* The maximum number of stats that can be shared between hot-restarts. 
This setting - affects the output of :option:`--hot-restart-version`; the same value must be used to hot - restart. Defaults to 16384. It's not valid to set this larger than 100 million. - .. option:: --disable-hot-restart *(optional)* This flag disables Envoy hot restart for builds that have it enabled. By default, hot diff --git a/docs/root/operations/operations.rst b/docs/root/operations/operations.rst index 98005b9977ba6..3f1ada49c1ae5 100644 --- a/docs/root/operations/operations.rst +++ b/docs/root/operations/operations.rst @@ -13,3 +13,4 @@ Operations and administration runtime fs_flags traffic_tapping + performance diff --git a/docs/root/operations/performance.rst b/docs/root/operations/performance.rst new file mode 100644 index 0000000000000..d7066374f3ed4 --- /dev/null +++ b/docs/root/operations/performance.rst @@ -0,0 +1,51 @@ +.. _operations_performance: + +Performance +=========== + +Envoy is architected to optimize scalability and resource utilization by running an event loop on a +:ref:`small number of threads `. The "main" thread is responsible for +control plane processing, and each "worker" thread handles a portion of the data plane processing. +Envoy exposes two statistics to monitor performance of the event loops on all these threads. + +* **Loop duration:** Some amount of processing is done on each iteration of the event loop. This + amount will naturally vary with changes in load. However, if one or more threads have an unusually + long-tailed loop duration, it may indicate a performance issue. For example, work might not be + distributed fairly across the worker threads, or there may be a long blocking operation in an + extension that's impeding progress. + +* **Poll delay:** On each iteration of the event loop, the event dispatcher polls for I/O events + and "wakes up" either when some I/O events are ready to be processed or when a timeout fires, + whichever occurs first. 
In the case of a timeout, we can measure the difference between the + expected wakeup time and the actual wakeup time after polling; this difference is called the "poll + delay." It's normal to see some small poll delay, usually equal to the kernel scheduler's "time + slice" or "quantum"---this depends on the specific operating system on which Envoy is + running---but if this number elevates substantially above its normal observed baseline, it likely + indicates kernel scheduler delays. + +These statistics can be enabled by setting :ref:`enable_dispatcher_stats ` +to true. + +.. warning:: + + Note that enabling dispatcher stats records a value for each iteration of the event loop on every + thread. This should normally be minimal overhead, but when using + :ref:`statsd `, it will send each observed value over + the wire individually because the statsd protocol doesn't have any way to represent a histogram + summary. Be aware that this can be a very large volume of data. + +Statistics +---------- + +The event dispatcher for the main thread has a statistics tree rooted at *server.dispatcher.*, and +the event dispatcher for each worker thread has a statistics tree rooted at +*listener_manager.worker_.dispatcher.*, each with the following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + loop_duration_us, Histogram, Event loop durations in microseconds + poll_delay_us, Histogram, Polling delays in microseconds + +Note that any auxiliary threads are not included here. diff --git a/docs/root/operations/traffic_tapping.rst b/docs/root/operations/traffic_tapping.rst index c15819db82776..0170bf9ba4984 100644 --- a/docs/root/operations/traffic_tapping.rst +++ b/docs/root/operations/traffic_tapping.rst @@ -9,16 +9,17 @@ Envoy currently provides two experimental extensions that can tap traffic: information. * :ref:`Tap transport socket extension ` that can intercept traffic and write to a :ref:`protobuf trace file - `. 
The remainder of this document describes + `. The remainder of this document describes the configuration of the tap transport socket. Tap transport socket configuration ---------------------------------- -.. warning:: - This feature is experimental and has a known limitation that it will OOM for large traces on a - given socket. It can also be disabled in the build if there are security concerns, see - https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#disabling-extensions. +.. attention:: + + The tap transport socket is experimental and is currently under active development. There is + currently a very limited set of match conditions, output configuration, output sinks, etc. + Capabilities will be expanded over time and the configuration structures are likely to change. Tapping can be configured on :ref:`Listener ` and :ref:`Cluster @@ -40,7 +41,8 @@ or cluster. For a plain text socket this might look like: any_match: true output_config: sinks: - - file_per_tap: + - format: PROTO_BINARY + file_per_tap: path_prefix: /some/tap/path transport_socket: name: raw_buffer @@ -58,7 +60,8 @@ For a TLS socket, this will be: any_match: true output_config: sinks: - - file_per_tap: + - format: PROTO_BINARY + file_per_tap: path_prefix: /some/tap/path transport_socket: name: ssl @@ -72,6 +75,32 @@ TLS configuration on the listener or cluster, respectively. Each unique socket instance will generate a trace file prefixed with `path_prefix`. E.g. `/some/tap/path_0.pb`. +Buffered data limits +-------------------- + +For buffered socket taps, Envoy will limit the amount of body data that is tapped to avoid OOM +situations. The default limit is 1KiB for both received and transmitted data. +This is configurable via the :ref:`max_buffered_rx_bytes +` and +:ref:`max_buffered_tx_bytes +` settings. 
When a buffered +socket tap is truncated, the trace will indicate truncation via the :ref:`read_truncated +` and :ref:`write_truncated +` fields as well as the body +:ref:`truncated ` field. + +Streaming +--------- + +The tap transport socket supports both buffered and streaming, controlled by the :ref:`streaming +` setting. When buffering, +:ref:`SocketBufferedTrace ` messages are +emitted. When streaming, a series of :ref:`SocketStreamedTraceSegment +` are emitted. + +See the :ref:`HTTP tap filter streaming ` documentation for more +information. Most of the concepts overlap between the HTTP filter and the transport socket. + PCAP generation --------------- diff --git a/docs/root/start/sandboxes/cors.rst b/docs/root/start/sandboxes/cors.rst index 82c9de6712e86..3be513279bd83 100644 --- a/docs/root/start/sandboxes/cors.rst +++ b/docs/root/start/sandboxes/cors.rst @@ -9,7 +9,7 @@ access certain or all routes of your domain. Browsers use the presence of HTTP headers to determine if a response from a different origin is allowed. To help demonstrate how front-envoy can enforce CORS policies, we are -releasing a set of `docker compose `_ sandboxes +releasing a set of `docker compose `_ sandboxes that deploy a frontend and backend service on different origins, both behind front-envoy. @@ -33,30 +33,11 @@ The following documentation runs through the setup of both services. **Step 1: Install Docker** -Ensure that you have a recent versions of ``docker``, ``docker-compose`` and -``docker-machine`` installed. +Ensure that you have a recent versions of ``docker`` and ``docker-compose``. A simple way to achieve this is via the `Docker Toolbox `_. -**Step 2: Setup Docker Machines** - -First, let's create a couple of new machines which will hold the containers. - -Terminal 1 - -.. code-block:: console - - $ docker-machine create --driver virtualbox frontend - $ eval $(docker-machine env frontend) - -Terminal 2 - -.. 
code-block:: console - - $ docker-machine create --driver virtualbox backend - $ eval $(docker-machine env backend) - -**Step 3: Clone the Envoy repo and start all of our containers** +**Step 2: Clone the Envoy repo and start all of our containers** If you have not cloned the Envoy repo, clone it with ``git clone git@github.com:envoyproxy/envoy`` or ``git clone https://github.com/envoyproxy/envoy.git`` @@ -67,12 +48,14 @@ Terminal 1 $ pwd envoy/examples/cors/frontend + $ docker-compose pull $ docker-compose up --build -d $ docker-compose ps - Name Command State Ports - ----------------------------------------------------------------------------------------------------------------------------------------- - frontend_front-envoy_1_cb51c62edc96 /usr/bin/dumb-init -- /bin ... Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp - frontend_frontend-service_1_491cf87432cd /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp + + Name Command State Ports + ---------------------------------------------------------------------------------------------------------------------------- + frontend_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp + frontend_frontend-service_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp Terminal 2 @@ -82,22 +65,15 @@ Terminal 2 envoy/examples/cors/backend $ docker-compose up --build -d $ docker-compose ps - Name Command State Ports - ----------------------------------------------------------------------------------------------------------------------------------------- - backend_front-envoy_1_7f9d5039c86f /usr/bin/dumb-init -- /bin ... Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp - backend_backend-service_1_c7752ae7192a /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp - -**Step 4: Test Envoy's CORS capabilities** -You can now open a browser to view your frontend service. To find the IP of -your frontend service run the following command in terminal 1. - -.. 
code-block:: console + Name Command State Ports + -------------------------------------------------------------------------------------------------------------------------- + backend_backend-service_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp + backend_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8002->80/tcp, 0.0.0.0:8003->8001/tcp - $ docker-machine ip frontend +**Step 3: Test Envoy's CORS capabilities** -In terminal 2, run the same command to get the remote origin IP that the -frontend service will be making requests to. +You can now open a browser to view your frontend service at ``localhost:8000``. Results of the cross-origin request will be shown on the page under *Request Results*. Your browser's CORS enforcement logs can be found in the console. @@ -106,19 +82,19 @@ For example: .. code-block:: console - Access to XMLHttpRequest at 'http://192.168.99.100:8000/cors/disabled' from origin 'http://192.168.99.101:8000' + Access to XMLHttpRequest at 'http://192.168.99.100:8002/cors/disabled' from origin 'http://192.168.99.101:8000' has been blocked by CORS policy: No 'Access-Control-Allow-Origin' header is present on the requested resource. -**Step 6: Check stats of backend via admin** +**Step 4: Check stats of backend via admin** When Envoy runs, it can listen to ``admin`` requests if a port is configured. In the example -configs, the admin is bound to port ``8001``. +configs, the backend admin is bound to port ``8003``. -If you go to ``:8001/stats`` you will be able to view +If you go to ``localhost:8003/stats`` you will be able to view all of the Envoy stats for the backend. You should see the CORS stats for invalid and valid origins increment as you make requests from the frontend cluster. .. 
code-block:: none - http.ingress_http.cors.origin_invalid: 0 - http.ingress_http.cors.origin_valid: 0 + http.ingress_http.cors.origin_invalid: 2 + http.ingress_http.cors.origin_valid: 7 diff --git a/docs/root/start/sandboxes/fault_injection.rst b/docs/root/start/sandboxes/fault_injection.rst new file mode 100644 index 0000000000000..bf474769e13b6 --- /dev/null +++ b/docs/root/start/sandboxes/fault_injection.rst @@ -0,0 +1,4 @@ +Fault injection +=============== + +* :repo:`Fault Injection ` diff --git a/docs/root/start/sandboxes/front_proxy.rst b/docs/root/start/sandboxes/front_proxy.rst index 553f2e5debeea..e14d938cadfdb 100644 --- a/docs/root/start/sandboxes/front_proxy.rst +++ b/docs/root/start/sandboxes/front_proxy.rst @@ -6,7 +6,7 @@ Front Proxy To get a flavor of what Envoy has to offer as a front proxy, we are releasing a `docker compose `_ sandbox that deploys a front envoy and a couple of services (simple flask apps) colocated with a running -service envoy. The three containers will be deployed inside a virtual network +service Envoy. The three containers will be deployed inside a virtual network called ``envoymesh``. Below you can see a graphic showing the docker compose deployment: @@ -14,63 +14,57 @@ Below you can see a graphic showing the docker compose deployment: .. image:: /_static/docker_compose_v0.1.svg :width: 100% -All incoming requests are routed via the front envoy, which is acting as a reverse proxy sitting on +All incoming requests are routed via the front Envoy, which is acting as a reverse proxy sitting on the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` by docker compose -(see :repo:`/examples/front-proxy/docker-compose.yml`). Moreover, notice -that all traffic routed by the front envoy to the service containers is actually routed to the -service envoys (routes setup in :repo:`/examples/front-proxy/front-envoy.yaml`). In turn the service +(see :repo:`/examples/front-proxy/docker-compose.yaml`). 
Moreover, notice +that all traffic routed by the front Envoy to the service containers is actually routed to the +service Envoys (routes setup in :repo:`/examples/front-proxy/front-envoy.yaml`). In turn the service envoys route the request to the flask app via the loopback address (routes setup in :repo:`/examples/front-proxy/service-envoy.yaml`). This setup -illustrates the advantage of running service envoys collocated with your services: all requests are -handled by the service envoy, and efficiently routed to your services. +illustrates the advantage of running service Envoys collocated with your services: all requests are +handled by the service Envoy, and efficiently routed to your services. Running the Sandbox ~~~~~~~~~~~~~~~~~~~ -The following documentation runs through the setup of an envoy cluster organized +The following documentation runs through the setup of an Envoy cluster organized as is described in the image above. **Step 1: Install Docker** -Ensure that you have a recent versions of ``docker, docker-compose`` and -``docker-machine`` installed. +Ensure that you have recent versions of ``docker`` and ``docker-compose`` installed. A simple way to achieve this is via the `Docker Toolbox `_.
-**Step 2: Docker Machine setup** +**Step 2: Clone the Envoy repo, and start all of our containers** -First let's create a new machine which will hold the containers:: - - $ docker-machine create --driver virtualbox default - $ eval $(docker-machine env default) - -**Step 3: Clone the Envoy repo, and start all of our containers** - -If you have not cloned the envoy repo, clone it with ``git clone git@github.com:envoyproxy/envoy`` +If you have not cloned the Envoy repo, clone it with ``git clone git@github.com:envoyproxy/envoy`` or ``git clone https://github.com/envoyproxy/envoy.git``:: $ pwd envoy/examples/front-proxy + $ docker-compose pull $ docker-compose up --build -d $ docker-compose ps - Name Command State Ports - ------------------------------------------------------------------------------------------------------------- - example_service1_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp - example_service2_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp - example_front-envoy_1 /bin/sh -c /usr/local/bin/ ... Up 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp -**Step 4: Test Envoy's routing capabilities** + Name Command State Ports + -------------------------------------------------------------------------------------------------------------------------- + front-proxy_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp + front-proxy_service1_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp + front-proxy_service2_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp + +**Step 3: Test Envoy's routing capabilities** You can now send a request to both services via the front-envoy. For service1:: - $ curl -v $(docker-machine ip default):8000/service/1 + $ curl -v localhost:8000/service/1 * Trying 192.168.99.100... 
* Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) > GET /service/1 HTTP/1.1 > Host: 192.168.99.100:8000 - > User-Agent: curl/7.43.0 + > User-Agent: curl/7.54.0 > Accept: */* > < HTTP/1.1 200 OK @@ -78,20 +72,19 @@ For service1:: < content-length: 89 < x-envoy-upstream-service-time: 1 < server: envoy - < date: Fri, 26 Aug 2016 19:39:19 GMT - < x-envoy-protocol-version: HTTP/1.1 + < date: Fri, 26 Aug 2018 19:39:19 GMT < Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 * Connection #0 to host 192.168.99.100 left intact For service2:: - $ curl -v $(docker-machine ip default):8000/service/2 + $ curl -v localhost:8000/service/2 * Trying 192.168.99.100... * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) > GET /service/2 HTTP/1.1 > Host: 192.168.99.100:8000 - > User-Agent: curl/7.43.0 + > User-Agent: curl/7.54.0 > Accept: */* > < HTTP/1.1 200 OK @@ -99,28 +92,27 @@ For service2:: < content-length: 89 < x-envoy-upstream-service-time: 2 < server: envoy - < date: Fri, 26 Aug 2016 19:39:23 GMT - < x-envoy-protocol-version: HTTP/1.1 + < date: Fri, 26 Aug 2018 19:39:23 GMT < Hello from behind Envoy (service 2)! hostname: 92f4a3737bbc resolvedhostname: 172.19.0.2 * Connection #0 to host 192.168.99.100 left intact -Notice that each request, while sent to the front envoy, was correctly routed +Notice that each request, while sent to the front Envoy, was correctly routed to the respective application. -**Step 5: Test Envoy's load balancing capabilities** +**Step 4: Test Envoy's load balancing capabilities** -Now let's scale up our service1 nodes to demonstrate the clustering abilities -of envoy.:: +Now let's scale up our service1 nodes to demonstrate the load balancing abilities +of Envoy.:: $ docker-compose scale service1=3 Creating and starting example_service1_2 ... done Creating and starting example_service1_3 ... 
done -Now if we send a request to service1 multiple times, the front envoy will load balance the +Now if we send a request to service1 multiple times, the front Envoy will load balance the requests by doing a round robin of the three service1 machines:: - $ curl -v $(docker-machine ip default):8000/service/1 + $ curl -v localhost:8000/service/1 * Trying 192.168.99.100... * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) > GET /service/1 HTTP/1.1 @@ -133,17 +125,17 @@ requests by doing a round robin of the three service1 machines:: < content-length: 89 < x-envoy-upstream-service-time: 1 < server: envoy - < date: Fri, 26 Aug 2016 19:40:21 GMT + < date: Fri, 26 Aug 2018 19:40:21 GMT < x-envoy-protocol-version: HTTP/1.1 < Hello from behind Envoy (service 1)! hostname: 85ac151715c6 resolvedhostname: 172.19.0.3 * Connection #0 to host 192.168.99.100 left intact - $ curl -v $(docker-machine ip default):8000/service/1 + $ curl -v localhost:8000/service/1 * Trying 192.168.99.100... * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) > GET /service/1 HTTP/1.1 > Host: 192.168.99.100:8000 - > User-Agent: curl/7.43.0 + > User-Agent: curl/7.54.0 > Accept: */* > < HTTP/1.1 200 OK @@ -151,12 +143,11 @@ requests by doing a round robin of the three service1 machines:: < content-length: 89 < x-envoy-upstream-service-time: 1 < server: envoy - < date: Fri, 26 Aug 2016 19:40:22 GMT - < x-envoy-protocol-version: HTTP/1.1 + < date: Fri, 26 Aug 2018 19:40:22 GMT < Hello from behind Envoy (service 1)! hostname: 20da22cfc955 resolvedhostname: 172.19.0.5 * Connection #0 to host 192.168.99.100 left intact - $ curl -v $(docker-machine ip default):8000/service/1 + $ curl -v localhost:8000/service/1 * Trying 192.168.99.100... 
* Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) > GET /service/1 HTTP/1.1 @@ -169,13 +160,13 @@ requests by doing a round robin of the three service1 machines:: < content-length: 89 < x-envoy-upstream-service-time: 1 < server: envoy - < date: Fri, 26 Aug 2016 19:40:24 GMT + < date: Fri, 26 Aug 2018 19:40:24 GMT < x-envoy-protocol-version: HTTP/1.1 < Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 * Connection #0 to host 192.168.99.100 left intact -**Step 6: enter containers and curl services** +**Step 5: enter containers and curl services** In addition of using ``curl`` from your host machine, you can also enter the containers themselves and ``curl`` from inside them. To enter a container you @@ -192,9 +183,9 @@ enter the ``front-envoy`` container, and ``curl`` for services locally:: root@81288499f9d7:/# curl localhost:80/service/2 Hello from behind Envoy (service 2)! hostname: 92f4a3737bbc resolvedhostname: 172.19.0.2 -**Step 7: enter containers and curl admin** +**Step 6: enter containers and curl admin** -When envoy runs it also attaches an ``admin`` to your desired port. In the example +When Envoy runs it also attaches an ``admin`` to your desired port. In the example configs the admin is bound to port ``8001``. We can ``curl`` it to gain useful information. For example you can ``curl`` ``/server_info`` to get information about the envoy version you are running. Additionally you can ``curl`` ``/stats`` to get @@ -202,7 +193,43 @@ statistics. For example inside ``frontenvoy`` we can get:: $ docker-compose exec front-envoy /bin/bash root@e654c2c83277:/# curl localhost:8001/server_info - envoy 10e00b/RELEASE live 142 142 0 + +.. 
code-block:: json + + { + "version": "3ba949a9cb5b0b1cccd61e76159969a49377fd7d/1.10.0-dev/Clean/RELEASE/BoringSSL", + "state": "LIVE", + "command_line_options": { + "base_id": "0", + "concurrency": 4, + "config_path": "/etc/front-envoy.yaml", + "config_yaml": "", + "allow_unknown_fields": false, + "admin_address_path": "", + "local_address_ip_version": "v4", + "log_level": "info", + "component_log_level": "", + "log_format": "[%Y-%m-%d %T.%e][%t][%l][%n] %v", + "log_path": "", + "hot_restart_version": false, + "service_cluster": "front-proxy", + "service_node": "", + "service_zone": "", + "mode": "Serve", + "disable_hot_restart": false, + "enable_mutex_tracing": false, + "restart_epoch": 0, + "cpuset_threads": false, + "file_flush_interval": "10s", + "drain_time": "600s", + "parent_shutdown_time": "900s" + }, + "uptime_current_epoch": "401s", + "uptime_all_epochs": "401s" + } + +.. code-block:: text + root@e654c2c83277:/# curl localhost:8001/stats cluster.service1.external.upstream_rq_200: 7 ... diff --git a/docs/root/start/sandboxes/grpc_bridge.rst b/docs/root/start/sandboxes/grpc_bridge.rst index 09798b3e1291b..aa61e60742699 100644 --- a/docs/root/start/sandboxes/grpc_bridge.rst +++ b/docs/root/start/sandboxes/grpc_bridge.rst @@ -23,10 +23,10 @@ To build the Go gRPC service run:: $ pwd envoy/examples/grpc-bridge - $ script/bootstrap - $ script/build + $ script/bootstrap.sh + $ script/build.sh -Note: ``build`` requires that your Envoy codebase (or a working copy thereof) is in ``$GOPATH/src/github.com/envoyproxy/envoy``. +Note: ``build.sh`` requires that your Envoy codebase (or a working copy thereof) is in ``$GOPATH/src/github.com/envoyproxy/envoy``. 
Docker compose ~~~~~~~~~~~~~~ @@ -36,6 +36,7 @@ run:: $ pwd envoy/examples/grpc-bridge + $ docker-compose pull $ docker-compose up --build Sending requests to the Key/Value store diff --git a/docs/root/start/sandboxes/jaeger_native_tracing.rst b/docs/root/start/sandboxes/jaeger_native_tracing.rst index 7adb86871be66..07193e03f9740 100644 --- a/docs/root/start/sandboxes/jaeger_native_tracing.rst +++ b/docs/root/start/sandboxes/jaeger_native_tracing.rst @@ -4,7 +4,7 @@ Jaeger Native Tracing ===================== The Jaeger tracing sandbox demonstrates Envoy's :ref:`request tracing ` -capabilities using `Jaeger `_ as the tracing provider and Jaeger's native +capabilities using `Jaeger `_ as the tracing provider and Jaeger's native `C++ client `_ as a plugin. Using Jaeger with its native client instead of with Envoy's builtin Zipkin client has the following advantages: @@ -20,15 +20,15 @@ service1 makes an API call to service2 before returning a response. The three containers will be deployed inside a virtual network called ``envoymesh``. (Note: the sandbox only works on x86-64). -All incoming requests are routed via the front envoy, which is acting as a reverse proxy +All incoming requests are routed via the front Envoy, which is acting as a reverse proxy sitting on the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` -by docker compose (see :repo:`/examples/jaeger-native-tracing/docker-compose.yml`). Notice that -all envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in +by docker compose (see :repo:`/examples/jaeger-native-tracing/docker-compose.yaml`). 
Notice that +all Envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in :repo:`/examples/jaeger-native-tracing/front-envoy-jaeger.yaml`) and setup to propagate the spans generated by the Jaeger tracer to a Jaeger cluster (trace driver setup in :repo:`/examples/jaeger-native-tracing/front-envoy-jaeger.yaml`). -Before routing a request to the appropriate service envoy or the application, Envoy will take +Before routing a request to the appropriate service Envoy or the application, Envoy will take care of generating the appropriate spans for tracing (parent/child context spans). At a high-level, each span records the latency of upstream API calls as well as information needed to correlate the span with other related spans (e.g., the trace ID). @@ -44,7 +44,7 @@ the trace headers while making an outbound call to service2. Running the Sandbox ~~~~~~~~~~~~~~~~~~~ -The following documentation runs through the setup of an envoy cluster organized +The following documentation runs through the setup of an Envoy cluster organized as is described in the image above. **Step 1: Build the sandbox** @@ -53,33 +53,35 @@ To build this sandbox example, and start the example apps run the following comm $ pwd envoy/examples/jaeger-native-tracing + $ docker-compose pull $ docker-compose up --build -d $ docker-compose ps - Name Command State Ports - ------------------------------------------------------------------------------------------------------------- - jaegertracing_service1_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp - jaegertracing_service2_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp - jaegertracing_front-envoy_1 /bin/sh -c /usr/local/bin/ ... 
Up 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp + + Name Command State Ports + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + jaeger-native-tracing_front-envoy_1 /start-front.sh Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp + jaeger-native-tracing_jaeger_1 /go/bin/all-in-one-linux - ... Up 14250/tcp, 14268/tcp, 0.0.0.0:16686->16686/tcp, 5775/udp, 5778/tcp, 6831/udp, 6832/udp, 0.0.0.0:9411->9411/tcp + jaeger-native-tracing_service1_1 /start-service.sh Up 10000/tcp, 80/tcp + jaeger-native-tracing_service2_1 /start-service.sh Up 10000/tcp, 80/tcp **Step 2: Generate some load** You can now send a request to service1 via the front-envoy as follows:: - $ curl -v $(docker-machine ip default):8000/trace/1 + $ curl -v localhost:8000/trace/1 * Trying 192.168.99.100... * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) > GET /trace/1 HTTP/1.1 > Host: 192.168.99.100:8000 - > User-Agent: curl/7.43.0 + > User-Agent: curl/7.54.0 > Accept: */* > < HTTP/1.1 200 OK < content-type: text/html; charset=utf-8 < content-length: 89 - < x-envoy-upstream-service-time: 1 + < x-envoy-upstream-service-time: 9 < server: envoy - < date: Fri, 26 Aug 2016 19:39:19 GMT - < x-envoy-protocol-version: HTTP/1.1 + < date: Fri, 26 Aug 2018 19:39:19 GMT < Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 * Connection #0 to host 192.168.99.100 left intact diff --git a/docs/root/start/sandboxes/jaeger_tracing.rst b/docs/root/start/sandboxes/jaeger_tracing.rst index 0c17b8181438c..bad25e5bd26fc 100644 --- a/docs/root/start/sandboxes/jaeger_tracing.rst +++ b/docs/root/start/sandboxes/jaeger_tracing.rst @@ -4,20 +4,20 @@ Jaeger Tracing ============== The Jaeger tracing sandbox demonstrates Envoy's :ref:`request tracing ` -capabilities using `Jaeger `_ as the tracing provider. 
This sandbox +capabilities using `Jaeger `_ as the tracing provider. This sandbox is very similar to the front proxy architecture described above, with one difference: service1 makes an API call to service2 before returning a response. The three containers will be deployed inside a virtual network called ``envoymesh``. -All incoming requests are routed via the front envoy, which is acting as a reverse proxy +All incoming requests are routed via the front Envoy, which is acting as a reverse proxy sitting on the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` -by docker compose (see :repo:`/examples/jaeger-tracing/docker-compose.yml`). Notice that -all envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in +by docker compose (see :repo:`/examples/jaeger-tracing/docker-compose.yaml`). Notice that +all Envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in :repo:`/examples/jaeger-tracing/front-envoy-jaeger.yaml`) and setup to propagate the spans generated by the Jaeger tracer to a Jaeger cluster (trace driver setup in :repo:`/examples/jaeger-tracing/front-envoy-jaeger.yaml`). -Before routing a request to the appropriate service envoy or the application, Envoy will take +Before routing a request to the appropriate service Envoy or the application, Envoy will take care of generating the appropriate spans for tracing (parent/child context spans). At a high-level, each span records the latency of upstream API calls as well as information needed to correlate the span with other related spans (e.g., the trace ID). @@ -33,7 +33,7 @@ the trace headers while making an outbound call to service2. Running the Sandbox ~~~~~~~~~~~~~~~~~~~ -The following documentation runs through the setup of an envoy cluster organized +The following documentation runs through the setup of an Envoy cluster organized as is described in the image above. 
**Step 1: Build the sandbox** @@ -42,33 +42,35 @@ To build this sandbox example, and start the example apps run the following comm $ pwd envoy/examples/jaeger-tracing + $ docker-compose pull $ docker-compose up --build -d $ docker-compose ps - Name Command State Ports - ------------------------------------------------------------------------------------------------------------- - jaegertracing_service1_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp - jaegertracing_service2_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp - jaegertracing_front-envoy_1 /bin/sh -c /usr/local/bin/ ... Up 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp + + Name Command State Ports + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + jaeger-tracing_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp + jaeger-tracing_jaeger_1 /go/bin/all-in-one-linux - ... Up 14250/tcp, 14268/tcp, 0.0.0.0:16686->16686/tcp, 5775/udp, 5778/tcp, 6831/udp, 6832/udp, 0.0.0.0:9411->9411/tcp + jaeger-tracing_service1_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp + jaeger-tracing_service2_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp **Step 2: Generate some load** You can now send a request to service1 via the front-envoy as follows:: - $ curl -v $(docker-machine ip default):8000/trace/1 + $ curl -v localhost:8000/trace/1 * Trying 192.168.99.100... 
* Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) > GET /trace/1 HTTP/1.1 > Host: 192.168.99.100:8000 - > User-Agent: curl/7.43.0 + > User-Agent: curl/7.54.0 > Accept: */* > < HTTP/1.1 200 OK < content-type: text/html; charset=utf-8 < content-length: 89 - < x-envoy-upstream-service-time: 1 + < x-envoy-upstream-service-time: 9 < server: envoy - < date: Fri, 26 Aug 2016 19:39:19 GMT - < x-envoy-protocol-version: HTTP/1.1 + < date: Fri, 26 Aug 2018 19:39:19 GMT < Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 * Connection #0 to host 192.168.99.100 left intact diff --git a/docs/root/start/sandboxes/lua.rst b/docs/root/start/sandboxes/lua.rst new file mode 100644 index 0000000000000..82c6686982ce2 --- /dev/null +++ b/docs/root/start/sandboxes/lua.rst @@ -0,0 +1,4 @@ +Lua +=== + +* :repo:`Lua ` diff --git a/docs/root/start/sandboxes/mysql.rst b/docs/root/start/sandboxes/mysql.rst new file mode 100644 index 0000000000000..648cb2f7e740a --- /dev/null +++ b/docs/root/start/sandboxes/mysql.rst @@ -0,0 +1,4 @@ +MySQL +===== + +* :repo:`MySQL ` diff --git a/docs/root/start/sandboxes/redis.rst b/docs/root/start/sandboxes/redis.rst new file mode 100644 index 0000000000000..9d81afc6cf22b --- /dev/null +++ b/docs/root/start/sandboxes/redis.rst @@ -0,0 +1,4 @@ +Redis +===== + +* :repo:`Redis ` diff --git a/docs/root/start/sandboxes/zipkin_tracing.rst b/docs/root/start/sandboxes/zipkin_tracing.rst index 53a97dcc8f3a0..c64ce82e9f39a 100644 --- a/docs/root/start/sandboxes/zipkin_tracing.rst +++ b/docs/root/start/sandboxes/zipkin_tracing.rst @@ -4,20 +4,20 @@ Zipkin Tracing ============== The Zipkin tracing sandbox demonstrates Envoy's :ref:`request tracing ` -capabilities using `Zipkin `_ as the tracing provider. This sandbox +capabilities using `Zipkin `_ as the tracing provider. 
This sandbox is very similar to the front proxy architecture described above, with one difference: service1 makes an API call to service2 before returning a response. The three containers will be deployed inside a virtual network called ``envoymesh``. -All incoming requests are routed via the front envoy, which is acting as a reverse proxy +All incoming requests are routed via the front Envoy, which is acting as a reverse proxy sitting on the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` -by docker compose (see :repo:`/examples/zipkin-tracing/docker-compose.yml`). Notice that -all envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in +by docker compose (see :repo:`/examples/zipkin-tracing/docker-compose.yaml`). Notice that +all Envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in :repo:`/examples/zipkin-tracing/front-envoy-zipkin.yaml`) and setup to propagate the spans generated by the Zipkin tracer to a Zipkin cluster (trace driver setup in :repo:`/examples/zipkin-tracing/front-envoy-zipkin.yaml`). -Before routing a request to the appropriate service envoy or the application, Envoy will take +Before routing a request to the appropriate service Envoy or the application, Envoy will take care of generating the appropriate spans for tracing (parent/child/shared context spans). At a high-level, each span records the latency of upstream API calls as well as information needed to correlate the span with other related spans (e.g., the trace ID). @@ -33,7 +33,7 @@ the trace headers while making an outbound call to service2. Running the Sandbox ~~~~~~~~~~~~~~~~~~~ -The following documentation runs through the setup of an envoy cluster organized +The following documentation runs through the setup of an Envoy cluster organized as is described in the image above. 
**Step 1: Build the sandbox** @@ -42,19 +42,22 @@ To build this sandbox example, and start the example apps run the following comm $ pwd envoy/examples/zipkin-tracing + $ docker-compose pull $ docker-compose up --build -d $ docker-compose ps - Name Command State Ports - ------------------------------------------------------------------------------------------------------------- - zipkintracing_service1_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp - zipkintracing_service2_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp - zipkintracing_front-envoy_1 /bin/sh -c /usr/local/bin/ ... Up 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp + + Name Command State Ports + ----------------------------------------------------------------------------------------------------------------------------- + zipkin-tracing_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp + zipkin-tracing_service1_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp + zipkin-tracing_service2_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp + zipkin-tracing_zipkin_1 /busybox/sh run.sh Up 9410/tcp, 0.0.0.0:9411->9411/tcp **Step 2: Generate some load** You can now send a request to service1 via the front-envoy as follows:: - $ curl -v $(docker-machine ip default):8000/trace/1 + $ curl -v localhost:8000/trace/1 * Trying 192.168.99.100... * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) > GET /trace/1 HTTP/1.1 @@ -67,8 +70,7 @@ You can now send a request to service1 via the front-envoy as follows:: < content-length: 89 < x-envoy-upstream-service-time: 1 < server: envoy - < date: Fri, 26 Aug 2016 19:39:19 GMT - < x-envoy-protocol-version: HTTP/1.1 + < date: Fri, 26 Aug 2018 19:39:19 GMT < Hello from behind Envoy (service 1)! 
hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 * Connection #0 to host 192.168.99.100 left intact @@ -76,7 +78,6 @@ You can now send a request to service1 via the front-envoy as follows:: **Step 3: View the traces in Zipkin UI** Point your browser to http://localhost:9411 . You should see the Zipkin dashboard. -If this ip address is incorrect, you can find the correct one by running: ``$ docker-machine ip default``. Set the service to "front-proxy" and set the start time to a few minutes before the start of the test (step 2) and hit enter. You should see traces from the front-proxy. Click on a trace to explore the path taken by the request from front-proxy to service1 diff --git a/docs/root/start/start.rst b/docs/root/start/start.rst index 9ba821c26b049..32c256d4f8ae1 100644 --- a/docs/root/start/start.rst +++ b/docs/root/start/start.rst @@ -22,10 +22,12 @@ the same configuration. A very minimal Envoy configuration that can be used to validate basic plain HTTP proxying is available in :repo:`configs/google_com_proxy.v2.yaml`. This is not -intended to represent a realistic Envoy deployment:: +intended to represent a realistic Envoy deployment: - $ docker pull envoyproxy/envoy:latest - $ docker run --rm -d -p 10000:10000 envoyproxy/envoy:latest +.. 
substitution-code-block:: none + + $ docker pull envoyproxy/|envoy_docker_image| + $ docker run --rm -d -p 10000:10000 envoyproxy/|envoy_docker_image| $ curl -v localhost:10000 The Docker image used will contain the latest version of Envoy @@ -68,7 +70,8 @@ The specification of the :ref:`listeners ` # Comment out the following line to test on v6 networks dns_lookup_family: V4_ONLY lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: google.com, port_value: 443 }}] - tls_context: { sni: www.google.com } + load_assignment: + cluster_name: service_google + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: www.google.com + port_value: 443 + tls_context: + sni: www.google.com Using the Envoy Docker Image @@ -103,9 +115,9 @@ Using the Envoy Docker Image Create a simple Dockerfile to execute Envoy, which assumes that envoy.yaml (described above) is in your local directory. You can refer to the :ref:`Command line options `. -.. code-block:: none +.. substitution-code-block:: none - FROM envoyproxy/envoy:latest + FROM envoyproxy/|envoy_docker_image| COPY envoy.yaml /etc/envoy/envoy.yaml Build the Docker image that runs your configuration using:: @@ -120,15 +132,15 @@ And finally, test it using:: $ curl -v localhost:10000 -If you would like to use envoy with docker-compose you can overwrite the provided configuration file +If you would like to use Envoy with docker-compose you can overwrite the provided configuration file by using a volume. -.. code-block: yaml +.. substitution-code-block: yaml version: '3' services: envoy: - image: envoyproxy/envoy:latest + image: envoyproxy/|envoy_docker_image| ports: - "10000:10000" volumes: @@ -147,13 +159,14 @@ features. 
The following sandboxes are available: :maxdepth: 1 sandboxes/cors - Fault Injection + sandboxes/fault_injection sandboxes/front_proxy sandboxes/grpc_bridge sandboxes/jaeger_native_tracing sandboxes/jaeger_tracing - Lua - Redis + sandboxes/lua + sandboxes/mysql + sandboxes/redis sandboxes/zipkin_tracing Other use cases diff --git a/examples/cors/backend/Dockerfile-frontenvoy b/examples/cors/backend/Dockerfile-frontenvoy index 98413046a146d..83b5ba806c6a1 100644 --- a/examples/cors/backend/Dockerfile-frontenvoy +++ b/examples/cors/backend/Dockerfile-frontenvoy @@ -1,4 +1,4 @@ -FROM envoyproxy/envoy:latest +FROM envoyproxy/envoy-dev:latest RUN apt-get update && apt-get -q install -y \ curl diff --git a/examples/cors/backend/Dockerfile-service b/examples/cors/backend/Dockerfile-service index 5e5013002cf55..89b5fc12736ec 100644 --- a/examples/cors/backend/Dockerfile-service +++ b/examples/cors/backend/Dockerfile-service @@ -1,4 +1,4 @@ -FROM envoyproxy/envoy-alpine:latest +FROM envoyproxy/envoy-alpine-dev:latest RUN apk update && apk add python3 bash RUN pip3 install -q Flask==0.11.1 diff --git a/examples/cors/backend/docker-compose.yml b/examples/cors/backend/docker-compose.yaml similarity index 90% rename from examples/cors/backend/docker-compose.yml rename to examples/cors/backend/docker-compose.yaml index 4ba0f2f4af464..987b4ef157bab 100644 --- a/examples/cors/backend/docker-compose.yml +++ b/examples/cors/backend/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '2' +version: "3.7" services: front-envoy: @@ -13,8 +13,8 @@ services: - "80" - "8001" ports: - - "8000:80" - - "8001:8001" + - "8002:80" + - "8003:8001" backend-service: build: diff --git a/examples/cors/backend/front-envoy.yaml b/examples/cors/backend/front-envoy.yaml index 26f2a4cecf986..21b32df7ca452 100644 --- a/examples/cors/backend/front-envoy.yaml +++ b/examples/cors/backend/front-envoy.yaml @@ -7,12 +7,14 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + 
typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: auto stat_prefix: ingress_http access_log: - name: envoy.file_access_log - config: + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/access.log" route_config: name: local_route @@ -62,19 +64,24 @@ static_resources: cluster: backend_service http_filters: - name: envoy.cors - config: {} + typed_config: {} - name: envoy.router - config: {} + typed_config: {} clusters: - name: backend_service connect_timeout: 0.25s type: strict_dns lb_policy: round_robin http2_protocol_options: {} - hosts: - - socket_address: - address: backendservice - port_value: 80 + load_assignment: + cluster_name: backend_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: backendservice + port_value: 80 admin: access_log_path: "/dev/null" address: diff --git a/examples/cors/backend/service-envoy.yaml b/examples/cors/backend/service-envoy.yaml index f358d2209f69c..f03491544e299 100644 --- a/examples/cors/backend/service-envoy.yaml +++ b/examples/cors/backend/service-envoy.yaml @@ -7,7 +7,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: auto stat_prefix: ingress_http route_config: @@ -23,16 +24,21 @@ static_resources: cluster: local_service http_filters: - name: envoy.router - config: {} + typed_config: {} clusters: - name: local_service connect_timeout: 0.25s type: strict_dns lb_policy: round_robin - hosts: - - socket_address: - address: 127.0.0.1 - port_value: 8080 + load_assignment: + cluster_name: local_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8080 admin: access_log_path: "/dev/null" address: diff --git 
a/examples/cors/frontend/Dockerfile-frontenvoy b/examples/cors/frontend/Dockerfile-frontenvoy index 98413046a146d..83b5ba806c6a1 100644 --- a/examples/cors/frontend/Dockerfile-frontenvoy +++ b/examples/cors/frontend/Dockerfile-frontenvoy @@ -1,4 +1,4 @@ -FROM envoyproxy/envoy:latest +FROM envoyproxy/envoy-dev:latest RUN apt-get update && apt-get -q install -y \ curl diff --git a/examples/cors/frontend/Dockerfile-service b/examples/cors/frontend/Dockerfile-service index 6ba3b484e833a..8d882faa172fc 100644 --- a/examples/cors/frontend/Dockerfile-service +++ b/examples/cors/frontend/Dockerfile-service @@ -1,4 +1,4 @@ -FROM envoyproxy/envoy-alpine:latest +FROM envoyproxy/envoy-alpine-dev:latest RUN apk update && apk add python3 bash RUN pip3 install -q Flask==0.11.1 diff --git a/examples/cors/frontend/docker-compose.yml b/examples/cors/frontend/docker-compose.yaml similarity index 97% rename from examples/cors/frontend/docker-compose.yml rename to examples/cors/frontend/docker-compose.yaml index b29dc7f7d37cd..96b19d222e431 100644 --- a/examples/cors/frontend/docker-compose.yml +++ b/examples/cors/frontend/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '2' +version: "3.7" services: front-envoy: diff --git a/examples/cors/frontend/front-envoy.yaml b/examples/cors/frontend/front-envoy.yaml index 357809fbcadbc..c2379d5efcd97 100644 --- a/examples/cors/frontend/front-envoy.yaml +++ b/examples/cors/frontend/front-envoy.yaml @@ -7,12 +7,14 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: auto stat_prefix: ingress_http access_log: - name: envoy.file_access_log - config: + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/access.log" route_config: name: local_route @@ -27,19 +29,24 @@ static_resources: cluster: frontend_service http_filters: - name: 
envoy.cors - config: {} + typed_config: {} - name: envoy.router - config: {} + typed_config: {} clusters: - name: frontend_service connect_timeout: 0.25s type: strict_dns lb_policy: round_robin http2_protocol_options: {} - hosts: - - socket_address: - address: frontendservice - port_value: 80 + load_assignment: + cluster_name: frontend_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: frontendservice + port_value: 80 admin: access_log_path: "/dev/null" address: diff --git a/examples/cors/frontend/index.html b/examples/cors/frontend/index.html index 900ba06537657..30e31d51c51b2 100644 --- a/examples/cors/frontend/index.html +++ b/examples/cors/frontend/index.html @@ -11,7 +11,7 @@ var remoteIP = document.getElementById("remoteip").value; var enforcement = document.querySelector('input[name="cors"]:checked').value; if(client) { - var url = `http://${remoteIP}:8000/cors/${enforcement}`; + var url = `http://${remoteIP}:8002/cors/${enforcement}`; client.open('GET', url, true); client.onreadystatechange = handler; client.send(); @@ -43,12 +43,10 @@

It does NOT dispatch a preflight request.

- Enter the IP address of your docker machine.
- Hint: This can be found by running - docker-machine ip default. + Enter the IP address of backend Docker container. As we are running Docker Compose this should just be localhost.

- +
CORS Enforcement
diff --git a/examples/cors/frontend/service-envoy.yaml b/examples/cors/frontend/service-envoy.yaml index f358d2209f69c..f03491544e299 100644 --- a/examples/cors/frontend/service-envoy.yaml +++ b/examples/cors/frontend/service-envoy.yaml @@ -7,7 +7,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: auto stat_prefix: ingress_http route_config: @@ -23,16 +24,21 @@ static_resources: cluster: local_service http_filters: - name: envoy.router - config: {} + typed_config: {} clusters: - name: local_service connect_timeout: 0.25s type: strict_dns lb_policy: round_robin - hosts: - - socket_address: - address: 127.0.0.1 - port_value: 8080 + load_assignment: + cluster_name: local_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8080 admin: access_log_path: "/dev/null" address: diff --git a/examples/fault-injection/Dockerfile-envoy b/examples/fault-injection/Dockerfile-envoy index 421547df5a887..f4c09bae67c5e 100644 --- a/examples/fault-injection/Dockerfile-envoy +++ b/examples/fault-injection/Dockerfile-envoy @@ -1,4 +1,4 @@ -FROM envoyproxy/envoy:latest +FROM envoyproxy/envoy-dev:latest RUN apt-get update && apt-get install -y curl tree COPY enable_delay_fault_injection.sh disable_delay_fault_injection.sh enable_abort_fault_injection.sh disable_abort_fault_injection.sh send_request.sh / diff --git a/examples/fault-injection/README.md b/examples/fault-injection/README.md index a39a5aab153e9..80223e03cb0cd 100644 --- a/examples/fault-injection/README.md +++ b/examples/fault-injection/README.md @@ -8,6 +8,7 @@ support](https://www.envoyproxy.io/docs/envoy/latest/configuration/runtime) to c ## Usage ``` # in terminal 1, launch Envoy and backend service containers. 
+docker-compose pull docker-compose up # in terminal 2 diff --git a/examples/fault-injection/docker-compose.yml b/examples/fault-injection/docker-compose.yaml similarity index 97% rename from examples/fault-injection/docker-compose.yml rename to examples/fault-injection/docker-compose.yaml index b3b3f1da27c44..fe8ec0c9d68fb 100644 --- a/examples/fault-injection/docker-compose.yml +++ b/examples/fault-injection/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '2' +version: "3.7" services: envoy: build: diff --git a/examples/fault-injection/envoy.yaml b/examples/fault-injection/envoy.yaml index 13c943e64b970..3c42501078141 100644 --- a/examples/fault-injection/envoy.yaml +++ b/examples/fault-injection/envoy.yaml @@ -7,12 +7,14 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: auto stat_prefix: ingress_http access_log: name: envoy.file_access_log - config: + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: /dev/stdout route_config: name: local_route @@ -27,14 +29,15 @@ static_resources: cluster: local_service http_filters: - name: envoy.fault - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.fault.v2.HTTPFault abort: http_status: 503 percentage: numerator: 0 denominator: HUNDRED - name: envoy.router - config: {} + typed_config: {} clusters: - name: local_service connect_timeout: 1s diff --git a/examples/front-proxy/Dockerfile-frontenvoy b/examples/front-proxy/Dockerfile-frontenvoy index 98413046a146d..83b5ba806c6a1 100644 --- a/examples/front-proxy/Dockerfile-frontenvoy +++ b/examples/front-proxy/Dockerfile-frontenvoy @@ -1,4 +1,4 @@ -FROM envoyproxy/envoy:latest +FROM envoyproxy/envoy-dev:latest RUN apt-get update && apt-get -q install -y \ curl diff --git a/examples/front-proxy/Dockerfile-service 
b/examples/front-proxy/Dockerfile-service index ba7e5dbd2f011..c3f5bafefc19b 100644 --- a/examples/front-proxy/Dockerfile-service +++ b/examples/front-proxy/Dockerfile-service @@ -1,4 +1,4 @@ -FROM envoyproxy/envoy-alpine:latest +FROM envoyproxy/envoy-alpine-dev:latest RUN apk update && apk add python3 bash curl RUN pip3 install -q Flask==0.11.1 requests==2.18.4 diff --git a/examples/front-proxy/docker-compose.yml b/examples/front-proxy/docker-compose.yaml similarity index 98% rename from examples/front-proxy/docker-compose.yml rename to examples/front-proxy/docker-compose.yaml index 2c121d598b73c..34491c3636ce0 100644 --- a/examples/front-proxy/docker-compose.yml +++ b/examples/front-proxy/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '2' +version: "3.7" services: front-envoy: diff --git a/examples/front-proxy/front-envoy.yaml b/examples/front-proxy/front-envoy.yaml index d842cf3adc3b5..5bed8c8490176 100644 --- a/examples/front-proxy/front-envoy.yaml +++ b/examples/front-proxy/front-envoy.yaml @@ -7,7 +7,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: auto stat_prefix: ingress_http route_config: @@ -27,26 +28,36 @@ static_resources: cluster: service2 http_filters: - name: envoy.router - config: {} + typed_config: {} clusters: - name: service1 connect_timeout: 0.25s type: strict_dns lb_policy: round_robin http2_protocol_options: {} - hosts: - - socket_address: - address: service1 - port_value: 80 + load_assignment: + cluster_name: service1 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: service1 + port_value: 80 - name: service2 connect_timeout: 0.25s type: strict_dns lb_policy: round_robin http2_protocol_options: {} - hosts: - - socket_address: - address: service2 - port_value: 80 + load_assignment: + cluster_name: service2 + endpoints: + - 
lb_endpoints: + - endpoint: + address: + socket_address: + address: service2 + port_value: 80 admin: access_log_path: "/dev/null" address: diff --git a/examples/front-proxy/service-envoy.yaml b/examples/front-proxy/service-envoy.yaml index 4ae4562e5487c..90a466105b33b 100644 --- a/examples/front-proxy/service-envoy.yaml +++ b/examples/front-proxy/service-envoy.yaml @@ -7,7 +7,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: auto stat_prefix: ingress_http route_config: @@ -23,16 +24,21 @@ static_resources: cluster: local_service http_filters: - name: envoy.router - config: {} + typed_config: {} clusters: - name: local_service connect_timeout: 0.25s type: strict_dns lb_policy: round_robin - hosts: - - socket_address: - address: 127.0.0.1 - port_value: 8080 + load_assignment: + cluster_name: local_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8080 admin: access_log_path: "/dev/null" address: diff --git a/examples/front-proxy/service.py b/examples/front-proxy/service.py index 067fe2fa7a758..30e8d5219b313 100644 --- a/examples/front-proxy/service.py +++ b/examples/front-proxy/service.py @@ -1,9 +1,9 @@ from flask import Flask from flask import request -import socket import os -import sys import requests +import socket +import sys app = Flask(__name__) diff --git a/examples/grpc-bridge/Dockerfile-grpc b/examples/grpc-bridge/Dockerfile-grpc index 80d2b12e8ab18..679b0a728a01c 100644 --- a/examples/grpc-bridge/Dockerfile-grpc +++ b/examples/grpc-bridge/Dockerfile-grpc @@ -1,7 +1,7 @@ -FROM envoyproxy/envoy:latest +FROM envoyproxy/envoy-dev:latest RUN mkdir /var/log/envoy/ COPY ./bin/service /usr/local/bin/srv -COPY ./script/grpc_start /etc/grpc_start +COPY ./script/grpc_start.sh /etc/grpc_start CMD /etc/grpc_start diff 
--git a/examples/grpc-bridge/Dockerfile-python b/examples/grpc-bridge/Dockerfile-python index 02aa308c2acb1..e90c8a469a5c9 100644 --- a/examples/grpc-bridge/Dockerfile-python +++ b/examples/grpc-bridge/Dockerfile-python @@ -1,10 +1,10 @@ -FROM envoyproxy/envoy:latest +FROM envoyproxy/envoy-dev:latest RUN apt-get update RUN apt-get -q install -y python-dev \ python-pip -RUN pip install -q grpcio protobuf requests ADD ./client /client +RUN pip install -r /client/requirements.txt RUN chmod a+x /client/client.py RUN mkdir /var/log/envoy/ CMD /usr/local/bin/envoy -c /etc/s2s-python-envoy.yaml diff --git a/examples/grpc-bridge/client/client.py b/examples/grpc-bridge/client/client.py old mode 100644 new mode 100755 index 2ae336341a100..b1cba24bad2a4 --- a/examples/grpc-bridge/client/client.py +++ b/examples/grpc-bridge/client/client.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python import requests, sys import kv_pb2 as kv diff --git a/examples/grpc-bridge/client/requirements.txt b/examples/grpc-bridge/client/requirements.txt index 2f874cf2e4803..a86d6229e8b9d 100644 --- a/examples/grpc-bridge/client/requirements.txt +++ b/examples/grpc-bridge/client/requirements.txt @@ -1,2 +1,3 @@ requests>=2.20.0 grpcio +protobuf==3.7.1 diff --git a/examples/grpc-bridge/config/s2s-grpc-envoy.yaml b/examples/grpc-bridge/config/s2s-grpc-envoy.yaml index f4ca6ba29c53f..0f5e794739b83 100644 --- a/examples/grpc-bridge/config/s2s-grpc-envoy.yaml +++ b/examples/grpc-bridge/config/s2s-grpc-envoy.yaml @@ -7,7 +7,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: auto stat_prefix: ingress_http route_config: @@ -24,17 +25,22 @@ static_resources: cluster: local_service_grpc http_filters: - name: envoy.router - config: {} + typed_config: {} clusters: - name: local_service_grpc connect_timeout: 0.250s type: 
static lb_policy: round_robin http2_protocol_options: {} - hosts: - - socket_address: - address: 127.0.0.1 - port_value: 8081 + load_assignment: + cluster_name: local_service_grpc + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8081 admin: access_log_path: "/var/log/envoy/admin_access.log" address: diff --git a/examples/grpc-bridge/config/s2s-python-envoy.yaml b/examples/grpc-bridge/config/s2s-python-envoy.yaml index 1a0b18a3e68af..5538654f031a1 100644 --- a/examples/grpc-bridge/config/s2s-python-envoy.yaml +++ b/examples/grpc-bridge/config/s2s-python-envoy.yaml @@ -13,14 +13,16 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: auto add_user_agent: true idle_timeout: 0.840s access_log: - name: envoy.file_access_log - config: - path: "/var/log/envoy/egress_http.log" + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + path: "/var/log/envoy/admin_access.log" stat_prefix: egress_http use_remote_address: true route_config: @@ -36,9 +38,9 @@ static_resources: cluster: grpc http_filters: - name: envoy.grpc_http1_bridge - config: {} + typed_config: {} - name: envoy.router - config: {} + typed_config: {} clusters: - name: grpc type: logical_dns @@ -46,7 +48,12 @@ static_resources: lb_policy: round_robin connect_timeout: 0.250s http_protocol_options: {} - hosts: - - socket_address: - address: grpc - port_value: 9211 + load_assignment: + cluster_name: grpc + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: grpc + port_value: 9211 diff --git a/examples/grpc-bridge/docker-compose.yml b/examples/grpc-bridge/docker-compose.yaml similarity index 96% rename from examples/grpc-bridge/docker-compose.yml rename to examples/grpc-bridge/docker-compose.yaml index 
6cc1cf48843b2..25d5b1fb3cd36 100644 --- a/examples/grpc-bridge/docker-compose.yml +++ b/examples/grpc-bridge/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '2' +version: "3.7" services: python: diff --git a/examples/grpc-bridge/script/bootstrap b/examples/grpc-bridge/script/bootstrap.sh similarity index 84% rename from examples/grpc-bridge/script/bootstrap rename to examples/grpc-bridge/script/bootstrap.sh index 3d8207cb3d68b..d8ba5a4f242a0 100755 --- a/examples/grpc-bridge/script/bootstrap +++ b/examples/grpc-bridge/script/bootstrap.sh @@ -6,5 +6,6 @@ cd $(dirname $0)/.. echo "fetching dependencies..." go get golang.org/x/net/context +go get golang.org/x/sys/unix go get google.golang.org/grpc echo "done" diff --git a/examples/grpc-bridge/script/build b/examples/grpc-bridge/script/build.sh similarity index 100% rename from examples/grpc-bridge/script/build rename to examples/grpc-bridge/script/build.sh diff --git a/examples/grpc-bridge/script/grpc_start b/examples/grpc-bridge/script/grpc_start.sh similarity index 100% rename from examples/grpc-bridge/script/grpc_start rename to examples/grpc-bridge/script/grpc_start.sh diff --git a/examples/grpc-bridge/service/script/gen b/examples/grpc-bridge/service/script/gen index 5813e58d20556..bed331f80c1de 100755 --- a/examples/grpc-bridge/service/script/gen +++ b/examples/grpc-bridge/service/script/gen @@ -4,7 +4,7 @@ set -e cd $(dirname $0)/.. 
-rm -rf generated/* +rm -rf gen/* # generate the protobufs protoc --go_out=plugins=grpc:./gen \ diff --git a/examples/jaeger-native-tracing/docker-compose.yml b/examples/jaeger-native-tracing/docker-compose.yaml similarity index 92% rename from examples/jaeger-native-tracing/docker-compose.yml rename to examples/jaeger-native-tracing/docker-compose.yaml index 659c4da8bd6cd..3321e110cbb83 100644 --- a/examples/jaeger-native-tracing/docker-compose.yml +++ b/examples/jaeger-native-tracing/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '2' +version: "3.7" services: front-envoy: @@ -18,6 +18,9 @@ services: ports: - "8000:80" - "8001:8001" + dns: + - 8.8.8.8 + - 8.8.4.4 service1: build: @@ -36,6 +39,9 @@ services: - SERVICE_NAME=1 expose: - "80" + dns: + - 8.8.8.8 + - 8.8.4.4 service2: build: @@ -54,6 +60,9 @@ services: - SERVICE_NAME=2 expose: - "80" + dns: + - 8.8.8.8 + - 8.8.4.4 jaeger: image: jaegertracing/all-in-one diff --git a/examples/jaeger-native-tracing/front-envoy-jaeger.yaml b/examples/jaeger-native-tracing/front-envoy-jaeger.yaml index 0643a38132dd4..92163e2ef876d 100644 --- a/examples/jaeger-native-tracing/front-envoy-jaeger.yaml +++ b/examples/jaeger-native-tracing/front-envoy-jaeger.yaml @@ -7,7 +7,9 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + generate_request_id: true tracing: operation_name: egress codec_type: auto @@ -27,21 +29,28 @@ static_resources: operation: checkAvailability http_filters: - name: envoy.router - config: {} + typed_config: {} + use_remote_address: true clusters: - name: service1 connect_timeout: 0.250s type: strict_dns lb_policy: round_robin http2_protocol_options: {} - hosts: - - socket_address: - address: service1 - port_value: 80 + load_assignment: + cluster_name: service1 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 
service1 + port_value: 80 tracing: http: name: envoy.dynamic.ot - config: + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.DynamicOtConfig library: /usr/local/lib/libjaegertracing_plugin.so config: service_name: front-proxy diff --git a/examples/jaeger-native-tracing/service1-envoy-jaeger.yaml b/examples/jaeger-native-tracing/service1-envoy-jaeger.yaml index 2a18b588743e2..6c6ae595fbdb9 100644 --- a/examples/jaeger-native-tracing/service1-envoy-jaeger.yaml +++ b/examples/jaeger-native-tracing/service1-envoy-jaeger.yaml @@ -7,7 +7,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager tracing: operation_name: ingress codec_type: auto @@ -27,7 +28,7 @@ static_resources: operation: checkAvailability http_filters: - name: envoy.router - config: {} + typed_config: {} - address: socket_address: address: 0.0.0.0 @@ -35,7 +36,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager tracing: operation_name: egress codec_type: auto @@ -55,29 +57,40 @@ static_resources: operation: checkStock http_filters: - name: envoy.router - config: {} + typed_config: {} clusters: - name: local_service connect_timeout: 0.250s type: strict_dns lb_policy: round_robin - hosts: - - socket_address: - address: 127.0.0.1 - port_value: 8080 + load_assignment: + cluster_name: local_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8080 - name: service2 connect_timeout: 0.250s type: strict_dns lb_policy: round_robin http2_protocol_options: {} - hosts: - - socket_address: - address: service2 - port_value: 80 + load_assignment: + cluster_name: service2 + endpoints: + - lb_endpoints: + - 
endpoint: + address: + socket_address: + address: service2 + port_value: 80 tracing: http: name: envoy.dynamic.ot - config: + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.DynamicOtConfig library: /usr/local/lib/libjaegertracing_plugin.so config: service_name: service1 diff --git a/examples/jaeger-native-tracing/service2-envoy-jaeger.yaml b/examples/jaeger-native-tracing/service2-envoy-jaeger.yaml index 895224cd1a284..dae9d8e555330 100644 --- a/examples/jaeger-native-tracing/service2-envoy-jaeger.yaml +++ b/examples/jaeger-native-tracing/service2-envoy-jaeger.yaml @@ -7,7 +7,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager tracing: operation_name: ingress codec_type: auto @@ -27,20 +28,26 @@ static_resources: operation: checkStock http_filters: - name: envoy.router - config: {} + typed_config: {} clusters: - name: local_service connect_timeout: 0.250s type: strict_dns lb_policy: round_robin - hosts: - - socket_address: - address: 127.0.0.1 - port_value: 8080 + load_assignment: + cluster_name: local_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8080 tracing: http: name: envoy.dynamic.ot - config: + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.DynamicOtConfig library: /usr/local/lib/libjaegertracing_plugin.so config: service_name: service2 diff --git a/examples/jaeger-tracing/docker-compose.yml b/examples/jaeger-tracing/docker-compose.yaml similarity index 98% rename from examples/jaeger-tracing/docker-compose.yml rename to examples/jaeger-tracing/docker-compose.yaml index 2c75265724eaa..6c353fada6f49 100644 --- a/examples/jaeger-tracing/docker-compose.yml +++ b/examples/jaeger-tracing/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '2' +version: "3.7" services: front-envoy: diff --git 
a/examples/jaeger-tracing/front-envoy-jaeger.yaml b/examples/jaeger-tracing/front-envoy-jaeger.yaml index dba34e4c5993c..52cfdce8ff611 100644 --- a/examples/jaeger-tracing/front-envoy-jaeger.yaml +++ b/examples/jaeger-tracing/front-envoy-jaeger.yaml @@ -7,7 +7,9 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + generate_request_id: true tracing: operation_name: egress codec_type: auto @@ -27,29 +29,41 @@ static_resources: operation: checkAvailability http_filters: - name: envoy.router - config: {} + typed_config: {} + use_remote_address: true clusters: - name: service1 connect_timeout: 0.250s type: strict_dns lb_policy: round_robin http2_protocol_options: {} - hosts: - - socket_address: - address: service1 - port_value: 80 + load_assignment: + cluster_name: service1 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: service1 + port_value: 80 - name: jaeger connect_timeout: 1s type: strict_dns lb_policy: round_robin - hosts: - - socket_address: - address: jaeger - port_value: 9411 + load_assignment: + cluster_name: jaeger + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: jaeger + port_value: 9411 tracing: http: name: envoy.zipkin - config: + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig collector_cluster: jaeger collector_endpoint: "/api/v1/spans" shared_span_context: false diff --git a/examples/jaeger-tracing/service1-envoy-jaeger.yaml b/examples/jaeger-tracing/service1-envoy-jaeger.yaml index c9aa369341d19..7f06fe2da4417 100644 --- a/examples/jaeger-tracing/service1-envoy-jaeger.yaml +++ b/examples/jaeger-tracing/service1-envoy-jaeger.yaml @@ -7,7 +7,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": 
type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager tracing: operation_name: ingress codec_type: auto @@ -27,7 +28,7 @@ static_resources: operation: checkAvailability http_filters: - name: envoy.router - config: {} + typed_config: {} - address: socket_address: address: 0.0.0.0 @@ -35,7 +36,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager tracing: operation_name: egress codec_type: auto @@ -55,37 +57,53 @@ static_resources: operation: checkStock http_filters: - name: envoy.router - config: {} + typed_config: {} clusters: - name: local_service connect_timeout: 0.250s type: strict_dns lb_policy: round_robin - hosts: - - socket_address: - address: 127.0.0.1 - port_value: 8080 + load_assignment: + cluster_name: local_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8080 - name: service2 connect_timeout: 0.250s type: strict_dns lb_policy: round_robin http2_protocol_options: {} - hosts: - - socket_address: - address: service2 - port_value: 80 + load_assignment: + cluster_name: service2 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: service2 + port_value: 80 - name: jaeger connect_timeout: 1s type: strict_dns lb_policy: round_robin - hosts: - - socket_address: - address: jaeger - port_value: 9411 + load_assignment: + cluster_name: jaeger + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: jaeger + port_value: 9411 tracing: http: name: envoy.zipkin - config: + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig collector_cluster: jaeger collector_endpoint: "/api/v1/spans" shared_span_context: false diff --git a/examples/jaeger-tracing/service2-envoy-jaeger.yaml 
b/examples/jaeger-tracing/service2-envoy-jaeger.yaml index 6dbd16388c6cf..75a562ee41987 100644 --- a/examples/jaeger-tracing/service2-envoy-jaeger.yaml +++ b/examples/jaeger-tracing/service2-envoy-jaeger.yaml @@ -7,7 +7,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager tracing: operation_name: ingress codec_type: auto @@ -27,28 +28,39 @@ static_resources: operation: checkStock http_filters: - name: envoy.router - config: {} + typed_config: {} clusters: - name: local_service connect_timeout: 0.250s type: strict_dns lb_policy: round_robin - hosts: - - socket_address: - address: 127.0.0.1 - port_value: 8080 + load_assignment: + cluster_name: local_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8080 - name: jaeger connect_timeout: 1s type: strict_dns lb_policy: round_robin - hosts: - - socket_address: - address: jaeger - port_value: 9411 + load_assignment: + cluster_name: jaeger + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: jaeger + port_value: 9411 tracing: http: name: envoy.zipkin - config: + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig collector_cluster: jaeger collector_endpoint: "/api/v1/spans" shared_span_context: false diff --git a/examples/lua/Dockerfile-proxy b/examples/lua/Dockerfile-proxy index 26aaebb9ab6dc..92b320ea14879 100644 --- a/examples/lua/Dockerfile-proxy +++ b/examples/lua/Dockerfile-proxy @@ -1,2 +1,2 @@ -FROM envoyproxy/envoy:latest +FROM envoyproxy/envoy-dev:latest CMD /usr/local/bin/envoy -c /etc/envoy.yaml -l debug --service-cluster proxy diff --git a/examples/lua/README.md b/examples/lua/README.md index 59a3a0c1db352..54cca13022fd8 100644 --- a/examples/lua/README.md +++ b/examples/lua/README.md @@ -8,8 +8,8 @@ filter that contains 
two functions namely # Usage -1. `docker-compose build` -2. `docker-compose up` +1. `docker-compose pull` +2. `docker-compose up --build` 3. `curl -v localhost:8000` ## Sample Output: diff --git a/examples/lua/docker-compose.yaml b/examples/lua/docker-compose.yaml index 31ce3501ea1a8..2ee4860cfc48d 100644 --- a/examples/lua/docker-compose.yaml +++ b/examples/lua/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '2' +version: "3.7" services: proxy: diff --git a/examples/lua/envoy.yaml b/examples/lua/envoy.yaml index ccf85ca18fb14..aed6f977299b5 100644 --- a/examples/lua/envoy.yaml +++ b/examples/lua/envoy.yaml @@ -8,7 +8,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager stat_prefix: ingress_http codec_type: auto route_config: @@ -24,7 +25,8 @@ static_resources: cluster: web_service http_filters: - name: envoy.lua - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.lua.v2.Lua inline_code: | function envoy_on_request(request_handle) request_handle:headers():add("foo", "bar") @@ -34,17 +36,22 @@ static_resources: response_handle:headers():add("response-body-size", tostring(body_size)) end - name: envoy.router - config: {} + typed_config: {} clusters: - name: web_service connect_timeout: 0.25s type: strict_dns # static lb_policy: round_robin - hosts: - - socket_address: - address: web_service - port_value: 80 + load_assignment: + cluster_name: web_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: web_service + port_value: 80 admin: access_log_path: "/dev/null" address: diff --git a/examples/mysql/Dockerfile-mysql b/examples/mysql/Dockerfile-mysql new file mode 100644 index 0000000000000..cd8155a61f13d --- /dev/null +++ b/examples/mysql/Dockerfile-mysql @@ -0,0 +1 @@ +FROM mysql:5.5 diff --git a/examples/mysql/Dockerfile-proxy 
b/examples/mysql/Dockerfile-proxy new file mode 100644 index 0000000000000..ad18604cd0c78 --- /dev/null +++ b/examples/mysql/Dockerfile-proxy @@ -0,0 +1,3 @@ +FROM envoyproxy/envoy-dev:latest + +CMD /usr/local/bin/envoy -c /etc/envoy.yaml -l debug diff --git a/examples/mysql/README.md b/examples/mysql/README.md new file mode 100644 index 0000000000000..4bdd66fb989c6 --- /dev/null +++ b/examples/mysql/README.md @@ -0,0 +1,90 @@ +MySQL filter example +==================== + +In this example, we show how the [MySQL +filter](https://www.envoyproxy.io/docs/envoy/latest/configuration/network_filters/mysql_proxy_filter) +can be used with the Envoy proxy. The Envoy proxy [configuration](./envoy.yaml) +includes a MySQL filter that parses queries and collects MySQL-specific +metrics. + +## Usage + +Build and run the containers: + +```console +$ docker-compose pull +$ docker-compose up --build +``` + +Use `mysql` to issue some commands and verify they are routed via Envoy. Note +that the current implementation of the protocol filter was tested with MySQL +v5.5. It may, however, not work with other versions of MySQL due to differences +in the protocol implementation. + +```console +$ docker run --rm -it --network envoymesh mysql:5.5 mysql -h envoy -P 1999 -u root +... snip ... 
+ +mysql> CREATE DATABASE test; +Query OK, 1 row affected (0.00 sec) + +mysql> USE test; +Database changed + +mysql> CREATE TABLE test ( text VARCHAR(255) ); +Query OK, 0 rows affected (0.02 sec) + +mysql> SELECT COUNT(*) FROM test; ++----------+ +| COUNT(*) | ++----------+ +| 0 | ++----------+ +1 row in set (0.00 sec) + +mysql> INSERT INTO test VALUES ('hello, world!'); +Query OK, 1 row affected (0.01 sec) + +mysql> SELECT COUNT(*) FROM test; ++----------+ +| COUNT(*) | ++----------+ +| 1 | ++----------+ +1 row in set (0.00 sec) + +mysql> exit +Bye +``` + +Check that the egress stats were updated: + +```console +$ curl -s http://localhost:8001/stats?filter=egress_mysql +mysql.egress_mysql.auth_switch_request: 0 +mysql.egress_mysql.decoder_errors: 0 +mysql.egress_mysql.login_attempts: 1 +mysql.egress_mysql.login_failures: 0 +mysql.egress_mysql.protocol_errors: 0 +mysql.egress_mysql.queries_parse_error: 0 +mysql.egress_mysql.queries_parsed: 7 +mysql.egress_mysql.sessions: 1 +mysql.egress_mysql.upgraded_to_ssl: 0 +``` + +Check that the TCP stats were updated: + +```console +$ curl -s http://localhost:8001/stats?filter=mysql_tcp +tcp.mysql_tcp.downstream_cx_no_route: 0 +tcp.mysql_tcp.downstream_cx_rx_bytes_buffered: 0 +tcp.mysql_tcp.downstream_cx_rx_bytes_total: 347 +tcp.mysql_tcp.downstream_cx_total: 1 +tcp.mysql_tcp.downstream_cx_tx_bytes_buffered: 0 +tcp.mysql_tcp.downstream_cx_tx_bytes_total: 702 +tcp.mysql_tcp.downstream_flow_control_paused_reading_total: 0 +tcp.mysql_tcp.downstream_flow_control_resumed_reading_total: 0 +tcp.mysql_tcp.idle_timeout: 0 +tcp.mysql_tcp.upstream_flush_active: 0 +tcp.mysql_tcp.upstream_flush_total: 0 +``` diff --git a/examples/mysql/docker-compose.yaml b/examples/mysql/docker-compose.yaml new file mode 100644 index 0000000000000..d4b8b13e13c1e --- /dev/null +++ b/examples/mysql/docker-compose.yaml @@ -0,0 +1,38 @@ +version: "3.7" +services: + + proxy: + build: + context: . 
+ dockerfile: Dockerfile-proxy + volumes: + - ./envoy.yaml:/etc/envoy.yaml + networks: + envoymesh: + aliases: + - envoy + expose: + - "1999" + - "8001" + ports: + - "1999:1999" + - "8001:8001" + + mysql: + build: + context: . + dockerfile: Dockerfile-mysql + networks: + envoymesh: + aliases: + - mysql + environment: + - MYSQL_ALLOW_EMPTY_PASSWORD=yes + expose: + - "3306" + ports: + - "3306:3306" + +networks: + envoymesh: + name: envoymesh diff --git a/examples/mysql/envoy.yaml b/examples/mysql/envoy.yaml new file mode 100644 index 0000000000000..56504a42faf2c --- /dev/null +++ b/examples/mysql/envoy.yaml @@ -0,0 +1,39 @@ +static_resources: + listeners: + - name: mysql_listener + address: + socket_address: + address: 0.0.0.0 + port_value: 1999 + filter_chains: + - filters: + - name: envoy.filters.network.mysql_proxy + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.mysql_proxy.v1alpha1.MySQLProxy + stat_prefix: egress_mysql + - name: envoy.tcp_proxy + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + stat_prefix: mysql_tcp + cluster: mysql_cluster + + clusters: + - name: mysql_cluster + connect_timeout: 1s + type: strict_dns + load_assignment: + cluster_name: mysql_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: mysql + port_value: 3306 + +admin: + access_log_path: "/dev/null" + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 diff --git a/examples/redis/Dockerfile-proxy b/examples/redis/Dockerfile-proxy index 26aaebb9ab6dc..92b320ea14879 100644 --- a/examples/redis/Dockerfile-proxy +++ b/examples/redis/Dockerfile-proxy @@ -1,2 +1,2 @@ -FROM envoyproxy/envoy:latest +FROM envoyproxy/envoy-dev:latest CMD /usr/local/bin/envoy -c /etc/envoy.yaml -l debug --service-cluster proxy diff --git a/examples/redis/README.md b/examples/redis/README.md index a6926ce3e9757..c1f18ed5e301b 100644 --- a/examples/redis/README.md +++ 
b/examples/redis/README.md @@ -3,8 +3,8 @@ In this example, we show how a [Redis filter](https://www.envoyproxy.io/docs/env # Usage -1. `docker-compose build` -2. `docker-compose up` +1. `docker-compose pull` +2. `docker-compose up --build` 3. Issue redis commands using your favourite redis client such as `redis-cli` ## Sample Output: diff --git a/examples/redis/docker-compose.yaml b/examples/redis/docker-compose.yaml index 0e18c9019c6e8..5b2d82a6e1163 100644 --- a/examples/redis/docker-compose.yaml +++ b/examples/redis/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '2' +version: "3.7" services: proxy: diff --git a/examples/redis/envoy.yaml b/examples/redis/envoy.yaml index 19cc48a811417..bdf76fefe434f 100644 --- a/examples/redis/envoy.yaml +++ b/examples/redis/envoy.yaml @@ -8,7 +8,8 @@ static_resources: filter_chains: - filters: - name: envoy.redis_proxy - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProxy stat_prefix: egress_redis cluster: redis_cluster settings: @@ -18,10 +19,15 @@ static_resources: connect_timeout: 1s type: strict_dns # static lb_policy: MAGLEV - hosts: - - socket_address: - address: redis_server - port_value: 6379 + load_assignment: + cluster_name: redis_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: redis_server + port_value: 6379 admin: access_log_path: "/dev/null" address: diff --git a/examples/wasm/envoy_filter_http_wasm_example.cc b/examples/wasm/envoy_filter_http_wasm_example.cc index c7a53ec3f358e..e68ce4075624f 100644 --- a/examples/wasm/envoy_filter_http_wasm_example.cc +++ b/examples/wasm/envoy_filter_http_wasm_example.cc @@ -19,7 +19,7 @@ class ExampleContext : public Context { void onDelete() override; }; -std::unique_ptr Context::New(uint32_t id) { +std::unique_ptr NewContext(uint32_t id) { return std::unique_ptr(new ExampleContext(id)); } diff --git a/examples/wasm/envoy_filter_http_wasm_example.wasm 
b/examples/wasm/envoy_filter_http_wasm_example.wasm index 4851f62784505..b8c69be9df687 100644 Binary files a/examples/wasm/envoy_filter_http_wasm_example.wasm and b/examples/wasm/envoy_filter_http_wasm_example.wasm differ diff --git a/examples/wasm/envoy_filter_http_wasm_example.wat b/examples/wasm/envoy_filter_http_wasm_example.wat index 43ce15a9d9330..b1ef700026702 100644 --- a/examples/wasm/envoy_filter_http_wasm_example.wat +++ b/examples/wasm/envoy_filter_http_wasm_example.wat @@ -85,15 +85,17 @@ (export "_proxy_onLog" (func $_proxy_onLog)) (export "_proxy_onRequestBody" (func $_proxy_onRequestBody)) (export "_proxy_onRequestHeaders" (func $_proxy_onRequestHeaders)) + (export "_proxy_onRequestMetadata" (func $_proxy_onRequestMetadata)) (export "_proxy_onRequestTrailers" (func $_proxy_onRequestTrailers)) (export "_proxy_onResponseBody" (func $_proxy_onResponseBody)) (export "_proxy_onResponseHeaders" (func $_proxy_onResponseHeaders)) + (export "_proxy_onResponseMetadata" (func $_proxy_onResponseMetadata)) (export "_proxy_onResponseTrailers" (func $_proxy_onResponseTrailers)) (export "_proxy_onStart" (func $_proxy_onStart)) (export "_proxy_onTick" (func $_proxy_onTick)) - (export "_pthread_cond_broadcast" (func $__ZN7Context17onRequestTrailersEv)) - (export "_pthread_mutex_lock" (func $__ZN7Context17onRequestTrailersEv)) - (export "_pthread_mutex_unlock" (func $__ZN7Context17onRequestTrailersEv)) + (export "_pthread_cond_broadcast" (func $__ZN7Context17onRequestMetadataEv)) + (export "_pthread_mutex_lock" (func $__ZN7Context17onRequestMetadataEv)) + (export "_pthread_mutex_unlock" (func $__ZN7Context17onRequestMetadataEv)) (export "_sbrk" (func $_sbrk)) (export "dynCall_ii" (func $dynCall_ii)) (export "dynCall_iii" (func $dynCall_iii)) @@ -115,15 +117,15 @@ (global $33 (mut i32) (global.get $32)) (global $34 (mut i32) (i32.const 0)) (global $35 (mut i32) (i32.const 0)) - (global $36 (mut i32) (i32.const 18400)) - (global $37 (mut i32) (i32.const 5261280)) + 
(global $36 (mut i32) (i32.const 18416)) + (global $37 (mut i32) (i32.const 5261296)) (elem $29 (global.get $31) - $b0 $__ZN14ExampleContext16onRequestHeadersEv $__ZN7Context17onRequestTrailersEv $__ZN14ExampleContext17onResponseHeadersEv $__ZN7Context17onRequestTrailersEv $__ZN7Context17onRequestTrailersEv $__ZN7Context17onRequestTrailersEv $__ZNK6google8protobuf8internal12MapEntryImplINS0_27Struct_FieldsEntry_DoNotUseENS0_11MessageLiteENSt3__212basic_stringIcNS5_11char_traitsIcEENS5_9allocatorIcEEEENS0_5ValueELNS1_14WireFormatLite9FieldTypeE9ELSE_11ELi0EE3NewEv - $__ZNK6google8protobuf8internal12MapEntryImplINS0_27Struct_FieldsEntry_DoNotUseENS0_11MessageLiteENSt3__212basic_stringIcNS5_11char_traitsIcEENS5_9allocatorIcEEEENS0_5ValueELNS1_14WireFormatLite9FieldTypeE9ELSE_11ELi0EE8GetArenaEv $__ZNK6google8protobuf11MessageLite20GetMaybeArenaPointerEv $__ZNK6google8protobuf9ListValue13IsInitializedEv $__ZNK6google8protobuf8internal12MapEntryImplINS0_27Struct_FieldsEntry_DoNotUseENS0_11MessageLiteENSt3__212basic_stringIcNS5_11char_traitsIcEENS5_9allocatorIcEEEENS0_5ValueELNS1_14WireFormatLite9FieldTypeE9ELSE_11ELi0EE12ByteSizeLongEv $__ZNK6google8protobuf8internal12MapEntryImplINS0_27Struct_FieldsEntry_DoNotUseENS0_11MessageLiteENSt3__212basic_stringIcNS5_11char_traitsIcEENS5_9allocatorIcEEEENS0_5ValueELNS1_14WireFormatLite9FieldTypeE9ELSE_11ELi0EE13GetCachedSizeEv $__ZN7Context17onRequestTrailersEv $__ZNK6google8protobuf9ListValue20GetMaybeArenaPointerEv $__ZNK6google8protobuf8internal12MapEntryImplINS0_27Struct_FieldsEntry_DoNotUseENS0_11MessageLiteENSt3__212basic_stringIcNS5_11char_traitsIcEENS5_9allocatorIcEEEENS0_5ValueELNS1_14WireFormatLite9FieldTypeE9ELSE_11ELi0EE5valueEv - $__ZNK6google8protobuf5Value3NewEv $__ZNK6google8protobuf9ListValue8GetArenaEv $__ZNK6google8protobuf9ListValue20GetMaybeArenaPointerEv $__ZNK6google8protobuf9ListValue13IsInitializedEv $__ZNK6google8protobuf5Value12ByteSizeLongEv $__ZNK6google8protobuf5Value13GetCachedSizeEv 
$__ZNK6google8protobuf9ListValue3NewEv $__ZNK6google8protobuf9ListValue8GetArenaEv - $__ZNK6google8protobuf9ListValue20GetMaybeArenaPointerEv $__ZNK6google8protobuf9ListValue13IsInitializedEv $__ZNK6google8protobuf9ListValue12ByteSizeLongEv $__ZNK6google8protobuf9ListValue13GetCachedSizeEv $__ZNK6google8protobuf8internal12MapEntryImplINS0_27Struct_FieldsEntry_DoNotUseENS0_11MessageLiteENSt3__212basic_stringIcNS5_11char_traitsIcEENS5_9allocatorIcEEEENS0_5ValueELNS1_14WireFormatLite9FieldTypeE9ELSE_11ELi0EE15MapEntryWrapper3keyEv $__ZNK6google8protobuf9ListValue13GetCachedSizeEv $__ZNK6google8protobuf6Struct3NewEv $__ZNK6google8protobuf9ListValue8GetArenaEv - $__ZNK6google8protobuf9ListValue20GetMaybeArenaPointerEv $__ZNK6google8protobuf9ListValue13IsInitializedEv $__ZNK6google8protobuf6Struct12ByteSizeLongEv $__ZNK6google8protobuf9ListValue13GetCachedSizeEv $__ZNK6google8protobuf14FatalException4whatEv $__ZN7Context17onRequestTrailersEv $___stdio_close $__ZNKSt3__217bad_function_call4whatEv - $__ZNKSt11logic_error4whatEv $b0 $b0 $b0 $b0 $b0 $b0 $b0 + $b0 $__ZN14ExampleContext16onRequestHeadersEv $__ZN7Context17onRequestMetadataEv $__ZN7Context17onRequestMetadataEv $__ZN14ExampleContext17onResponseHeadersEv $__ZN7Context17onRequestMetadataEv $__ZN7Context17onRequestMetadataEv $__ZN7Context17onRequestMetadataEv + $__ZN7Context17onRequestMetadataEv $__ZNK6google8protobuf8internal12MapEntryImplINS0_27Struct_FieldsEntry_DoNotUseENS0_11MessageLiteENSt3__212basic_stringIcNS5_11char_traitsIcEENS5_9allocatorIcEEEENS0_5ValueELNS1_14WireFormatLite9FieldTypeE9ELSE_11ELi0EE3NewEv $__ZNK6google8protobuf8internal12MapEntryImplINS0_27Struct_FieldsEntry_DoNotUseENS0_11MessageLiteENSt3__212basic_stringIcNS5_11char_traitsIcEENS5_9allocatorIcEEEENS0_5ValueELNS1_14WireFormatLite9FieldTypeE9ELSE_11ELi0EE8GetArenaEv $__ZNK6google8protobuf11MessageLite20GetMaybeArenaPointerEv $__ZNK6google8protobuf9ListValue13IsInitializedEv 
$__ZNK6google8protobuf8internal12MapEntryImplINS0_27Struct_FieldsEntry_DoNotUseENS0_11MessageLiteENSt3__212basic_stringIcNS5_11char_traitsIcEENS5_9allocatorIcEEEENS0_5ValueELNS1_14WireFormatLite9FieldTypeE9ELSE_11ELi0EE12ByteSizeLongEv $__ZNK6google8protobuf8internal12MapEntryImplINS0_27Struct_FieldsEntry_DoNotUseENS0_11MessageLiteENSt3__212basic_stringIcNS5_11char_traitsIcEENS5_9allocatorIcEEEENS0_5ValueELNS1_14WireFormatLite9FieldTypeE9ELSE_11ELi0EE13GetCachedSizeEv $__ZN7Context17onRequestMetadataEv + $__ZNK6google8protobuf9ListValue20GetMaybeArenaPointerEv $__ZNK6google8protobuf8internal12MapEntryImplINS0_27Struct_FieldsEntry_DoNotUseENS0_11MessageLiteENSt3__212basic_stringIcNS5_11char_traitsIcEENS5_9allocatorIcEEEENS0_5ValueELNS1_14WireFormatLite9FieldTypeE9ELSE_11ELi0EE5valueEv $__ZNK6google8protobuf5Value3NewEv $__ZNK6google8protobuf9ListValue8GetArenaEv $__ZNK6google8protobuf9ListValue20GetMaybeArenaPointerEv $__ZNK6google8protobuf9ListValue13IsInitializedEv $__ZNK6google8protobuf5Value12ByteSizeLongEv $__ZNK6google8protobuf5Value13GetCachedSizeEv + $__ZNK6google8protobuf9ListValue3NewEv $__ZNK6google8protobuf9ListValue8GetArenaEv $__ZNK6google8protobuf9ListValue20GetMaybeArenaPointerEv $__ZNK6google8protobuf9ListValue13IsInitializedEv $__ZNK6google8protobuf9ListValue12ByteSizeLongEv $__ZNK6google8protobuf9ListValue13GetCachedSizeEv $__ZNK6google8protobuf8internal12MapEntryImplINS0_27Struct_FieldsEntry_DoNotUseENS0_11MessageLiteENSt3__212basic_stringIcNS5_11char_traitsIcEENS5_9allocatorIcEEEENS0_5ValueELNS1_14WireFormatLite9FieldTypeE9ELSE_11ELi0EE15MapEntryWrapper3keyEv $__ZNK6google8protobuf9ListValue13GetCachedSizeEv + $__ZNK6google8protobuf6Struct3NewEv $__ZNK6google8protobuf9ListValue8GetArenaEv $__ZNK6google8protobuf9ListValue20GetMaybeArenaPointerEv $__ZNK6google8protobuf9ListValue13IsInitializedEv $__ZNK6google8protobuf6Struct12ByteSizeLongEv $__ZNK6google8protobuf9ListValue13GetCachedSizeEv $__ZNK6google8protobuf14FatalException4whatEv 
$__ZN7Context17onRequestMetadataEv + $___stdio_close $__ZNKSt3__217bad_function_call4whatEv $__ZNKSt11logic_error4whatEv $b0 $b0 $b0 $b0 $b0 $b0 $b0 $b0 $b0 $b0 $b0 $b0 $b0 $b0 $b0 $b0 $b0 $b0 $b0 $b0 $b0 $b1 $__ZN7Context21isProactivelyCachableE12MetadataType $__ZNK6google8protobuf8internal12MapEntryImplINS0_27Struct_FieldsEntry_DoNotUseENS0_11MessageLiteENSt3__212basic_stringIcNS5_11char_traitsIcEENS5_9allocatorIcEEEENS0_5ValueELNS1_14WireFormatLite9FieldTypeE9ELSE_11ELi0EE3NewEPNS0_5ArenaE $__ZN6google8protobuf8internal12MapEntryImplINS0_27Struct_FieldsEntry_DoNotUseENS0_11MessageLiteENSt3__212basic_stringIcNS5_11char_traitsIcEENS5_9allocatorIcEEEENS0_5ValueELNS1_14WireFormatLite9FieldTypeE9ELSE_11ELi0EE27MergePartialFromCodedStreamEPNS0_2io16CodedInputStreamE $__ZNK6google8protobuf11MessageLite31SerializeWithCachedSizesToArrayEPh $__ZNK6google8protobuf5Value3NewEPNS0_5ArenaE $__ZN6google8protobuf5Value27MergePartialFromCodedStreamEPNS0_2io16CodedInputStreamE $__ZNK6google8protobuf9ListValue3NewEPNS0_5ArenaE @@ -148,7 +150,7 @@ $__ZNK10__cxxabiv117__class_type_info16search_below_dstEPNS_19__dynamic_cast_infoEPKvib $__ZNK10__cxxabiv120__si_class_type_info16search_below_dstEPNS_19__dynamic_cast_infoEPKvib $__ZNK10__cxxabiv121__vmi_class_type_info16search_below_dstEPNS_19__dynamic_cast_infoEPKvib $b9 $b9 $b9 $b10 $__ZNK10__cxxabiv117__class_type_info16search_above_dstEPNS_19__dynamic_cast_infoEPKvS4_ib $__ZNK10__cxxabiv120__si_class_type_info16search_above_dstEPNS_19__dynamic_cast_infoEPKvS4_ib $__ZNK10__cxxabiv121__vmi_class_type_info16search_above_dstEPNS_19__dynamic_cast_infoEPKvS4_ib $b11) (data $30 (i32.const 1024) - "d0\00\00i0\00\00q0\00\00w0") + "t0\00\00y0\00\00\810\00\00\870") (data $30 (i32.const 1168) "\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0" 
"\f0\f0\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\02\03\03\03\03\03\03\03\03\03\03\03\03\07\03\03\04\05\05\05\06\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0\f0" @@ -253,64 +255,64 @@ "\89\00\00\00\8b\00\00\00\95\00\00\00\97\00\00\00\9d\00\00\00\a3\00\00\00\a7\00\00\00\ad\00\00\00\b3\00\00\00\b5\00\00\00\bf\00\00\00\c1\00\00\00\c5\00\00\00\c7\00\00\00\d3\00\00\00\01\00\00\00" "\0b\00\00\00\0d\00\00\00\11\00\00\00\13\00\00\00\17\00\00\00\1d\00\00\00\1f\00\00\00%\00\00\00)\00\00\00+\00\00\00/\00\00\005\00\00\00;\00\00\00=\00\00\00C\00\00\00G\00\00\00" "I\00\00\00O\00\00\00S\00\00\00Y\00\00\00a\00\00\00e\00\00\00g\00\00\00k\00\00\00m\00\00\00q\00\00\00y\00\00\00\7f\00\00\00\83\00\00\00\89\00\00\00\8b\00\00\00\8f\00\00\00" - "\95\00\00\00\97\00\00\00\9d\00\00\00\a3\00\00\00\a7\00\00\00\a9\00\00\00\ad\00\00\00\b3\00\00\00\b5\00\00\00\bb\00\00\00\bf\00\00\00\c1\00\00\00\c5\00\00\00\c7\00\00\00\d1\00\00\00\a4 \00\00" - "\ff!\00\00\cc \00\00\ee!\00\00\e0\18\00\00\00\00\00\00\a4 \00\00\08\"\00\008!\00\00\n#\00\00\00\00\00\00\01\00\00\00\18\19\00\00\00\00\00\00\a4 \00\00I#\00\00\cc \00\00" - ",-\00\00\d0\19\00\00\00\00\00\00\cc \00\00\0e,\00\00@\19\00\00\00\00\00\00\cc \00\00\cb%\00\00P\19\00\00\00\00\00\00\cc \00\00\fb%\00\00`\19\00\00\00\00\00\00\cc \00\00" - "\c1&\00\00\d0\19\00\00\00\00\00\00\cc \00\00\db+\00\00\d0\19\00\00\00\00\00\008!\00\00\99*\00\00\00\00\00\00\01\00\00\00\98\19\00\00\00\00\00\00\a4 \00\00\06+\00\00\cc \00\00" - "\f5+\00\00\d0\19\00\00\00\00\00\00\cc \00\00_-\00\00\10\1a\00\00\00\00\00\00\ff\ff\ff\ff\ff\ff\ff\ff") + "\95\00\00\00\97\00\00\00\9d\00\00\00\a3\00\00\00\a7\00\00\00\a9\00\00\00\ad\00\00\00\b3\00\00\00\b5\00\00\00\bb\00\00\00\bf\00\00\00\c1\00\00\00\c5\00\00\00\c7\00\00\00\d1\00\00\00\b4 \00\00" + "\0f\"\00\00\dc \00\00\fe!\00\00\e0\18\00\00\00\00\00\00\b4 \00\00\18\"\00\00H!\00\00\1a#\00\00\00\00\00\00\01\00\00\00\18\19\00\00\00\00\00\00\b4 \00\00Y#\00\00\dc \00\00" + "<-\00\00\d0\19\00\00\00\00\00\00\dc 
\00\00\1e,\00\00@\19\00\00\00\00\00\00\dc \00\00\db%\00\00P\19\00\00\00\00\00\00\dc \00\00\0b&\00\00`\19\00\00\00\00\00\00\dc \00\00" + "\d1&\00\00\d0\19\00\00\00\00\00\00\dc \00\00\eb+\00\00\d0\19\00\00\00\00\00\00H!\00\00\a9*\00\00\00\00\00\00\01\00\00\00\98\19\00\00\00\00\00\00\b4 \00\00\16+\00\00\dc \00\00" + "\05,\00\00\d0\19\00\00\00\00\00\00\dc \00\00o-\00\00\10\1a\00\00\00\00\00\00\ff\ff\ff\ff\ff\ff\ff\ff") (data $30 (i32.const 6608) - "\a4 \00\00`2\00\00\cc \00\0016\00\00\f8\19\00\00\00\00\00\00\cc \00\00\ed6\00\00\f8\19\00\00\00\00\00\00\a4 \00\00\b97\00\00\cc \00\0028\00\00\10\1a\00\00\00\00\00\00" - "\a4 \00\00\f88\00\00\cc \00\00X9\00\00(\1a\00\00\00\00\00\00\cc \00\00\059\00\008\1a\00\00\00\00\00\00\a4 \00\00&9\00\00\cc \00\0039\00\00\18\1a\00\00\00\00\00\00" - "\cc \00\00::\00\00\10\1a\00\00\00\00\00\00\cc \00\00J:\00\00P\1a\00\00\00\00\00\00\cc \00\00\7f:\00\00(\1a\00\00\00\00\00\00\cc \00\00[:\00\00p\1a\00\00\00\00\00\00" - "\cc \00\00\a1:\00\00(\1a\00\00\00\00\00\00\1c!\00\00\c9:\00\00\1c!\00\00\cb:\00\00\cc \00\00\cd:\00\00\18\1a") + "\b4 \00\00p2\00\00\dc \00\00A6\00\00\f8\19\00\00\00\00\00\00\dc \00\00\fd6\00\00\f8\19\00\00\00\00\00\00\b4 \00\00\c97\00\00\dc \00\00B8\00\00\10\1a\00\00\00\00\00\00" + "\b4 \00\00\089\00\00\dc \00\00h9\00\00(\1a\00\00\00\00\00\00\dc \00\00\159\00\008\1a\00\00\00\00\00\00\b4 \00\0069\00\00\dc \00\00C9\00\00\18\1a\00\00\00\00\00\00" + "\dc \00\00J:\00\00\10\1a\00\00\00\00\00\00\dc \00\00Z:\00\00P\1a\00\00\00\00\00\00\dc \00\00\8f:\00\00(\1a\00\00\00\00\00\00\dc \00\00k:\00\00p\1a\00\00\00\00\00\00" + "\dc \00\00\b1:\00\00(\1a\00\00\00\00\00\00,!\00\00\d9:\00\00,!\00\00\db:\00\00\dc \00\00\dd:\00\00\18\1a") (data $30 (i32.const 6852) - "\e8\18\00\00\01\00\00\00\02\00\00\00\01\00\00\00\03\00\00\00\04\00\00\00\01\00\00\00\01\00\00\00\02\00\00\00\03\00\00\00\02\00\00\00\04\00\00\00\05\00\00\00\06\00\00\00\07\00\00\00\08\00\00\00" - 
"\01\00\00\00\02\00\00\00\03\00\00\00\04\00\00\00\01\00\00\00\01\00\00\00\01\00\00\00\00\00\00\00\e0\18\00\00\01\00\00\00\09\00\00\00\01\00\00\00\n\00\00\00\0b\00\00\00\05\00\00\00\03\00\00\00" - "\02\00\00\00\06\00\00\00\02\00\00\00\04\00\00\00\0c\00\00\00\0d\00\00\00\0e\00\00\00\08\00\00\00\01\00\00\00\02\00\00\00\03\00\00\00\04\00\00\00\01\00\00\00\01\00\00\00\01\00\00\00\ff\ff\ff\ff" - "\00\00\00\00\01") - (data $30 (i32.const 7060) - "@\19\00\00\0f\00\00\00\10\00\00\00\05\00\00\00\07\00\00\00\02\00\00\00\08\00\00\00\09\00\00\00\11\00\00\00\n\00\00\00\06\00\00\00\07\00\00\00\03\00\00\00\0b\00\00\00\08\00\00\00\04\00\00\00" - "\0c\00\00\00\04\00\00\00\0d\00\00\00\0e\00\00\00\0f\00\00\00\00\00\00\00\a0\19\00\00\12\00\00\00\13\00\00\00\09\00\00\00\10\00\00\00\05\00\00\00\11\00\00\00\12\00\00\00\14\00\00\00\13\00\00\00" - "\06\00\00\00\n\00\00\00\06\00\00\00\14\00\00\00\0b\00\00\00\04\00\00\00\15\00\00\00\05\00\00\00\0d\00\00\00\00\00\00\00 \19\00\00\15\00\00\00\16\00\00\00\0c\00\00\00\16\00\00\00\07\00\00\00" - "\17\00\00\00\18\00\00\00\17\00\00\00\19\00\00\00\06\00\00\00\0d\00\00\00\08\00\00\00\1a\00\00\00\0e\00\00\00\04\00\00\00\1b\00\00\00\05\00\00\00\0d\00\00\00\00\00\00\000\19\00\00\0f\00\00\00" - "\18\00\00\00\05\00\00\00\07\00\00\00\02\00\00\00\08\00\00\00\09\00\00\00\11\00\00\00\n\00\00\00\06\00\00\00\07\00\00\00\03\00\00\00\0b\00\00\00\08\00\00\00\04\00\00\00\0c\00\00\00\04\00\00\00" - "\0d\00\00\00\1c\00\00\00\1d\00\00\00\00\00\00\00p\19\00\00\19\00\00\00\1a\00\00\00\0f\00\00\00\1e\00\00\00\09\00\00\00\1f\00\00\00 \00\00\00\1b\00\00\00!\00\00\00\06\00\00\00\10\00\00\00" - "\n\00\00\00\"\00\00\00\11\00\00\00\04\00\00\00#\00\00\00\05\00\00\00\0d\00\00\00\00\00\00\00`\19\00\00\0f\00\00\00\1c\00\00\00\05\00\00\00\07\00\00\00\02\00\00\00\08\00\00\00\09\00\00\00" - "\11\00\00\00\n\00\00\00\06\00\00\00\07\00\00\00\03\00\00\00\0b\00\00\00\08\00\00\00\04\00\00\00\0c\00\00\00\04\00\00\00\0d\00\00\00\0e\00\00\00\0f\00\00\00\00\00\00\00\b0\19\00\00\1d\00\00\00" - 
"\1e\00\00\00$\00\00\00\00\00\00\00\d8\19\00\00\1f\00\00\00 \00\00\00\06\00\00\00\12\00\00\00\01\00\00\00\07\00\00\00%\00\00\00\00\00\00\00\e8\19\00\00\1f\00\00\00!\00\00\00\08\00\00\00" - "\13\00\00\00\02\00\00\00\07\00\00\00%") - (data $30 (i32.const 7657) - "\01\00\00\00\09\00\00\00\00\00\00\08\00\00\00\01\00\00\00 \00\00\00\00\10\04\00\00\d0B\00\00\d4B\00\00\10\0d\00\00\18\1e\00\00\05") - (data $30 (i32.const 7716) - "&") - (data $30 (i32.const 7740) - "\09\00\00\00\n\00\00\00\deB") - (data $30 (i32.const 7764) + "\e8\18\00\00\01\00\00\00\02\00\00\00\01\00\00\00\03\00\00\00\04\00\00\00\01\00\00\00\02\00\00\00\01\00\00\00\03\00\00\00\04\00\00\00\05\00\00\00\02\00\00\00\06\00\00\00\05\00\00\00\06\00\00\00" + "\07\00\00\00\08\00\00\00\01\00\00\00\02\00\00\00\03\00\00\00\04\00\00\00\01\00\00\00\01\00\00\00\01\00\00\00\00\00\00\00\e0\18\00\00\01\00\00\00\09\00\00\00\01\00\00\00\n\00\00\00\0b\00\00\00" + "\07\00\00\00\02\00\00\00\03\00\00\00\03\00\00\00\08\00\00\00\05\00\00\00\02\00\00\00\06\00\00\00\0c\00\00\00\0d\00\00\00\0e\00\00\00\08\00\00\00\01\00\00\00\02\00\00\00\03\00\00\00\04\00\00\00" + "\01\00\00\00\01\00\00\00\01\00\00\00\ff\ff\ff\ff\00\00\00\00\01") + (data $30 (i32.const 7076) + "@\19\00\00\0f\00\00\00\10\00\00\00\05\00\00\00\09\00\00\00\02\00\00\00\n\00\00\00\0b\00\00\00\11\00\00\00\0c\00\00\00\06\00\00\00\07\00\00\00\03\00\00\00\0d\00\00\00\08\00\00\00\04\00\00\00" + "\0e\00\00\00\04\00\00\00\0f\00\00\00\10\00\00\00\11\00\00\00\00\00\00\00\a0\19\00\00\12\00\00\00\13\00\00\00\09\00\00\00\12\00\00\00\05\00\00\00\13\00\00\00\14\00\00\00\14\00\00\00\15\00\00\00" + "\06\00\00\00\n\00\00\00\06\00\00\00\16\00\00\00\0b\00\00\00\04\00\00\00\17\00\00\00\05\00\00\00\0f\00\00\00\00\00\00\00 \19\00\00\15\00\00\00\16\00\00\00\0c\00\00\00\18\00\00\00\07\00\00\00" + "\19\00\00\00\1a\00\00\00\17\00\00\00\1b\00\00\00\06\00\00\00\0d\00\00\00\08\00\00\00\1c\00\00\00\0e\00\00\00\04\00\00\00\1d\00\00\00\05\00\00\00\0f\00\00\00\00\00\00\000\19\00\00\0f\00\00\00" + 
"\18\00\00\00\05\00\00\00\09\00\00\00\02\00\00\00\n\00\00\00\0b\00\00\00\11\00\00\00\0c\00\00\00\06\00\00\00\07\00\00\00\03\00\00\00\0d\00\00\00\08\00\00\00\04\00\00\00\0e\00\00\00\04\00\00\00" + "\0f\00\00\00\1e\00\00\00\1f\00\00\00\00\00\00\00p\19\00\00\19\00\00\00\1a\00\00\00\0f\00\00\00 \00\00\00\09\00\00\00!\00\00\00\"\00\00\00\1b\00\00\00#\00\00\00\06\00\00\00\10\00\00\00" + "\n\00\00\00$\00\00\00\11\00\00\00\04\00\00\00%\00\00\00\05\00\00\00\0f\00\00\00\00\00\00\00`\19\00\00\0f\00\00\00\1c\00\00\00\05\00\00\00\09\00\00\00\02\00\00\00\n\00\00\00\0b\00\00\00" + "\11\00\00\00\0c\00\00\00\06\00\00\00\07\00\00\00\03\00\00\00\0d\00\00\00\08\00\00\00\04\00\00\00\0e\00\00\00\04\00\00\00\0f\00\00\00\10\00\00\00\11\00\00\00\00\00\00\00\b0\19\00\00\1d\00\00\00" + "\1e\00\00\00&\00\00\00\00\00\00\00\d8\19\00\00\1f\00\00\00 \00\00\00\06\00\00\00\12\00\00\00\01\00\00\00\07\00\00\00'\00\00\00\00\00\00\00\e8\19\00\00\1f\00\00\00!\00\00\00\08\00\00\00" + "\13\00\00\00\02\00\00\00\07\00\00\00'") + (data $30 (i32.const 7673) + "\01\00\00\00\09\00\00\00\00\00\00\08\00\00\00\01\00\00\00 \00\00\00\00\10\04\00\00\e0B\00\00\e4B\00\00\10\0d\00\00(\1e\00\00\05") + (data $30 (i32.const 7732) + "(") + (data $30 (i32.const 7756) + "\09\00\00\00\n\00\00\00\eeB") + (data $30 (i32.const 7780) "\02") - (data $30 (i32.const 7779) + (data $30 (i32.const 7795) "\ff\ff\ff\ff\ff") - (data $30 (i32.const 7828) + (data $30 (i32.const 7844) "\05") - (data $30 (i32.const 7840) - "&") - (data $30 (i32.const 7864) - "\0b\00\00\00\n\00\00\00\08;\00\00\00\04") - (data $30 (i32.const 7888) + (data $30 (i32.const 7856) + "(") + (data $30 (i32.const 7880) + "\0b\00\00\00\n\00\00\00\18;\00\00\00\04") + (data $30 (i32.const 7904) "\01") - (data $30 (i32.const 7903) + (data $30 (i32.const 7919) "\n\ff\ff\ff\ff") - (data $30 (i32.const 7952) - "\94\1e") - (data $30 (i32.const 7992) + (data $30 (i32.const 7968) + "\a4\1e") + (data $30 (i32.const 8008) "\0c") - (data $30 (i32.const 8031) + (data $30 (i32.const 
8047) "\ff\ff\ff\ff\ff") - (data $30 (i32.const 8268) - "`@") - (data $30 (i32.const 8329) - "\1a\00\00\"\00\00\00#\00\00\00'\00\00\00\02\00\00\00\00\00\00\00\18\1a\00\00$\00\00\00%\00\00\00&\00\00\00'\00\00\00\0d\00\00\00\01\00\00\00\02\00\00\00\02\00\00\00\00\00\00\00@" - "\1a\00\00$\00\00\00(\00\00\00&\00\00\00'\00\00\00\0d\00\00\00\02\00\00\00\03\00\00\00\03\00\00\00\00\00\00\00P\1a\00\00)\00\00\00*\00\00\00(\00\00\00\00\00\00\00`\1a\00\00)" - "\00\00\00+\00\00\00(\00\00\00\00\00\00\00\90\1a\00\00$\00\00\00,\00\00\00&\00\00\00'\00\00\00\0e\00\00\00\00\00\00\00\b0\1a\00\00$\00\00\00-\00\00\00&\00\00\00'\00\00\00\0d" + (data $30 (i32.const 8284) + "p@") + (data $30 (i32.const 8345) + "\1a\00\00\"\00\00\00#\00\00\00)\00\00\00\02\00\00\00\00\00\00\00\18\1a\00\00$\00\00\00%\00\00\00&\00\00\00'\00\00\00\0d\00\00\00\01\00\00\00\02\00\00\00\02\00\00\00\00\00\00\00@" + "\1a\00\00$\00\00\00(\00\00\00&\00\00\00'\00\00\00\0d\00\00\00\02\00\00\00\03\00\00\00\03\00\00\00\00\00\00\00P\1a\00\00)\00\00\00*\00\00\00*\00\00\00\00\00\00\00`\1a\00\00)" + "\00\00\00+\00\00\00*\00\00\00\00\00\00\00\90\1a\00\00$\00\00\00,\00\00\00&\00\00\00'\00\00\00\0e\00\00\00\00\00\00\00\b0\1a\00\00$\00\00\00-\00\00\00&\00\00\00'\00\00\00\0d" "\00\00\00\03\00\00\00\04\00\00\00\04\00\00\00onStart\00onCreate \00onRequestHeaders \00headers: \00onR" "esponseHeaders \00newheader\00newheadervalue\00location\00envoy-wasm\00onR" "equestBody \00onDone \00onLog \00onDelete \0014ExampleContext\007Context\00N" @@ -467,7 +469,7 @@ end ;; $if ) - (func $__ZN7Context3NewEj (type $3) + (func $__Z10NewContextj (type $3) (param $0 i32) (param $1 i32) (local $2 i32) @@ -589,15 +591,15 @@ i32.const 7 i32.store8 local.get $0 - i32.const 8536 + i32.const 8552 i32.load align=1 i32.store align=1 local.get $0 - i32.const 8540 + i32.const 8556 i32.load16_s align=1 i32.store16 offset=4 align=1 local.get $0 - i32.const 8542 + i32.const 8558 i32.load8_s i32.store8 offset=6 local.get $0 @@ -645,7 +647,7 @@ call 
$__ZNSt3__29to_stringEj local.get $2 local.get $1 - i32.const 8544 + i32.const 8560 call $__ZNSt3__212basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6insertEmPKc local.tee $0 i64.load align=4 @@ -776,15 +778,15 @@ i32.const 17 i32.store offset=4 local.get $7 - i32.const 8554 + i32.const 8570 i64.load align=1 i64.store align=1 local.get $7 - i32.const 8562 + i32.const 8578 i64.load align=1 i64.store offset=8 align=1 local.get $7 - i32.const 8570 + i32.const 8586 i32.load8_s i32.store8 offset=16 local.get $7 @@ -1035,11 +1037,11 @@ i32.const 9 i32.store8 local.get $5 - i32.const 8572 + i32.const 8588 i64.load align=1 i64.store align=1 local.get $5 - i32.const 8580 + i32.const 8596 i32.load8_s i32.store8 offset=8 local.get $5 @@ -1590,15 +1592,15 @@ i32.const 18 i32.store offset=4 local.get $7 - i32.const 8582 + i32.const 8598 i64.load align=1 i64.store align=1 local.get $7 - i32.const 8590 + i32.const 8606 i64.load align=1 i64.store offset=8 align=1 local.get $7 - i32.const 8598 + i32.const 8614 i32.load16_s align=1 i32.store16 offset=16 align=1 local.get $7 @@ -1849,11 +1851,11 @@ i32.const 9 i32.store8 local.get $5 - i32.const 8572 + i32.const 8588 i64.load align=1 i64.store align=1 local.get $5 - i32.const 8580 + i32.const 8596 i32.load8_s i32.store8 offset=8 local.get $5 @@ -2303,15 +2305,15 @@ end ;; $block_0 end ;; $if_8 i32.const 2 - i32.const 8601 + i32.const 8617 i32.const 9 - i32.const 8611 + i32.const 8627 i32.const 14 call $_proxy_addHeaderMapValue i32.const 2 - i32.const 8626 + i32.const 8642 i32.const 8 - i32.const 8635 + i32.const 8651 i32.const 10 call $_proxy_replaceHeaderMapValue local.get $4 @@ -2411,15 +2413,15 @@ i32.const 14 i32.store offset=4 local.get $0 - i32.const 8646 + i32.const 8662 i64.load align=1 i64.store align=1 local.get $0 - i32.const 8654 + i32.const 8670 i32.load align=1 i32.store offset=8 align=1 local.get $0 - i32.const 8658 + i32.const 8674 i32.load16_s align=1 i32.store16 offset=12 align=1 local.get $0 @@ -2619,7 +2621,7 
@@ call $__ZNSt3__29to_stringEj local.get $2 local.get $1 - i32.const 8661 + i32.const 8677 call $__ZNSt3__212basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6insertEmPKc local.tee $0 i64.load align=4 @@ -2703,7 +2705,7 @@ call $__ZNSt3__29to_stringEj local.get $2 local.get $1 - i32.const 8669 + i32.const 8685 call $__ZNSt3__212basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6insertEmPKc local.tee $0 i64.load align=4 @@ -2787,7 +2789,7 @@ call $__ZNSt3__29to_stringEj local.get $2 local.get $1 - i32.const 8676 + i32.const 8692 call $__ZNSt3__212basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6insertEmPKc local.tee $0 i64.load align=4 @@ -2856,7 +2858,7 @@ (local $3 i32) (local $4 i32) local.get $0 - i32.const 6952 + i32.const 6960 i32.store local.get $0 i32.load offset=136 @@ -3276,7 +3278,7 @@ nop ) - (func $__ZN7Context17onRequestTrailersEv (type $4) + (func $__ZN7Context17onRequestMetadataEv (type $4) (param $0 i32) (result i32) i32.const 0 @@ -3496,7 +3498,7 @@ i32.const 4 call $___cxa_allocate_exception local.tee $1 - i32.const 8332 + i32.const 8348 i32.store local.get $1 i32.const 6656 @@ -4302,7 +4304,7 @@ i32.const 4 call $___cxa_allocate_exception local.tee $1 - i32.const 8332 + i32.const 8348 i32.store local.get $1 i32.const 6656 @@ -4856,7 +4858,7 @@ i32.const 4 call $___cxa_allocate_exception local.tee $1 - i32.const 8332 + i32.const 8348 i32.store local.get $1 i32.const 6656 @@ -5333,7 +5335,7 @@ local.tee $5 call $__ZNSt11logic_errorC2EPKc local.get $5 - i32.const 8456 + i32.const 8472 i32.store local.get $5 i32.const 6752 @@ -6398,11 +6400,11 @@ if $if local.get $3 i32.const 3 - i32.const 8883 + i32.const 8899 i32.const 370 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $3 - i32.const 8932 + i32.const 8948 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $3 @@ -6447,53 +6449,53 @@ (func 
$__ZN30protobuf_struct_5flite_2eprotoL21InitDefaultsListValueEv (type $8) call $__ZN6google8protobuf8internal13VerifyVersionEiiPKc - i32.const 16156 + i32.const 16172 i32.const 0 i32.store - i32.const 16148 - i32.const 16296 + i32.const 16164 + i32.const 16312 i32.store - i32.const 16152 + i32.const 16168 i32.const 0 i32.store - i32.const 16160 + i32.const 16176 i32.const 0 i32.store - i32.const 16144 - i32.const 7064 + i32.const 16160 + i32.const 7080 i32.store - i32.const 16168 + i32.const 16184 call $__ZN6google8protobuf6StructC2Ev i32.const 48 - i32.const 16168 + i32.const 16184 call $__ZN6google8protobuf8internal13OnShutdownRunEPFvPKvES3_ - i32.const 16200 - i32.const 7152 + i32.const 16216 + i32.const 7168 i32.store - i32.const 16204 + i32.const 16220 i32.const 0 i32.store - i32.const 16216 + i32.const 16232 i32.const 0 i32.store - i32.const 7040 + i32.const 7056 i32.load if $if call $__ZN6google8protobuf8internal11InitSCCImplEPNS1_11SCCInfoBaseE end ;; $if - i32.const 16220 + i32.const 16236 i32.const 0 i32.store i32.const 48 - i32.const 16200 + i32.const 16216 call $__ZN6google8protobuf8internal13OnShutdownRunEPFvPKvES3_ - i32.const 16256 + i32.const 16272 call $__ZN6google8protobuf9ListValueC2Ev i32.const 48 - i32.const 16256 + i32.const 16272 call $__ZN6google8protobuf8internal13OnShutdownRunEPFvPKvES3_ - i32.const 16152 - i32.const 16200 + i32.const 16168 + i32.const 16216 i32.store ) @@ -6501,7 +6503,7 @@ (param $0 i32) (local $1 i32) local.get $0 - i32.const 7400 + i32.const 7416 i32.store local.get $0 i32.const 4 @@ -6522,7 +6524,7 @@ local.get $0 i32.const 0 i32.store offset=24 - i32.const 7040 + i32.const 7056 i32.load i32.eqz if $if @@ -6535,7 +6537,7 @@ (param $0 i32) (local $1 i32) local.get $0 - i32.const 7232 + i32.const 7248 i32.store local.get $0 i32.const 4 @@ -6549,7 +6551,7 @@ local.get $1 i64.const 0 i64.store offset=16 align=4 - i32.const 7040 + i32.const 7056 i32.load i32.eqz if $if @@ -6574,7 +6576,7 @@ i32.add global.set $36 local.get 
$0 - i32.const 7232 + i32.const 7248 i32.store local.get $0 i32.const 4 @@ -6601,11 +6603,11 @@ if $if_0 local.get $2 i32.const 3 - i32.const 9071 + i32.const 9087 i32.const 915 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $2 - i32.const 10251 + i32.const 10267 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $2 @@ -6745,19 +6747,19 @@ i32.const 25 i32.store offset=4 local.get $2 - i32.const 11538 + i32.const 11554 i64.load align=1 i64.store align=1 local.get $2 - i32.const 11546 + i32.const 11562 i64.load align=1 i64.store offset=8 align=1 local.get $2 - i32.const 11554 + i32.const 11570 i64.load align=1 i64.store offset=16 align=1 local.get $2 - i32.const 11562 + i32.const 11578 i32.load8_s i32.store8 offset=24 local.get $2 @@ -6873,10 +6875,10 @@ local.get $1 call $__ZN6google8protobuf9ListValue9MergeFromERKS1_ else - i32.const 11104 - i32.const 11145 + i32.const 11120 + i32.const 11161 i32.const 92 - i32.const 11194 + i32.const 11210 call $___assert_fail end ;; $if ) @@ -7309,7 +7311,7 @@ local.tee $1 i32.const -2 i32.and - i32.const 16296 + i32.const 16312 local.get $1 i32.const 1 i32.and @@ -7421,7 +7423,7 @@ local.tee $0 i32.const -2 i32.and - i32.const 16296 + i32.const 16312 local.get $0 i32.const 1 i32.and @@ -7440,7 +7442,7 @@ local.tee $2 i32.const -2 i32.and - i32.const 16296 + i32.const 16312 local.get $2 i32.const 1 i32.and @@ -7500,11 +7502,11 @@ if $if local.get $3 i32.const 3 - i32.const 9131 + i32.const 9147 i32.const 1505 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $3 - i32.const 9183 + i32.const 9199 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $3 @@ -7536,11 +7538,11 @@ local.set $3 local.get $2 i32.const 3 - i32.const 9131 + i32.const 9147 i32.const 1506 call 
$__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $2 - i32.const 9214 + i32.const 9230 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $2 @@ -7575,7 +7577,7 @@ local.tee $2 i32.const -2 i32.and - i32.const 16296 + i32.const 16312 local.get $2 i32.const 1 i32.and @@ -7726,7 +7728,7 @@ local.tee $2 i32.const -2 i32.and - i32.const 16296 + i32.const 16312 local.get $2 i32.const 1 i32.and @@ -7848,7 +7850,7 @@ local.tee $1 i32.const -2 i32.and - i32.const 16296 + i32.const 16312 local.get $1 i32.const 1 i32.and @@ -7968,13 +7970,13 @@ i32.store offset=12 end ;; $if_1 local.get $2 - i32.const 16296 + i32.const 16312 i32.store offset=4 local.get $2 i32.const 0 i32.store offset=8 local.get $2 - i32.const 7312 + i32.const 7328 i32.store local.get $2 local.get $6 @@ -8224,11 +8226,11 @@ else local.get $4 i32.const 3 - i32.const 9256 + i32.const 9272 i32.const 418 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $4 - i32.const 9340 + i32.const 9356 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $4 @@ -8319,11 +8321,11 @@ if $if_4 local.get $2 i32.const 3 - i32.const 9256 + i32.const 9272 i32.const 427 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $2 - i32.const 9457 + i32.const 9473 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $2 @@ -8401,11 +8403,11 @@ if $if_1 local.get $3 i32.const 3 - i32.const 9256 + i32.const 9272 i32.const 451 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $3 - i32.const 9297 + i32.const 9313 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $3 @@ -8546,11 +8548,11 @@ end ;; $block_0 local.get 
$3 i32.const 3 - i32.const 9256 + i32.const 9272 i32.const 476 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $3 - i32.const 9488 + i32.const 9504 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $3 @@ -9212,7 +9214,7 @@ (param $0 i32) (local $1 i32) local.get $0 - i32.const 7480 + i32.const 7496 i32.store local.get $0 i32.load offset=12 @@ -9222,7 +9224,7 @@ local.get $0 i32.load offset=4 local.tee $1 - i32.const 16296 + i32.const 16312 i32.eq local.get $1 i32.eqz @@ -9266,7 +9268,7 @@ (local $1 i32) (local $2 i32) local.get $0 - i32.const 7480 + i32.const 7496 i32.store local.get $0 i32.load offset=12 @@ -9278,7 +9280,7 @@ local.get $0 i32.load offset=4 local.tee $1 - i32.const 16296 + i32.const 16312 i32.eq local.get $1 i32.eqz @@ -9342,7 +9344,7 @@ i32.const 0 i32.store offset=12 local.get $1 - i32.const 16296 + i32.const 16312 i32.store offset=4 local.get $1 i32.const 0 @@ -9351,7 +9353,7 @@ i32.const 0 i32.store offset=16 local.get $1 - i32.const 7064 + i32.const 7080 i32.store local.get $1 ) @@ -9379,7 +9381,7 @@ local.get $0 i32.load offset=4 local.tee $1 - i32.const 16296 + i32.const 16312 i32.ne if $if local.get $1 @@ -9471,10 +9473,10 @@ local.get $1 call $__ZN6google8protobuf8internal12MapEntryImplINS0_27Struct_FieldsEntry_DoNotUseENS0_11MessageLiteENSt3__212basic_stringIcNS5_11char_traitsIcEENS5_9allocatorIcEEEENS0_5ValueELNS1_14WireFormatLite9FieldTypeE9ELSE_11ELi0EE17MergeFromInternalERKSF_ else - i32.const 11104 - i32.const 11145 + i32.const 11120 + i32.const 11161 i32.const 92 - i32.const 11194 + i32.const 11210 call $___assert_fail end ;; $if ) @@ -9601,13 +9603,13 @@ local.get $6 i32.load local.tee $0 - i32.const 16296 + i32.const 16312 i32.eq if $if_0 local.get $6 local.get $9 i32.load - i32.const 16296 + i32.const 16312 call 
$__ZN6google8protobuf8internal14ArenaStringPtr14CreateInstanceEPNS0_5ArenaEPKNSt3__212basic_stringIcNS5_11char_traitsIcEENS5_9allocatorIcEEEE local.get $6 i32.load @@ -10124,7 +10126,7 @@ i32.const 24 call $__ZN6google8protobuf8internal9ArenaImpl15AllocateAlignedEm local.tee $1 - i32.const 7152 + i32.const 7168 i32.store local.get $1 local.get $0 @@ -10132,7 +10134,7 @@ local.get $1 i32.const 0 i32.store offset=16 - i32.const 7040 + i32.const 7056 i32.load if $if_1 call $__ZN6google8protobuf8internal11InitSCCImplEPNS1_11SCCInfoBaseE @@ -10143,7 +10145,7 @@ i32.const 24 call $__Znwm local.tee $0 - i32.const 7152 + i32.const 7168 i32.store local.get $0 i32.const 0 @@ -10151,7 +10153,7 @@ local.get $0 i32.const 0 i32.store offset=16 - i32.const 7040 + i32.const 7056 i32.load if $if_2 call $__ZN6google8protobuf8internal11InitSCCImplEPNS1_11SCCInfoBaseE @@ -10478,7 +10480,7 @@ i32.const 3 i32.store local.get $6 - i32.const 16296 + i32.const 16312 i32.store end ;; $if_6 local.get $12 @@ -10499,12 +10501,12 @@ local.get $6 i32.load local.tee $3 - i32.const 16296 + i32.const 16312 i32.eq if $if_8 (result i32) local.get $6 local.get $2 - i32.const 16296 + i32.const 16312 call $__ZN6google8protobuf8internal14ArenaStringPtr14CreateInstanceEPNS0_5ArenaEPKNSt3__212basic_stringIcNS5_11char_traitsIcEENS5_9allocatorIcEEEE local.get $6 i32.load @@ -10528,9 +10530,9 @@ i32.load local.tee $3 else - i32.const 16296 + i32.const 16312 local.set $3 - i32.const 16296 + i32.const 16312 end ;; $if_10 local.set $2 local.get $3 @@ -10548,9 +10550,9 @@ i32.load local.tee $3 else - i32.const 16296 + i32.const 16312 local.set $3 - i32.const 16296 + i32.const 16312 end ;; $if_12 local.set $8 local.get $2 @@ -10568,7 +10570,7 @@ i32.and end ;; $if_13 i32.const 0 - i32.const 9532 + i32.const 9548 call $__ZN6google8protobuf8internal14WireFormatLite16VerifyUtf8StringEPKciNS2_9OperationES4_ br_if $loop i32.const 0 @@ -10986,7 +10988,7 @@ local.get $0 i32.load offset=8 local.tee $0 - i32.const 16296 + 
i32.const 16312 i32.eq local.get $0 i32.eqz @@ -11357,7 +11359,7 @@ local.get $5 select i32.const 0 - i32.const 9567 + i32.const 9583 call $__ZN6google8protobuf8internal14WireFormatLite16VerifyUtf8StringEPKciNS2_9OperationES4_ select br $block_4 @@ -11539,7 +11541,7 @@ (param $1 i32) (local $2 i32) local.get $0 - i32.const 7232 + i32.const 7248 i32.store local.get $0 local.get $1 @@ -11556,7 +11558,7 @@ local.get $2 i64.const 0 i64.store offset=8 align=4 - i32.const 7040 + i32.const 7056 i32.load i32.eqz if $if @@ -11873,7 +11875,7 @@ i32.const 0 i32.store offset=12 local.get $2 - i32.const 16296 + i32.const 16312 i32.store offset=4 local.get $2 i32.const 0 @@ -11882,7 +11884,7 @@ i32.const 0 i32.store offset=16 local.get $2 - i32.const 7064 + i32.const 7080 i32.store end ;; $if_9 local.get $0 @@ -11926,13 +11928,13 @@ local.tee $6 i32.load local.tee $3 - i32.const 16296 + i32.const 16312 i32.eq if $if_11 (result i32) local.get $6 local.get $2 i32.load offset=12 - i32.const 16296 + i32.const 16312 call $__ZN6google8protobuf8internal14ArenaStringPtr14CreateInstanceEPNS0_5ArenaEPKNSt3__212basic_stringIcNS5_11char_traitsIcEENS5_9allocatorIcEEEE local.get $6 i32.load @@ -12239,7 +12241,7 @@ i32.const 0 i32.store offset=12 local.get $2 - i32.const 16296 + i32.const 16312 i32.store offset=4 local.get $2 i32.const 0 @@ -12248,7 +12250,7 @@ i32.const 0 i32.store offset=16 local.get $2 - i32.const 7064 + i32.const 7080 i32.store end ;; $if local.get $0 @@ -12340,13 +12342,13 @@ local.tee $5 i32.load local.tee $0 - i32.const 16296 + i32.const 16312 i32.eq if $if_2 local.get $5 local.get $3 i32.load offset=12 - i32.const 16296 + i32.const 16312 call $__ZN6google8protobuf8internal14ArenaStringPtr14CreateInstanceEPNS0_5ArenaEPKNSt3__212basic_stringIcNS5_11char_traitsIcEENS5_9allocatorIcEEEE local.get $5 i32.load @@ -12514,7 +12516,7 @@ i32.store offset=12 end ;; $if local.get $0 - i32.const 16296 + i32.const 16312 i32.store offset=4 local.get $0 i32.const 0 @@ -12523,7 +12525,7 
@@ i32.const 0 i32.store offset=16 local.get $0 - i32.const 7064 + i32.const 7080 i32.store local.get $0 ) @@ -12903,11 +12905,11 @@ if $if local.get $3 i32.const 3 - i32.const 9071 + i32.const 9087 i32.const 796 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $3 - i32.const 9606 + i32.const 9622 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $3 @@ -13048,7 +13050,7 @@ i32.const 8 i32.add local.tee $0 - i32.const 16296 + i32.const 16312 i32.store end ;; $if_4 local.get $2 @@ -13068,7 +13070,7 @@ local.get $0 i32.load local.tee $2 - i32.const 16296 + i32.const 16312 i32.eq if $if_6 local.get $0 @@ -13151,7 +13153,7 @@ i32.const 5 i32.eq br_if $block_8 - i32.const 16168 + i32.const 16184 end ;; $if_8 br $block_7 end ;; $block_8 @@ -13208,7 +13210,7 @@ i32.const 6 i32.eq br_if $block_10 - i32.const 16256 + i32.const 16272 end ;; $if_10 br $block_9 end ;; $block_10 @@ -13254,11 +13256,11 @@ if $if local.get $4 i32.const 3 - i32.const 9071 + i32.const 9087 i32.const 341 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $4 - i32.const 9606 + i32.const 9622 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $4 @@ -13449,11 +13451,11 @@ if $if local.get $3 i32.const 3 - i32.const 9071 + i32.const 9087 i32.const 1040 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $3 - i32.const 9606 + i32.const 9622 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $3 @@ -13554,11 +13556,11 @@ if $if local.get $2 i32.const 3 - i32.const 9131 + i32.const 9147 i32.const 1586 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $2 - i32.const 9640 + i32.const 9656 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call 
$__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $2 @@ -13711,7 +13713,7 @@ i32.const 16 i32.add local.tee $1 - i32.const 7152 + i32.const 7168 i32.store local.get $1 call $__ZN6google8protobuf5Value10SharedDtorEv @@ -13801,11 +13803,11 @@ if $if local.get $3 i32.const 3 - i32.const 9256 + i32.const 9272 i32.const 601 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $3 - i32.const 10137 + i32.const 10153 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $3 @@ -13877,11 +13879,11 @@ end ;; $block_0 local.get $5 i32.const 3 - i32.const 9256 + i32.const 9272 i32.const 607 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $5 - i32.const 10171 + i32.const 10187 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $5 @@ -13922,11 +13924,11 @@ end ;; $block_2 local.get $3 i32.const 3 - i32.const 9256 + i32.const 9272 i32.const 612 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $3 - i32.const 10215 + i32.const 10231 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $3 @@ -15074,11 +15076,11 @@ if $if_0 local.get $1 i32.const 3 - i32.const 9071 + i32.const 9087 i32.const 495 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $1 - i32.const 10251 + i32.const 10267 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $1 @@ -15327,7 +15329,7 @@ local.get $1 call $__ZNSt3__212basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEC2ERKS5_ local.get $0 - i32.const 7152 + i32.const 7168 i32.store offset=16 local.get $0 i32.const 0 @@ -15335,7 +15337,7 @@ local.get $0 i32.const 0 i32.store offset=32 - i32.const 7040 + 
i32.const 7056 i32.load if $if_0 call $__ZN6google8protobuf8internal11InitSCCImplEPNS1_11SCCInfoBaseE @@ -15374,7 +15376,7 @@ i32.load local.set $0 local.get $2 - i32.const 7152 + i32.const 7168 i32.store offset=16 local.get $2 local.get $0 @@ -15382,7 +15384,7 @@ local.get $2 i32.const 0 i32.store offset=32 - i32.const 7040 + i32.const 7056 i32.load if $if_2 call $__ZN6google8protobuf8internal11InitSCCImplEPNS1_11SCCInfoBaseE @@ -15441,11 +15443,11 @@ if $if local.get $3 i32.const 3 - i32.const 9256 + i32.const 9272 i32.const 765 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $3 - i32.const 10721 + i32.const 10737 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $3 @@ -15665,11 +15667,11 @@ if $if_0 local.get $8 i32.const 3 - i32.const 9256 + i32.const 9272 i32.const 672 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $8 - i32.const 10295 + i32.const 10311 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $8 @@ -15705,11 +15707,11 @@ if $if_1 local.get $6 i32.const 3 - i32.const 9256 + i32.const 9272 i32.const 678 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $6 - i32.const 10396 + i32.const 10412 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $6 @@ -15789,11 +15791,11 @@ else local.get $7 i32.const 3 - i32.const 9256 + i32.const 9272 i32.const 878 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $7 - i32.const 10452 + i32.const 10468 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $7 @@ -15823,11 +15825,11 @@ if $if_6 local.get $4 i32.const 3 - i32.const 9256 + i32.const 9272 i32.const 685 call 
$__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $4 - i32.const 10492 + i32.const 10508 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $4 @@ -15955,11 +15957,11 @@ end ;; $block_0 local.get $10 i32.const 3 - i32.const 9256 + i32.const 9272 i32.const 837 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $10 - i32.const 10614 + i32.const 10630 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $10 @@ -16228,11 +16230,11 @@ end ;; $if_8 local.get $12 i32.const 3 - i32.const 9256 + i32.const 9272 i32.const 848 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $12 - i32.const 10679 + i32.const 10695 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $12 @@ -16301,11 +16303,11 @@ if $if local.get $4 i32.const 3 - i32.const 9256 + i32.const 9272 i32.const 713 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $4 - i32.const 10567 + i32.const 10583 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $4 @@ -17627,11 +17629,11 @@ if $if local.get $4 i32.const 3 - i32.const 9256 + i32.const 9272 i32.const 926 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $4 - i32.const 10774 + i32.const 10790 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $4 @@ -17649,11 +17651,11 @@ if $if_0 local.get $2 i32.const 3 - i32.const 9256 + i32.const 9272 i32.const 927 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $2 - i32.const 10809 + i32.const 10825 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call 
$__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $2 @@ -17885,7 +17887,7 @@ (param $1 i32) (local $2 i32) local.get $0 - i32.const 7400 + i32.const 7416 i32.store local.get $0 local.get $1 @@ -17913,7 +17915,7 @@ local.get $0 i32.const 0 i32.store offset=24 - i32.const 7040 + i32.const 7056 i32.load i32.eqz if $if @@ -18293,11 +18295,11 @@ if $if_4 local.get $7 i32.const 3 - i32.const 9256 + i32.const 9272 i32.const 527 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $7 - i32.const 10846 + i32.const 10862 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $7 @@ -18559,7 +18561,7 @@ i32.add global.set $36 local.get $0 - i32.const 7400 + i32.const 7416 i32.store local.get $0 i32.const 4 @@ -18586,11 +18588,11 @@ if $if_0 local.get $1 i32.const 3 - i32.const 9071 + i32.const 9087 i32.const 150 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $1 - i32.const 10251 + i32.const 10267 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $1 @@ -18673,19 +18675,19 @@ i32.const 22 i32.store offset=4 local.get $2 - i32.const 11204 + i32.const 11220 i64.load align=1 i64.store align=1 local.get $2 - i32.const 11212 + i32.const 11228 i64.load align=1 i64.store offset=8 align=1 local.get $2 - i32.const 11220 + i32.const 11236 i32.load align=1 i32.store offset=16 align=1 local.get $2 - i32.const 11224 + i32.const 11240 i32.load16_s align=1 i32.store16 offset=20 align=1 local.get $2 @@ -18767,10 +18769,10 @@ local.get $1 call $__ZN6google8protobuf6Struct9MergeFromERKS1_ else - i32.const 11104 - i32.const 11145 + i32.const 11120 + i32.const 11161 i32.const 92 - i32.const 11194 + i32.const 11210 call $___assert_fail end ;; $if ) @@ -18881,13 +18883,13 @@ i32.store offset=12 end ;; $if_1 local.get $2 - i32.const 16296 + i32.const 16312 
i32.store offset=4 local.get $2 i32.const 0 i32.store offset=8 local.get $2 - i32.const 7312 + i32.const 7328 i32.store local.get $2 local.get $6 @@ -18956,7 +18958,7 @@ local.get $4 local.get $2 i32.const 1 - i32.const 9567 + i32.const 9583 call $__ZN6google8protobuf8internal14WireFormatLite16VerifyUtf8StringEPKciNS2_9OperationES4_ drop local.get $5 @@ -19109,13 +19111,13 @@ i32.store offset=12 end ;; $if_8 local.get $2 - i32.const 16296 + i32.const 16312 i32.store offset=4 local.get $2 i32.const 0 i32.store offset=8 local.get $2 - i32.const 7312 + i32.const 7328 i32.store local.get $2 local.get $9 @@ -19183,7 +19185,7 @@ local.get $6 local.get $2 i32.const 1 - i32.const 9567 + i32.const 9583 call $__ZN6google8protobuf8internal14WireFormatLite16VerifyUtf8StringEPKciNS2_9OperationES4_ drop local.get $4 @@ -19221,7 +19223,7 @@ local.tee $0 i32.const -2 i32.and - i32.const 16296 + i32.const 16312 local.get $0 i32.const 1 i32.and @@ -19240,7 +19242,7 @@ local.tee $2 i32.const -2 i32.and - i32.const 16296 + i32.const 16312 local.get $2 i32.const 1 i32.and @@ -22352,13 +22354,13 @@ i32.add local.tee $2 i32.load - i32.const 16296 + i32.const 16312 i32.eq if $if_1 local.get $2 local.get $3 i32.load - i32.const 16296 + i32.const 16312 call $__ZN6google8protobuf8internal14ArenaStringPtr14CreateInstanceEPNS0_5ArenaEPKNSt3__212basic_stringIcNS5_11char_traitsIcEENS5_9allocatorIcEEEE end ;; $if_1 local.get $1 @@ -22374,7 +22376,7 @@ local.get $2 i32.load local.tee $6 - i32.const 16296 + i32.const 16312 i32.eq if $if_2 local.get $2 @@ -22453,7 +22455,7 @@ local.get $0 i32.load offset=8 local.tee $1 - i32.const 16152 + i32.const 16168 i32.load local.get $1 select @@ -22489,11 +22491,11 @@ if $if local.get $2 i32.const 3 - i32.const 9131 + i32.const 9147 i32.const 1567 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $2 - i32.const 11511 + i32.const 11527 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call 
$__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $2 @@ -22585,7 +22587,7 @@ (local $1 i32) (local $2 i32) local.get $0 - i32.const 7152 + i32.const 7168 i32.store local.get $0 call $__ZN6google8protobuf5Value10SharedDtorEv @@ -22641,7 +22643,7 @@ (local $1 i32) (local $2 i32) local.get $0 - i32.const 7152 + i32.const 7168 i32.store local.get $0 call $__ZN6google8protobuf5Value10SharedDtorEv @@ -22708,19 +22710,19 @@ i32.const 21 i32.store offset=4 local.get $2 - i32.const 11593 + i32.const 11609 i64.load align=1 i64.store align=1 local.get $2 - i32.const 11601 + i32.const 11617 i64.load align=1 i64.store offset=8 align=1 local.get $2 - i32.const 11609 + i32.const 11625 i32.load align=1 i32.store offset=16 align=1 local.get $2 - i32.const 11613 + i32.const 11629 i32.load8_s i32.store8 offset=20 local.get $2 @@ -22800,10 +22802,10 @@ local.get $1 call $__ZN6google8protobuf5Value9MergeFromERKS1_ else - i32.const 11104 - i32.const 11145 + i32.const 11120 + i32.const 11161 i32.const 92 - i32.const 11194 + i32.const 11210 call $___assert_fail end ;; $if ) @@ -22876,7 +22878,7 @@ local.get $4 local.get $2 i32.const 1 - i32.const 9532 + i32.const 9548 call $__ZN6google8protobuf8internal14WireFormatLite16VerifyUtf8StringEPKciNS2_9OperationES4_ drop local.get $3 @@ -22887,7 +22889,7 @@ local.get $5 i32.load else - i32.const 16296 + i32.const 16312 end ;; $if_3 local.get $1 call $__ZN6google8protobuf8internal14WireFormatLite23WriteStringMaybeAliasedEiRKNSt3__212basic_stringIcNS3_11char_traitsIcEENS3_9allocatorIcEEEEPNS0_2io17CodedOutputStreamE @@ -22940,7 +22942,7 @@ local.tee $0 i32.const -2 i32.and - i32.const 16296 + i32.const 16312 local.get $0 i32.const 1 i32.and @@ -22959,7 +22961,7 @@ local.tee $2 i32.const -2 i32.and - i32.const 16296 + i32.const 16312 local.get $2 i32.const 1 i32.and @@ -22993,13 +22995,13 @@ ) (func $__GLOBAL__sub_I_proxy_wasm_intrinsics_cc (type $8) - i32.const 16328 + i32.const 16344 i64.const 0 i64.store align=4 - 
i32.const 16336 + i32.const 16352 i64.const 0 i64.store align=4 - i32.const 16344 + i32.const 16360 i32.const 1065353216 i32.store ) @@ -23097,7 +23099,7 @@ i32.add global.set $36 block $block - i32.const 16332 + i32.const 16348 i32.load local.tee $4 i32.eqz @@ -23129,7 +23131,7 @@ local.tee $6 i32.const 2 i32.shl - i32.const 16328 + i32.const 16344 i32.load i32.add i32.load @@ -23219,7 +23221,7 @@ br_if $block local.get $2 i32.load - i32.load offset=52 + i32.load offset=60 local.set $1 local.get $2 local.get $1 @@ -23249,7 +23251,7 @@ (local $3 i32) (local $4 i32) (local $5 i32) - i32.const 16332 + i32.const 16348 i32.load local.tee $2 i32.eqz @@ -23283,7 +23285,7 @@ local.tee $5 i32.const 2 i32.shl - i32.const 16328 + i32.const 16344 i32.load i32.add i32.load @@ -23377,7 +23379,7 @@ end ;; $if_9 local.get $0 i32.load - i32.load offset=44 + i32.load offset=52 local.set $1 local.get $0 local.get $1 @@ -23407,7 +23409,7 @@ i32.add global.set $36 block $block - i32.const 16332 + i32.const 16348 i32.load local.tee $7 i32.eqz @@ -23439,7 +23441,7 @@ local.tee $9 i32.const 2 i32.shl - i32.const 16328 + i32.const 16344 i32.load i32.add i32.load @@ -23529,7 +23531,7 @@ br_if $block local.get $0 i32.load - i32.load offset=80 + i32.load offset=88 local.set $9 i32.const 8 call $__Znwm @@ -23582,7 +23584,7 @@ (local $4 i32) (local $5 i32) (local $6 i32) - i32.const 16332 + i32.const 16348 i32.load local.tee $3 i32.eqz @@ -23616,7 +23618,7 @@ local.tee $6 i32.const 2 i32.shl - i32.const 16328 + i32.const 16344 i32.load i32.add i32.load @@ -23710,9 +23712,7 @@ end ;; $if_9 local.get $0 i32.load - i32.const -64 - i32.sub - i32.load + i32.load offset=72 local.set $2 local.get $0 local.get $1 @@ -23742,7 +23742,7 @@ i32.add global.set $36 block $block - i32.const 16332 + i32.const 16348 i32.load local.tee $6 i32.eqz @@ -23774,7 +23774,7 @@ local.tee $8 i32.const 2 i32.shl - i32.const 16328 + i32.const 16344 i32.load i32.add i32.load @@ -23864,7 +23864,7 @@ br_if $block local.get 
$0 i32.load - i32.load offset=76 + i32.load offset=84 local.set $8 i32.const 8 call $__Znwm @@ -23916,7 +23916,7 @@ (local $4 i32) (local $5 i32) (local $6 i32) - i32.const 16332 + i32.const 16348 i32.load local.tee $3 i32.eqz @@ -23950,7 +23950,7 @@ local.tee $6 i32.const 2 i32.shl - i32.const 16328 + i32.const 16344 i32.load i32.add i32.load @@ -24044,7 +24044,7 @@ end ;; $if_9 local.get $0 i32.load - i32.load offset=68 + i32.load offset=76 local.set $2 local.get $0 local.get $1 @@ -24064,7 +24064,7 @@ (local $4 i32) (local $5 i32) (local $6 i32) - i32.const 16332 + i32.const 16348 i32.load local.tee $3 i32.eqz @@ -24098,7 +24098,7 @@ local.tee $6 i32.const 2 i32.shl - i32.const 16328 + i32.const 16344 i32.load i32.add i32.load @@ -24192,7 +24192,7 @@ end ;; $if_9 local.get $0 i32.load - i32.load offset=72 + i32.load offset=80 local.set $2 local.get $0 local.get $1 @@ -24228,7 +24228,7 @@ i32.add global.set $36 block $block - i32.const 16332 + i32.const 16348 i32.load local.tee $10 i32.eqz @@ -24260,7 +24260,7 @@ local.tee $12 i32.const 2 i32.shl - i32.const 16328 + i32.const 16344 i32.load i32.add i32.load @@ -24358,7 +24358,7 @@ br_if $block local.get $0 i32.load - i32.load offset=60 + i32.load offset=68 local.set $12 i32.const 8 call $__Znwm @@ -24461,7 +24461,7 @@ (local $3 i32) (local $4 i32) (local $5 i32) - i32.const 16332 + i32.const 16348 i32.load local.tee $2 i32.eqz @@ -24495,7 +24495,7 @@ local.tee $5 i32.const 2 i32.shl - i32.const 16328 + i32.const 16344 i32.load i32.add i32.load @@ -24589,7 +24589,7 @@ end ;; $if_9 local.get $0 i32.load - i32.load offset=48 + i32.load offset=56 local.set $1 local.get $0 local.get $1 @@ -24610,7 +24610,7 @@ (local $5 i32) (local $6 i32) (local $7 i32) - i32.const 16332 + i32.const 16348 i32.load local.tee $4 i32.eqz @@ -24645,7 +24645,7 @@ local.tee $7 i32.const 2 i32.shl - i32.const 16328 + i32.const 16344 i32.load i32.add i32.load @@ -24743,7 +24743,7 @@ end ;; $if_9 local.get $0 i32.load - i32.load offset=24 + 
i32.load offset=28 local.set $3 local.get $0 local.get $1 @@ -24766,7 +24766,7 @@ (local $3 i32) (local $4 i32) (local $5 i32) - i32.const 16332 + i32.const 16348 i32.load local.tee $2 i32.eqz @@ -24801,7 +24801,7 @@ local.tee $5 i32.const 2 i32.shl - i32.const 16328 + i32.const 16344 i32.load i32.add i32.load @@ -24908,6 +24908,156 @@ call_indirect $29 (type $4) ) + (func $_proxy_onRequestMetadata (type $4) + (param $0 i32) + (result i32) + (local $1 i32) + (local $2 i32) + (local $3 i32) + (local $4 i32) + (local $5 i32) + i32.const 16348 + i32.load + local.tee $2 + i32.eqz + if $if + i32.const 0 + return + end ;; $if + local.get $2 + local.get $2 + i32.const -1 + i32.add + local.tee $3 + i32.and + i32.eqz + local.tee $4 + if $if_0 (result i32) + local.get $0 + local.get $3 + i32.and + else + local.get $2 + local.get $0 + i32.gt_u + if $if_1 (result i32) + local.get $0 + else + local.get $0 + local.get $2 + call $i32u-rem + end ;; $if_1 + end ;; $if_0 + local.tee $5 + i32.const 2 + i32.shl + i32.const 16344 + i32.load + i32.add + i32.load + local.tee $1 + i32.eqz + if $if_2 + i32.const 0 + return + end ;; $if_2 + local.get $1 + i32.load + local.tee $1 + i32.eqz + if $if_3 + i32.const 0 + return + end ;; $if_3 + block $block + local.get $4 + if $if_4 + loop $loop + local.get $0 + local.get $1 + i32.load offset=4 + local.tee $2 + i32.eq + local.tee $4 + local.get $5 + local.get $2 + local.get $3 + i32.and + i32.eq + i32.or + if $if_5 + local.get $4 + if $if_6 + local.get $0 + local.get $1 + i32.load offset=8 + i32.eq + br_if $block + end ;; $if_6 + local.get $1 + i32.load + local.tee $1 + br_if $loop + end ;; $if_5 + end ;; $loop + else + loop $loop_0 + block $block_0 + local.get $0 + local.get $1 + i32.load offset=4 + local.tee $3 + i32.eq + if $if_7 + local.get $0 + local.get $1 + i32.load offset=8 + i32.eq + br_if $block + else + local.get $3 + local.get $2 + i32.ge_u + if $if_8 + local.get $3 + local.get $2 + call $i32u-rem + local.set $3 + end ;; $if_8 + 
local.get $3 + local.get $5 + i32.ne + br_if $block_0 + end ;; $if_7 + local.get $1 + i32.load + local.tee $1 + br_if $loop_0 + end ;; $block_0 + end ;; $loop_0 + end ;; $if_4 + i32.const 0 + return + end ;; $block + local.get $1 + i32.load offset=12 + local.tee $0 + i32.eqz + if $if_9 + i32.const 0 + return + end ;; $if_9 + local.get $0 + i32.load + i32.load offset=24 + local.set $1 + local.get $0 + local.get $1 + i32.const 63 + i32.and + call_indirect $29 (type $4) + ) + (func $_proxy_onRequestTrailers (type $4) (param $0 i32) (result i32) @@ -24916,7 +25066,7 @@ (local $3 i32) (local $4 i32) (local $5 i32) - i32.const 16332 + i32.const 16348 i32.load local.tee $2 i32.eqz @@ -24951,7 +25101,7 @@ local.tee $5 i32.const 2 i32.shl - i32.const 16328 + i32.const 16344 i32.load i32.add i32.load @@ -25049,7 +25199,7 @@ end ;; $if_9 local.get $0 i32.load - i32.load offset=28 + i32.load offset=32 local.set $1 local.get $0 local.get $1 @@ -25068,7 +25218,7 @@ (local $5 i32) (local $6 i32) (local $7 i32) - i32.const 16332 + i32.const 16348 i32.load local.tee $4 i32.eqz @@ -25103,7 +25253,7 @@ local.tee $7 i32.const 2 i32.shl - i32.const 16328 + i32.const 16344 i32.load i32.add i32.load @@ -25201,7 +25351,7 @@ end ;; $if_9 local.get $0 i32.load - i32.load offset=36 + i32.load offset=44 local.set $3 local.get $0 local.get $1 @@ -25224,7 +25374,7 @@ (local $3 i32) (local $4 i32) (local $5 i32) - i32.const 16332 + i32.const 16348 i32.load local.tee $2 i32.eqz @@ -25259,7 +25409,7 @@ local.tee $5 i32.const 2 i32.shl - i32.const 16328 + i32.const 16344 i32.load i32.add i32.load @@ -25357,7 +25507,7 @@ end ;; $if_9 local.get $0 i32.load - i32.load offset=32 + i32.load offset=36 local.set $1 local.get $0 local.get $1 @@ -25366,7 +25516,7 @@ call_indirect $29 (type $4) ) - (func $_proxy_onResponseTrailers (type $4) + (func $_proxy_onResponseMetadata (type $4) (param $0 i32) (result i32) (local $1 i32) @@ -25374,7 +25524,7 @@ (local $3 i32) (local $4 i32) (local $5 i32) - i32.const 
16332 + i32.const 16348 i32.load local.tee $2 i32.eqz @@ -25409,7 +25559,7 @@ local.tee $5 i32.const 2 i32.shl - i32.const 16328 + i32.const 16344 i32.load i32.add i32.load @@ -25516,6 +25666,156 @@ call_indirect $29 (type $4) ) + (func $_proxy_onResponseTrailers (type $4) + (param $0 i32) + (result i32) + (local $1 i32) + (local $2 i32) + (local $3 i32) + (local $4 i32) + (local $5 i32) + i32.const 16348 + i32.load + local.tee $2 + i32.eqz + if $if + i32.const 0 + return + end ;; $if + local.get $2 + local.get $2 + i32.const -1 + i32.add + local.tee $3 + i32.and + i32.eqz + local.tee $4 + if $if_0 (result i32) + local.get $0 + local.get $3 + i32.and + else + local.get $2 + local.get $0 + i32.gt_u + if $if_1 (result i32) + local.get $0 + else + local.get $0 + local.get $2 + call $i32u-rem + end ;; $if_1 + end ;; $if_0 + local.tee $5 + i32.const 2 + i32.shl + i32.const 16344 + i32.load + i32.add + i32.load + local.tee $1 + i32.eqz + if $if_2 + i32.const 0 + return + end ;; $if_2 + local.get $1 + i32.load + local.tee $1 + i32.eqz + if $if_3 + i32.const 0 + return + end ;; $if_3 + block $block + local.get $4 + if $if_4 + loop $loop + local.get $0 + local.get $1 + i32.load offset=4 + local.tee $2 + i32.eq + local.tee $4 + local.get $5 + local.get $2 + local.get $3 + i32.and + i32.eq + i32.or + if $if_5 + local.get $4 + if $if_6 + local.get $0 + local.get $1 + i32.load offset=8 + i32.eq + br_if $block + end ;; $if_6 + local.get $1 + i32.load + local.tee $1 + br_if $loop + end ;; $if_5 + end ;; $loop + else + loop $loop_0 + block $block_0 + local.get $0 + local.get $1 + i32.load offset=4 + local.tee $3 + i32.eq + if $if_7 + local.get $0 + local.get $1 + i32.load offset=8 + i32.eq + br_if $block + else + local.get $3 + local.get $2 + i32.ge_u + if $if_8 + local.get $3 + local.get $2 + call $i32u-rem + local.set $3 + end ;; $if_8 + local.get $3 + local.get $5 + i32.ne + br_if $block_0 + end ;; $if_7 + local.get $1 + i32.load + local.tee $1 + br_if $loop_0 + end ;; $block_0 
+ end ;; $loop_0 + end ;; $if_4 + i32.const 0 + return + end ;; $block + local.get $1 + i32.load offset=12 + local.tee $0 + i32.eqz + if $if_9 + i32.const 0 + return + end ;; $if_9 + local.get $0 + i32.load + i32.load offset=48 + local.set $1 + local.get $0 + local.get $1 + i32.const 63 + i32.and + call_indirect $29 (type $4) + ) + (func $_proxy_onStart (type $8) (local $0 i32) (local $1 i32) @@ -25541,7 +25841,9 @@ call $__ZL13ensureContextj local.tee $0 i32.load - i32.load offset=56 + i32.const -64 + i32.sub + i32.load local.set $1 local.get $0 local.get $1 @@ -25636,7 +25938,7 @@ end ;; $if local.get $1 local.get $0 - call $__ZN7Context3NewEj + call $__Z10NewContextj local.get $1 i32.load local.set $3 @@ -25748,7 +26050,7 @@ local.tee $5 i32.store block $block - i32.const 16332 + i32.const 16348 i32.load local.tee $4 i32.eqz @@ -25782,7 +26084,7 @@ local.tee $2 i32.const 2 i32.shl - i32.const 16328 + i32.const 16344 i32.load i32.add i32.load @@ -25852,13 +26154,13 @@ end ;; $if_3 end ;; $if_2 end ;; $if - i32.const 16344 + i32.const 16360 f32.load local.tee $10 local.get $4 f32.convert_i32_u f32.mul - i32.const 16340 + i32.const 16356 i32.load i32.const 1 i32.add @@ -25885,7 +26187,7 @@ i32.shl i32.or local.set $2 - i32.const 16328 + i32.const 16344 local.get $11 local.get $10 f32.div @@ -25901,7 +26203,7 @@ local.get $7 i32.load local.set $2 - i32.const 16332 + i32.const 16348 i32.load local.tee $3 i32.const -1 @@ -25932,7 +26234,7 @@ local.get $4 local.set $3 end ;; $if_6 - i32.const 16328 + i32.const 16344 i32.load local.get $2 i32.const 2 @@ -25950,19 +26252,19 @@ i32.store else local.get $1 - i32.const 16336 + i32.const 16352 i32.load i32.store - i32.const 16336 + i32.const 16352 local.get $1 i32.store - i32.const 16328 + i32.const 16344 i32.load local.get $2 i32.const 2 i32.shl i32.add - i32.const 16336 + i32.const 16352 i32.store local.get $1 i32.load @@ -25994,7 +26296,7 @@ i32.and end ;; $if_11 local.set $2 - i32.const 16328 + i32.const 16344 i32.load 
local.get $2 i32.const 2 @@ -26004,8 +26306,8 @@ i32.store end ;; $if_10 end ;; $if_9 - i32.const 16340 - i32.const 16340 + i32.const 16356 + i32.const 16356 i32.load i32.const 1 i32.add @@ -26166,7 +26468,7 @@ local.tee $3 call $__ZNSt11logic_errorC2EPKc local.get $3 - i32.const 8456 + i32.const 8472 i32.store local.get $3 i32.const 6752 @@ -26498,7 +26800,7 @@ local.get $0 i32.load local.set $1 - i32.const 16332 + i32.const 16348 i32.load local.tee $3 i32.eqz @@ -26533,7 +26835,7 @@ local.tee $5 i32.const 2 i32.shl - i32.const 16328 + i32.const 16344 i32.load local.tee $4 i32.add @@ -26662,7 +26964,7 @@ block $block_1 (result i32) block $block_2 local.get $2 - i32.const 16336 + i32.const 16352 i32.eq br_if $block_2 local.get $2 @@ -26757,7 +27059,7 @@ local.tee $4 i32.ne if $if_20 (result i32) - i32.const 16328 + i32.const 16344 i32.load local.get $4 i32.const 2 @@ -26777,8 +27079,8 @@ local.get $1 i32.const 0 i32.store - i32.const 16340 - i32.const 16340 + i32.const 16356 + i32.const 16356 i32.load i32.const -1 i32.add @@ -26819,7 +27121,7 @@ (param $0 i32) (local $1 i32) local.get $0 - i32.const 7568 + i32.const 7584 i32.store local.get $0 i32.const 12 @@ -26840,7 +27142,7 @@ (param $0 i32) (local $1 i32) local.get $0 - i32.const 7568 + i32.const 7584 i32.store local.get $0 i32.const 12 @@ -26935,7 +27237,7 @@ local.get $1 i32.const 3 i32.store - i32.const 16348 + i32.const 16364 i32.load i32.const -1 i32.ne @@ -26949,7 +27251,7 @@ local.get $2 call $__ZNSt3__211__call_onceERVmPvPFvS2_E end ;; $if_0 - i32.const 16352 + i32.const 16368 i32.load local.tee $2 call $__ZNSt3__25mutex4lockEv @@ -27008,7 +27310,7 @@ local.get $0 i32.const 0 i32.store offset=24 - i32.const 16352 + i32.const 16368 local.get $0 i32.store i32.const 51 @@ -27038,7 +27340,7 @@ (param $2 i32) (param $3 i32) local.get $0 - i32.const 7568 + i32.const 7584 i32.store local.get $0 local.get $1 @@ -27074,7 +27376,7 @@ global.set $36 return end ;; $if - i32.const 7700 + i32.const 7716 i32.load 
local.set $5 local.get $3 @@ -27116,7 +27418,7 @@ (func $__ZN6google8protobuf8internal22DeleteLogSilencerCountEv (type $8) (local $0 i32) - i32.const 16352 + i32.const 16368 i32.load local.tee $0 if $if @@ -27125,7 +27427,7 @@ local.get $0 call $__ZdlPv end ;; $if - i32.const 16352 + i32.const 16368 i32.const 0 i32.store ) @@ -27152,11 +27454,11 @@ i32.const 16 i32.add global.set $36 - i32.const 16288 + i32.const 16304 i32.load8_s i32.eqz if $if - i32.const 16288 + i32.const 16304 call $___cxa_guard_acquire if $if_0 i32.const 40 @@ -27176,12 +27478,12 @@ local.get $2 i64.const 0 i64.store offset=32 align=4 - i32.const 16356 + i32.const 16372 local.get $2 i32.store end ;; $if_0 end ;; $if - i32.const 16356 + i32.const 16372 i32.load local.tee $2 i32.const 12 @@ -27293,7 +27595,7 @@ local.tee $3 call $__ZNSt11logic_errorC2EPKc local.get $3 - i32.const 8456 + i32.const 8472 i32.store local.get $3 i32.const 6752 @@ -27421,7 +27723,7 @@ i32.store local.get $2 i32.const 128 - i32.const 12481 + i32.const 12497 local.get $3 call $_snprintf drop @@ -27459,7 +27761,7 @@ i32.store local.get $2 i32.const 128 - i32.const 14432 + i32.const 14448 local.get $3 call $_snprintf drop @@ -27530,14 +27832,14 @@ i32.const 16 i32.add global.set $36 - i32.const 16360 + i32.const 16376 i64.const 0 i64.store align=4 - i32.const 16368 + i32.const 16384 i64.const 0 i64.store align=4 local.get $0 - i32.const 17108 + i32.const 17124 i32.store local.get $0 i32.const 0 @@ -27549,12 +27851,12 @@ local.get $0 i64.load align=4 i64.store align=4 - i32.const 16376 + i32.const 16392 i32.const 1 local.get $1 call $__ZN6google8protobuf4util6StatusC2ENS1_5error4CodeENS0_11StringPieceE local.get $0 - i32.const 17108 + i32.const 17124 i32.store local.get $0 i32.const 0 @@ -27563,7 +27865,7 @@ local.get $0 i64.load align=4 i64.store align=4 - i32.const 16392 + i32.const 16408 i32.const 2 local.get $1 call $__ZN6google8protobuf4util6StatusC2ENS1_5error4CodeENS0_11StringPieceE @@ -27780,11 +28082,11 @@ if 
$if_0 local.get $4 i32.const 3 - i32.const 12484 + i32.const 12500 i32.const 116 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $4 - i32.const 12509 + i32.const 12525 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $4 @@ -38971,11 +39273,11 @@ end ;; $block_0 local.get $14 i32.const 3 - i32.const 12596 + i32.const 12612 i32.const 571 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $14 - i32.const 12638 + i32.const 12654 call $__ZN6google8protobuf8internal10LogMessagelsEPKc local.get $4 call $__ZN6google8protobuf8internal10LogMessagelsEi @@ -39010,16 +39312,16 @@ global.set $36 local.get $1 i32.const 3 - i32.const 12596 + i32.const 12612 i32.const 534 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $1 - i32.const 12638 + i32.const 12654 call $__ZN6google8protobuf8internal10LogMessagelsEPKc local.get $0 i32.load offset=4 call $__ZN6google8protobuf8internal10LogMessagelsEj - i32.const 12668 + i32.const 12684 call $__ZN6google8protobuf8internal10LogMessagelsEPKc local.get $0 i32.load offset=12 @@ -39040,22 +39342,22 @@ i32.const 32 i32.add global.set $36 - i32.const 16320 + i32.const 16336 i32.load8_s i32.eqz if $if - i32.const 16320 + i32.const 16336 call $___cxa_guard_acquire drop end ;; $if call $_pthread_self local.set $1 - i32.const 16436 + i32.const 16452 i32.load local.get $1 call $_pthread_equal if $if_0 - i32.const 7040 + i32.const 7056 i32.load i32.const 1 i32.eq @@ -39066,11 +39368,11 @@ end ;; $if_1 local.get $0 i32.const 3 - i32.const 12596 + i32.const 12612 i32.const 801 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $0 - i32.const 12680 + i32.const 12696 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $0 @@ -39079,35 +39381,35 @@ global.set $36 return end ;; 
$if_0 - i32.const 16312 + i32.const 16328 i32.load8_s i32.eqz if $if_2 - i32.const 16312 + i32.const 16328 call $___cxa_guard_acquire if $if_3 - i32.const 16296 + i32.const 16312 i64.const 0 i64.store - i32.const 16304 + i32.const 16320 i32.const 0 i32.store i32.const 52 - i32.const 16296 + i32.const 16312 call $__ZN6google8protobuf8internal13OnShutdownRunEPFvPKvES3_ end ;; $if_3 end ;; $if_2 - i32.const 16408 + i32.const 16424 call $__ZNSt3__25mutex4lockEv - i32.const 16436 + i32.const 16452 local.get $1 i32.store - i32.const 7040 + i32.const 7056 call $__ZN6google8protobuf8internal12_GLOBAL__N_111InitSCC_DFSEPNS1_11SCCInfoBaseE - i32.const 16436 + i32.const 16452 i32.const 0 i32.store - i32.const 16408 + i32.const 16424 call $__ZNSt3__25mutex6unlockEv local.get $0 global.set $36 @@ -39203,31 +39505,31 @@ i32.const 50 i32.store offset=4 local.get $2 - i32.const 12845 + i32.const 12861 i64.load align=1 i64.store align=1 local.get $2 - i32.const 12853 + i32.const 12869 i64.load align=1 i64.store offset=8 align=1 local.get $2 - i32.const 12861 + i32.const 12877 i64.load align=1 i64.store offset=16 align=1 local.get $2 - i32.const 12869 + i32.const 12885 i64.load align=1 i64.store offset=24 align=1 local.get $2 - i32.const 12877 + i32.const 12893 i64.load align=1 i64.store offset=32 align=1 local.get $2 - i32.const 12885 + i32.const 12901 i64.load align=1 i64.store offset=40 align=1 local.get $2 - i32.const 12893 + i32.const 12909 i32.load16_s align=1 i32.store16 offset=48 align=1 local.get $2 @@ -39247,7 +39549,7 @@ i32.load local.set $2 local.get $0 - i32.const 17109 + i32.const 17125 i32.load8_s i32.const 1 i32.and @@ -39357,11 +39659,11 @@ if $if_0 local.get $3 i32.const 3 - i32.const 12775 + i32.const 12791 i32.const 373 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $3 - i32.const 12807 + i32.const 12823 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE 
local.get $3 @@ -39457,13 +39759,13 @@ if $if_0 local.get $3 i32.const 3 - i32.const 12928 + i32.const 12944 i32.const 59 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $3 - i32.const 12962 + i32.const 12978 call $__ZN6google8protobuf8internal10LogMessagelsEPKc - i32.const 13079 + i32.const 13095 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $3 @@ -41285,11 +41587,11 @@ if $if_1 local.get $4 i32.const 3 - i32.const 13127 + i32.const 13143 i32.const 507 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $4 - i32.const 13163 + i32.const 13179 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $4 @@ -41506,11 +41808,11 @@ if $if_1 local.get $7 i32.const 3 - i32.const 13127 + i32.const 13143 i32.const 516 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $7 - i32.const 13163 + i32.const 13179 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $7 @@ -42230,37 +42532,37 @@ i32.load offset=8 i32.store offset=8 end ;; $if_0 - i32.const 13209 + i32.const 13225 i32.const 0 local.get $2 i32.const 1 i32.eq select - i32.const 13221 + i32.const 13237 local.get $2 select local.set $1 local.get $7 i32.const 2 - i32.const 13127 + i32.const 13143 i32.const 626 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $7 - i32.const 13235 + i32.const 13251 call $__ZN6google8protobuf8internal10LogMessagelsEPKc local.get $4 call $__ZN6google8protobuf8internal10LogMessagelsERKNSt3__212basic_stringIcNS3_11char_traitsIcEENS3_9allocatorIcEEEE - i32.const 13248 + i32.const 13264 call $__ZN6google8protobuf8internal10LogMessagelsEPKc - i32.const 13267 + i32.const 13283 call $__ZN6google8protobuf8internal10LogMessagelsEPKc local.get $1 
call $__ZN6google8protobuf8internal10LogMessagelsEPKc - i32.const 13284 + i32.const 13300 call $__ZN6google8protobuf8internal10LogMessagelsEPKc - i32.const 13297 + i32.const 13313 call $__ZN6google8protobuf8internal10LogMessagelsEPKc - i32.const 13353 + i32.const 13369 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $7 @@ -43074,11 +43376,11 @@ if $if_2 local.get $2 i32.const 3 - i32.const 13361 + i32.const 13377 i32.const 591 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $2 - i32.const 13396 + i32.const 13412 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $2 @@ -43224,16 +43526,16 @@ local.set $2 local.get $1 i32.const 2 - i32.const 13361 + i32.const 13377 i32.const 190 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $1 - i32.const 13433 + i32.const 13449 call $__ZN6google8protobuf8internal10LogMessagelsEPKc local.get $0 i32.load offset=36 call $__ZN6google8protobuf8internal10LogMessagelsEi - i32.const 13500 + i32.const 13516 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $1 @@ -45869,13 +46171,13 @@ if $if local.get $3 i32.const 3 - i32.const 13645 + i32.const 13661 i32.const 132 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $3 - i32.const 13725 + i32.const 13741 call $__ZN6google8protobuf8internal10LogMessagelsEPKc - i32.const 13769 + i32.const 13785 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $3 @@ -45898,11 +46200,11 @@ if $if_0 local.get $3 i32.const 3 - i32.const 13645 + i32.const 13661 i32.const 134 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $3 - i32.const 13824 + i32.const 13840 
call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $3 @@ -45936,11 +46238,11 @@ local.set $4 local.get $2 i32.const 3 - i32.const 13645 + i32.const 13661 i32.const 135 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $2 - i32.const 13694 + i32.const 13710 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $2 @@ -46006,11 +46308,11 @@ if $if local.get $3 i32.const 3 - i32.const 13645 + i32.const 13661 i32.const 151 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $3 - i32.const 13914 + i32.const 13930 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $3 @@ -46084,13 +46386,13 @@ end ;; $if_3 local.get $4 i32.const 2 - i32.const 13645 + i32.const 13661 i32.const 164 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $4 - i32.const 13991 + i32.const 14007 call $__ZN6google8protobuf8internal10LogMessagelsEPKc - i32.const 14041 + i32.const 14057 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $4 @@ -46168,11 +46470,11 @@ if $if local.get $2 i32.const 3 - i32.const 13645 + i32.const 13661 i32.const 182 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $2 - i32.const 13694 + i32.const 13710 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $2 @@ -46196,11 +46498,11 @@ if $if_0 local.get $2 i32.const 3 - i32.const 13645 + i32.const 13661 i32.const 183 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $2 - i32.const 13914 + i32.const 13930 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call 
$__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $2 @@ -46231,11 +46533,11 @@ if $if_2 local.get $3 i32.const 3 - i32.const 13645 + i32.const 13661 i32.const 184 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $3 - i32.const 13946 + i32.const 13962 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $3 @@ -46300,11 +46602,11 @@ if $if local.get $1 i32.const 3 - i32.const 13645 + i32.const 13661 i32.const 189 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $1 - i32.const 13914 + i32.const 13930 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $1 @@ -46341,7 +46643,7 @@ (param $1 i32) (param $2 i32) local.get $0 - i32.const 7588 + i32.const 7604 i32.store local.get $0 local.get $1 @@ -46364,7 +46666,7 @@ (param $0 i32) (param $1 i32) local.get $0 - i32.const 7624 + i32.const 7640 i32.store local.get $0 local.get $1 @@ -46472,7 +46774,7 @@ i32.const 0 i32.store offset=8 local.get $0 - i32.const 13229 + i32.const 13245 local.get $2 call $__ZN6google8protobuf13StringAppendVEPNSt3__212basic_stringIcNS1_11char_traitsIcEENS1_9allocatorIcEEEEPKcPi local.get $2 @@ -46505,7 +46807,7 @@ i32.const 241 return end ;; $if - i32.const 7668 + i32.const 7684 i32.load local.set $13 local.get $0 @@ -46515,18 +46817,18 @@ i32.const -7 i32.add local.set $10 - i32.const 7696 + i32.const 7712 i32.load local.set $4 - i32.const 7676 + i32.const 7692 i32.load local.set $11 - i32.const 7680 + i32.const 7696 i32.load local.set $12 - i32.const 7684 + i32.const 7700 i32.load - i32.const 7652 + i32.const 7668 i32.load i32.add local.tee $8 @@ -46739,7 +47041,7 @@ local.get $1 local.get $14 i32.sub - i32.const 7656 + i32.const 7672 i32.load i32.ge_u if $if_7 @@ -46771,7 +47073,7 @@ local.get $1 local.get $8 i32.sub - i32.const 7656 + i32.const 7672 
i32.load i32.lt_u if $if_9 (result i32) @@ -46977,11 +47279,11 @@ local.set $1 local.get $0 i32.const 3 - i32.const 14103 + i32.const 14119 i32.const 47 call $__ZN6google8protobuf8internal10LogMessageC2ENS0_8LogLevelEPKci local.get $0 - i32.const 14142 + i32.const 14158 call $__ZN6google8protobuf8internal10LogMessagelsEPKc call $__ZN6google8protobuf8internal11LogFinisheraSERNS1_10LogMessageE local.get $0 @@ -47293,7 +47595,7 @@ (func $___errno_location (type $12) (result i32) - i32.const 16504 + i32.const 16520 ) (func $_dummy_252 (type $4) @@ -47559,69 +47861,69 @@ i32.add local.set $5 local.get $4 - i32.const 7956 + i32.const 7972 i64.load align=4 i64.store align=4 local.get $4 - i32.const 7964 + i32.const 7980 i64.load align=4 i64.store offset=8 align=4 local.get $4 - i32.const 7972 + i32.const 7988 i64.load align=4 i64.store offset=16 align=4 local.get $4 - i32.const 7980 + i32.const 7996 i64.load align=4 i64.store offset=24 align=4 local.get $4 - i32.const 7988 + i32.const 8004 i64.load align=4 i64.store offset=32 align=4 local.get $4 - i32.const 7996 + i32.const 8012 i64.load align=4 i64.store offset=40 align=4 local.get $4 - i32.const 8004 + i32.const 8020 i64.load align=4 i64.store offset=48 align=4 local.get $4 - i32.const 8012 + i32.const 8028 i64.load align=4 i64.store offset=56 align=4 local.get $4 i32.const -64 i32.sub - i32.const 8020 + i32.const 8036 i64.load align=4 i64.store align=4 local.get $4 - i32.const 8028 + i32.const 8044 i64.load align=4 i64.store offset=72 align=4 local.get $4 - i32.const 8036 + i32.const 8052 i64.load align=4 i64.store offset=80 align=4 local.get $4 - i32.const 8044 + i32.const 8060 i64.load align=4 i64.store offset=88 align=4 local.get $4 - i32.const 8052 + i32.const 8068 i64.load align=4 i64.store offset=96 align=4 local.get $4 - i32.const 8060 + i32.const 8076 i64.load align=4 i64.store offset=104 align=4 local.get $4 - i32.const 8068 + i32.const 8084 i64.load align=4 i64.store offset=112 align=4 local.get $4 - 
i32.const 8076 + i32.const 8092 i32.load i32.store offset=120 block $block @@ -48691,7 +48993,7 @@ local.set $6 i32.const 0 local.set $10 - i32.const 14309 + i32.const 14325 local.set $7 local.get $1 local.get $6 @@ -48723,7 +49025,7 @@ i64.store i32.const 1 local.set $10 - i32.const 14309 + i32.const 14325 local.set $7 else local.get $5 @@ -48732,9 +49034,9 @@ i32.const 0 i32.ne local.set $10 - i32.const 14310 - i32.const 14311 - i32.const 14309 + i32.const 14326 + i32.const 14327 + i32.const 14325 local.get $5 i32.const 1 i32.and @@ -48749,7 +49051,7 @@ end ;; $block_26 i32.const 0 local.set $10 - i32.const 14309 + i32.const 14325 local.set $7 local.get $11 i64.load @@ -48764,7 +49066,7 @@ local.set $6 i32.const 0 local.set $10 - i32.const 14309 + i32.const 14325 local.set $16 i32.const 1 local.set $13 @@ -48783,7 +49085,7 @@ local.get $11 i32.load local.tee $5 - i32.const 14319 + i32.const 14335 local.get $5 select local.set $6 @@ -48836,7 +49138,7 @@ local.set $6 i32.const 0 local.set $10 - i32.const 14309 + i32.const 14325 local.set $16 local.get $1 local.set $13 @@ -48866,11 +49168,11 @@ local.tee $7 select local.set $10 - i32.const 14309 + i32.const 14325 local.get $6 i32.const 4 i32.shr_u - i32.const 14309 + i32.const 14325 i32.add local.get $7 select @@ -48891,7 +49193,7 @@ local.set $18 i32.const 0 local.set $10 - i32.const 14309 + i32.const 14325 local.set $16 local.get $1 local.get $14 @@ -49892,13 +50194,13 @@ local.tee $1 call $___DOUBLE_BITS local.set $24 - i32.const 14326 + i32.const 14342 local.set $18 i32.const 1 else - i32.const 14329 - i32.const 14332 - i32.const 14327 + i32.const 14345 + i32.const 14348 + i32.const 14343 local.get $4 i32.const 1 i32.and @@ -49921,8 +50223,8 @@ i64.const 9218868437227405312 i64.eq if $if_0 (result i32) - i32.const 14353 - i32.const 14357 + i32.const 14369 + i32.const 14373 local.get $5 i32.const 32 i32.and @@ -49930,8 +50232,8 @@ i32.ne local.tee $3 select - i32.const 14345 - i32.const 14349 + i32.const 14361 + 
i32.const 14365 local.get $3 select local.get $1 @@ -51237,7 +51539,7 @@ i32.eqz if $if_46 local.get $0 - i32.const 14361 + i32.const 14377 i32.const 1 call $_out_281 end ;; $if_46 @@ -51394,7 +51696,7 @@ br $block_4 end ;; $if_53 local.get $0 - i32.const 14361 + i32.const 14377 i32.const 1 call $_out_281 local.get $8 @@ -51732,7 +52034,7 @@ (func $_pthread_self (type $12) (result i32) - i32.const 8080 + i32.const 8096 ) (func $___strerror_l (type $6) @@ -52648,13 +52950,13 @@ (func $___ofl_lock (type $12) (result i32) - i32.const 16508 + i32.const 16524 call $___lock - i32.const 16516 + i32.const 16532 ) (func $___ofl_unlock (type $8) - i32.const 16508 + i32.const 16524 call $___unlock ) @@ -52673,7 +52975,7 @@ local.get $1 i32.store local.get $0 - i32.const 12361 + i32.const 12377 local.get $2 call $_vfprintf local.set $0 @@ -52714,10 +53016,10 @@ end ;; $block local.set $0 else - i32.const 7952 + i32.const 7968 i32.load if $if_2 (result i32) - i32.const 7952 + i32.const 7968 i32.load call $_fflush else @@ -52893,7 +53195,7 @@ i32.const 245 i32.lt_u if $if (result i32) - i32.const 16520 + i32.const 16536 i32.load local.tee $7 i32.const 16 @@ -52925,7 +53227,7 @@ local.tee $1 i32.const 3 i32.shl - i32.const 16560 + i32.const 16576 i32.add local.tee $2 i32.const 8 @@ -52942,7 +53244,7 @@ local.get $2 i32.eq if $if_1 - i32.const 16520 + i32.const 16536 i32.const 1 local.get $1 i32.shl @@ -52952,7 +53254,7 @@ i32.and i32.store else - i32.const 16536 + i32.const 16552 i32.load local.get $0 i32.gt_u @@ -53002,7 +53304,7 @@ return end ;; $if_0 local.get $2 - i32.const 16528 + i32.const 16544 i32.load local.tee $13 i32.gt_u @@ -53081,7 +53383,7 @@ local.tee $1 i32.const 3 i32.shl - i32.const 16560 + i32.const 16576 i32.add local.tee $5 i32.const 8 @@ -53098,7 +53400,7 @@ local.get $5 i32.eq if $if_6 - i32.const 16520 + i32.const 16536 i32.const 1 local.get $1 i32.shl @@ -53109,7 +53411,7 @@ local.tee $4 i32.store else - i32.const 16536 + i32.const 16552 i32.load 
local.get $3 i32.gt_u @@ -53162,7 +53464,7 @@ i32.store local.get $13 if $if_9 - i32.const 16540 + i32.const 16556 i32.load local.set $2 local.get $13 @@ -53171,7 +53473,7 @@ local.tee $3 i32.const 3 i32.shl - i32.const 16560 + i32.const 16576 i32.add local.set $0 i32.const 1 @@ -53181,7 +53483,7 @@ local.get $4 i32.and if $if_10 - i32.const 16536 + i32.const 16552 i32.load local.get $0 i32.const 8 @@ -53199,7 +53501,7 @@ local.set $12 end ;; $if_11 else - i32.const 16520 + i32.const 16536 local.get $3 local.get $4 i32.or @@ -53224,10 +53526,10 @@ local.get $0 i32.store offset=12 end ;; $if_9 - i32.const 16528 + i32.const 16544 local.get $5 i32.store - i32.const 16540 + i32.const 16556 local.get $7 i32.store local.get $14 @@ -53235,7 +53537,7 @@ local.get $10 return end ;; $if_5 - i32.const 16524 + i32.const 16540 i32.load local.tee $12 if $if_12 (result i32) @@ -53298,7 +53600,7 @@ i32.add i32.const 2 i32.shl - i32.const 16824 + i32.const 16840 i32.add i32.load local.tee $4 @@ -53347,7 +53649,7 @@ br $loop end ;; $block end ;; $loop - i32.const 16536 + i32.const 16552 i32.load local.tee $15 local.get $4 @@ -53478,7 +53780,7 @@ local.tee $0 i32.const 2 i32.shl - i32.const 16824 + i32.const 16840 i32.add local.tee $3 i32.load @@ -53490,7 +53792,7 @@ local.get $1 i32.eqz if $if_25 - i32.const 16524 + i32.const 16540 local.get $12 i32.const 1 local.get $0 @@ -53502,7 +53804,7 @@ br $block_2 end ;; $if_25 else - i32.const 16536 + i32.const 16552 i32.load local.get $11 i32.gt_u @@ -53528,7 +53830,7 @@ br_if $block_2 end ;; $if_26 end ;; $if_24 - i32.const 16536 + i32.const 16552 i32.load local.tee $3 local.get $1 @@ -53561,7 +53863,7 @@ i32.load offset=20 local.tee $0 if $if_30 - i32.const 16536 + i32.const 16552 i32.load local.get $0 i32.gt_u @@ -53619,7 +53921,7 @@ i32.store local.get $13 if $if_33 - i32.const 16540 + i32.const 16556 i32.load local.set $2 local.get $13 @@ -53628,7 +53930,7 @@ local.tee $3 i32.const 3 i32.shl - i32.const 16560 + i32.const 16576 i32.add 
local.set $0 i32.const 1 @@ -53638,7 +53940,7 @@ local.get $7 i32.and if $if_34 - i32.const 16536 + i32.const 16552 i32.load local.get $0 i32.const 8 @@ -53656,7 +53958,7 @@ local.set $16 end ;; $if_35 else - i32.const 16520 + i32.const 16536 local.get $3 local.get $7 i32.or @@ -53681,10 +53983,10 @@ local.get $0 i32.store offset=12 end ;; $if_33 - i32.const 16528 + i32.const 16544 local.get $10 i32.store - i32.const 16540 + i32.const 16556 local.get $8 i32.store end ;; $if_32 @@ -53715,7 +54017,7 @@ i32.const -8 i32.and local.set $4 - i32.const 16524 + i32.const 16540 i32.load local.tee $6 if $if_37 (result i32) @@ -53796,7 +54098,7 @@ local.get $18 i32.const 2 i32.shl - i32.const 16824 + i32.const 16840 i32.add i32.load local.tee $0 @@ -53957,7 +54259,7 @@ i32.add i32.const 2 i32.shl - i32.const 16824 + i32.const 16840 i32.add i32.load end ;; $if_43 @@ -54017,13 +54319,13 @@ local.get $5 if $if_46 (result i32) local.get $2 - i32.const 16528 + i32.const 16544 i32.load local.get $4 i32.sub i32.lt_u if $if_47 (result i32) - i32.const 16536 + i32.const 16552 i32.load local.tee $17 local.get $5 @@ -54154,7 +54456,7 @@ local.tee $0 i32.const 2 i32.shl - i32.const 16824 + i32.const 16840 i32.add local.tee $1 i32.load @@ -54166,7 +54468,7 @@ local.get $7 i32.eqz if $if_59 - i32.const 16524 + i32.const 16540 local.get $6 i32.const 1 local.get $0 @@ -54179,7 +54481,7 @@ br $block_8 end ;; $if_59 else - i32.const 16536 + i32.const 16552 i32.load local.get $15 i32.gt_u @@ -54209,7 +54511,7 @@ end ;; $if_61 end ;; $if_60 end ;; $if_58 - i32.const 16536 + i32.const 16552 i32.load local.tee $1 local.get $7 @@ -54242,7 +54544,7 @@ i32.load offset=20 local.tee $0 if $if_65 - i32.const 16536 + i32.const 16552 i32.load local.get $0 i32.gt_u @@ -54318,10 +54620,10 @@ local.get $1 i32.const 3 i32.shl - i32.const 16560 + i32.const 16576 i32.add local.set $0 - i32.const 16520 + i32.const 16536 i32.load local.tee $3 i32.const 1 @@ -54330,7 +54632,7 @@ local.tee $1 i32.and if $if_69 - 
i32.const 16536 + i32.const 16552 i32.load local.get $0 i32.const 8 @@ -54348,7 +54650,7 @@ local.set $19 end ;; $if_70 else - i32.const 16520 + i32.const 16536 local.get $1 local.get $3 i32.or @@ -54444,7 +54746,7 @@ local.tee $1 i32.const 2 i32.shl - i32.const 16824 + i32.const 16840 i32.add local.set $0 local.get $8 @@ -54467,7 +54769,7 @@ i32.and i32.eqz if $if_73 - i32.const 16524 + i32.const 16540 local.get $3 local.get $4 i32.or @@ -54548,7 +54850,7 @@ unreachable end ;; $if_75 end ;; $loop_4 - i32.const 16536 + i32.const 16552 i32.load local.get $4 i32.gt_u @@ -54571,7 +54873,7 @@ end ;; $if_77 end ;; $block_10 end ;; $if_74 - i32.const 16536 + i32.const 16552 i32.load local.tee $0 local.get $10 @@ -54626,13 +54928,13 @@ end ;; $if local.set $3 block $block_11 - i32.const 16528 + i32.const 16544 i32.load local.tee $1 local.get $3 i32.ge_u if $if_79 - i32.const 16540 + i32.const 16556 i32.load local.set $0 local.get $1 @@ -54642,13 +54944,13 @@ i32.const 15 i32.gt_u if $if_80 - i32.const 16540 + i32.const 16556 local.get $0 local.get $3 i32.add local.tee $4 i32.store - i32.const 16528 + i32.const 16544 local.get $2 i32.store local.get $4 @@ -54667,10 +54969,10 @@ i32.or i32.store offset=4 else - i32.const 16528 + i32.const 16544 i32.const 0 i32.store - i32.const 16540 + i32.const 16556 i32.const 0 i32.store local.get $0 @@ -54693,13 +54995,13 @@ br $block_11 end ;; $if_79 block $block_12 - i32.const 16532 + i32.const 16548 i32.load local.tee $1 local.get $3 i32.gt_u if $if_81 - i32.const 16532 + i32.const 16548 local.get $1 local.get $3 i32.sub @@ -54711,31 +55013,31 @@ i32.const 47 i32.add local.tee $6 - i32.const 16992 + i32.const 17008 i32.load if $if_82 (result i32) - i32.const 17000 + i32.const 17016 i32.load else - i32.const 17000 + i32.const 17016 i32.const 4096 i32.store - i32.const 16996 + i32.const 17012 i32.const 4096 i32.store - i32.const 17004 + i32.const 17020 i32.const -1 i32.store - i32.const 17008 + i32.const 17024 i32.const -1 i32.store - 
i32.const 17012 + i32.const 17028 i32.const 0 i32.store - i32.const 16964 + i32.const 16980 i32.const 0 i32.store - i32.const 16992 + i32.const 17008 local.get $14 i32.const -16 i32.and @@ -54761,12 +55063,12 @@ i32.const 0 return end ;; $if_83 - i32.const 16960 + i32.const 16976 i32.load local.tee $0 if $if_84 local.get $4 - i32.const 16952 + i32.const 16968 i32.load local.tee $2 i32.add @@ -54790,7 +55092,7 @@ local.set $10 block $block_13 block $block_14 - i32.const 16964 + i32.const 16980 i32.load i32.const 4 i32.and @@ -54801,12 +55103,12 @@ block $block_15 block $block_16 block $block_17 - i32.const 16544 + i32.const 16560 i32.load local.tee $0 i32.eqz br_if $block_17 - i32.const 16968 + i32.const 16984 local.set $2 loop $loop_5 block $block_18 @@ -54872,12 +55174,12 @@ if $if_89 (result i32) i32.const 0 else - i32.const 16952 + i32.const 16968 i32.load local.tee $5 local.get $4 local.get $0 - i32.const 16996 + i32.const 17012 i32.load local.tee $1 i32.const -1 @@ -54907,7 +55209,7 @@ i32.gt_u i32.and if $if_90 (result i32) - i32.const 16960 + i32.const 16976 i32.load local.tee $7 if $if_91 @@ -54965,7 +55267,7 @@ end ;; $if_94 unreachable end ;; $if_93 - i32.const 17000 + i32.const 17016 i32.load local.tee $2 local.get $6 @@ -55002,8 +55304,8 @@ end ;; $if_95 local.set $1 end ;; $block_15 - i32.const 16964 - i32.const 16964 + i32.const 16980 + i32.const 16980 i32.load i32.const 4 i32.or @@ -55058,28 +55360,28 @@ end ;; $if_96 br $block_13 end ;; $block_14 - i32.const 16952 + i32.const 16968 local.get $1 - i32.const 16952 + i32.const 16968 i32.load i32.add local.tee $2 i32.store local.get $2 - i32.const 16956 + i32.const 16972 i32.load i32.gt_u if $if_97 - i32.const 16956 + i32.const 16972 local.get $2 i32.store end ;; $if_97 - i32.const 16544 + i32.const 16560 i32.load local.tee $6 if $if_98 block $block_19 - i32.const 16968 + i32.const 16984 local.set $2 block $block_20 block $block_21 @@ -55142,17 +55444,17 @@ i32.add local.set $0 local.get $1 - i32.const 
16532 + i32.const 16548 i32.load i32.add local.tee $4 local.get $2 i32.sub local.set $1 - i32.const 16544 + i32.const 16560 local.get $0 i32.store - i32.const 16532 + i32.const 16548 local.get $1 i32.store local.get $0 @@ -55165,8 +55467,8 @@ i32.add i32.const 40 i32.store offset=4 - i32.const 16548 - i32.const 17008 + i32.const 16564 + i32.const 17024 i32.load i32.store br $block_19 @@ -55174,12 +55476,12 @@ end ;; $if_99 end ;; $block_20 local.get $0 - i32.const 16536 + i32.const 16552 i32.load local.tee $2 i32.lt_u if $if_101 - i32.const 16536 + i32.const 16552 local.get $0 i32.store local.get $0 @@ -55189,7 +55491,7 @@ local.get $1 i32.add local.set $5 - i32.const 16968 + i32.const 16984 local.set $4 block $block_22 block $block_23 @@ -55273,14 +55575,14 @@ local.get $6 i32.eq if $if_103 - i32.const 16532 + i32.const 16548 local.get $4 - i32.const 16532 + i32.const 16548 i32.load i32.add local.tee $0 i32.store - i32.const 16544 + i32.const 16560 local.get $7 i32.store local.get $7 @@ -55291,18 +55593,18 @@ else block $block_24 local.get $1 - i32.const 16540 + i32.const 16556 i32.load i32.eq if $if_104 - i32.const 16528 + i32.const 16544 local.get $4 - i32.const 16528 + i32.const 16544 i32.load i32.add local.tee $0 i32.store - i32.const 16540 + i32.const 16556 local.get $7 i32.store local.get $7 @@ -55347,7 +55649,7 @@ local.get $5 i32.const 3 i32.shl - i32.const 16560 + i32.const 16576 i32.add local.tee $0 i32.ne @@ -55371,8 +55673,8 @@ local.get $6 i32.eq if $if_109 - i32.const 16520 - i32.const 16520 + i32.const 16536 + i32.const 16536 i32.load i32.const 1 local.get $5 @@ -55537,7 +55839,7 @@ local.tee $0 i32.const 2 i32.shl - i32.const 16824 + i32.const 16840 i32.add local.tee $3 i32.load @@ -55549,8 +55851,8 @@ i32.store local.get $9 br_if $block_30 - i32.const 16524 - i32.const 16524 + i32.const 16540 + i32.const 16540 i32.load i32.const 1 local.get $0 @@ -55562,7 +55864,7 @@ br $block_25 end ;; $block_30 else - i32.const 16536 + i32.const 16552 i32.load 
local.get $10 i32.gt_u @@ -55588,7 +55890,7 @@ br_if $block_25 end ;; $if_121 end ;; $if_120 - i32.const 16536 + i32.const 16552 i32.load local.tee $3 local.get $9 @@ -55625,7 +55927,7 @@ local.tee $0 i32.eqz br_if $block_25 - i32.const 16536 + i32.const 16552 i32.load local.get $0 i32.gt_u @@ -55682,10 +55984,10 @@ local.get $3 i32.const 3 i32.shl - i32.const 16560 + i32.const 16576 i32.add local.set $0 - i32.const 16520 + i32.const 16536 i32.load local.tee $1 i32.const 1 @@ -55695,7 +55997,7 @@ i32.and if $if_127 block $block_31 - i32.const 16536 + i32.const 16552 i32.load local.get $0 i32.const 8 @@ -55714,7 +56016,7 @@ call $_abort end ;; $block_31 else - i32.const 16520 + i32.const 16536 local.get $1 local.get $3 i32.or @@ -55810,7 +56112,7 @@ local.tee $3 i32.const 2 i32.shl - i32.const 16824 + i32.const 16840 i32.add local.set $0 local.get $7 @@ -55825,7 +56127,7 @@ local.get $1 i32.const 0 i32.store - i32.const 16524 + i32.const 16540 i32.load local.tee $1 i32.const 1 @@ -55835,7 +56137,7 @@ i32.and i32.eqz if $if_131 - i32.const 16524 + i32.const 16540 local.get $1 local.get $4 i32.or @@ -55916,7 +56218,7 @@ unreachable end ;; $if_133 end ;; $loop_9 - i32.const 16536 + i32.const 16552 i32.load local.get $4 i32.gt_u @@ -55939,7 +56241,7 @@ end ;; $if_135 end ;; $block_32 end ;; $if_132 - i32.const 16536 + i32.const 16552 i32.load local.tee $0 local.get $11 @@ -55982,7 +56284,7 @@ return end ;; $if_102 end ;; $block_22 - i32.const 16968 + i32.const 16984 local.set $2 loop $loop_10 block $block_33 @@ -56039,7 +56341,7 @@ i32.const 8 i32.add local.set $4 - i32.const 16544 + i32.const 16560 local.get $0 i32.const 0 local.get $0 @@ -56058,7 +56360,7 @@ i32.add local.tee $10 i32.store - i32.const 16532 + i32.const 16548 local.get $1 i32.const -40 i32.add @@ -56077,8 +56379,8 @@ i32.add i32.const 40 i32.store offset=4 - i32.const 16548 - i32.const 17008 + i32.const 16564 + i32.const 17024 i32.load i32.store local.get $2 @@ -56088,23 +56390,23 @@ i32.const 27 
i32.store local.get $4 - i32.const 16968 + i32.const 16984 i64.load align=4 i64.store align=4 local.get $4 - i32.const 16976 + i32.const 16992 i64.load align=4 i64.store offset=8 align=4 - i32.const 16968 + i32.const 16984 local.get $0 i32.store - i32.const 16972 + i32.const 16988 local.get $1 i32.store - i32.const 16980 + i32.const 16996 i32.const 0 i32.store - i32.const 16976 + i32.const 16992 local.get $4 i32.store local.get $2 @@ -56161,10 +56463,10 @@ local.get $1 i32.const 3 i32.shl - i32.const 16560 + i32.const 16576 i32.add local.set $0 - i32.const 16520 + i32.const 16536 i32.load local.tee $2 i32.const 1 @@ -56173,7 +56475,7 @@ local.tee $1 i32.and if $if_141 - i32.const 16536 + i32.const 16552 i32.load local.get $0 i32.const 8 @@ -56191,7 +56493,7 @@ local.set $22 end ;; $if_142 else - i32.const 16520 + i32.const 16536 local.get $1 local.get $2 i32.or @@ -56287,7 +56589,7 @@ local.tee $1 i32.const 2 i32.shl - i32.const 16824 + i32.const 16840 i32.add local.set $0 local.get $6 @@ -56299,7 +56601,7 @@ local.get $9 i32.const 0 i32.store - i32.const 16524 + i32.const 16540 i32.load local.tee $2 i32.const 1 @@ -56309,7 +56611,7 @@ i32.and i32.eqz if $if_145 - i32.const 16524 + i32.const 16540 local.get $2 local.get $5 i32.or @@ -56390,7 +56692,7 @@ unreachable end ;; $if_147 end ;; $loop_12 - i32.const 16536 + i32.const 16552 i32.load local.get $5 i32.gt_u @@ -56413,7 +56715,7 @@ end ;; $if_149 end ;; $block_34 end ;; $if_146 - i32.const 16536 + i32.const 16552 i32.load local.tee $0 local.get $8 @@ -56449,7 +56751,7 @@ end ;; $if_139 end ;; $block_19 else - i32.const 16536 + i32.const 16552 i32.load local.tee $2 i32.eqz @@ -56458,37 +56760,25 @@ i32.lt_u i32.or if $if_151 - i32.const 16536 + i32.const 16552 local.get $0 i32.store end ;; $if_151 - i32.const 16968 + i32.const 16984 local.get $0 i32.store - i32.const 16972 + i32.const 16988 local.get $1 i32.store - i32.const 16980 + i32.const 16996 i32.const 0 i32.store - i32.const 16556 - i32.const 16992 - 
i32.load - i32.store - i32.const 16552 - i32.const -1 - i32.store i32.const 16572 - i32.const 16560 - i32.store - i32.const 16568 - i32.const 16560 - i32.store - i32.const 16580 - i32.const 16568 + i32.const 17008 + i32.load i32.store - i32.const 16576 i32.const 16568 + i32.const -1 i32.store i32.const 16588 i32.const 16576 @@ -56670,7 +56960,19 @@ i32.const 16816 i32.const 16808 i32.store - i32.const 16544 + i32.const 16828 + i32.const 16816 + i32.store + i32.const 16824 + i32.const 16816 + i32.store + i32.const 16836 + i32.const 16824 + i32.store + i32.const 16832 + i32.const 16824 + i32.store + i32.const 16560 local.get $0 i32.const 0 local.get $0 @@ -56689,7 +56991,7 @@ i32.add local.tee $4 i32.store - i32.const 16532 + i32.const 16548 local.get $1 i32.const -40 i32.add @@ -56708,18 +57010,18 @@ i32.add i32.const 40 i32.store offset=4 - i32.const 16548 - i32.const 17008 + i32.const 16564 + i32.const 17024 i32.load i32.store end ;; $if_98 - i32.const 16532 + i32.const 16548 i32.load local.tee $0 local.get $3 i32.gt_u if $if_152 - i32.const 16532 + i32.const 16548 local.get $0 local.get $3 i32.sub @@ -56736,9 +57038,9 @@ i32.const 0 return end ;; $block_12 - i32.const 16544 + i32.const 16560 local.get $3 - i32.const 16544 + i32.const 16560 i32.load local.tee $0 i32.add @@ -56790,7 +57092,7 @@ i32.const -8 i32.add local.tee $4 - i32.const 16536 + i32.const 16552 i32.load local.tee $12 i32.lt_u @@ -56850,7 +57152,7 @@ i32.add local.set $4 local.get $0 - i32.const 16540 + i32.const 16556 i32.load i32.eq if $if_5 @@ -56872,7 +57174,7 @@ local.set $5 br $block end ;; $if_6 - i32.const 16528 + i32.const 16544 local.get $4 i32.store local.get $3 @@ -56909,7 +57211,7 @@ local.get $2 i32.const 3 i32.shl - i32.const 16560 + i32.const 16576 i32.add local.tee $3 i32.ne @@ -56932,8 +57234,8 @@ local.get $5 i32.eq if $if_11 - i32.const 16520 - i32.const 16520 + i32.const 16536 + i32.const 16536 i32.load i32.const 1 local.get $2 @@ -57106,7 +57408,7 @@ local.tee $2 i32.const 2 
i32.shl - i32.const 16824 + i32.const 16840 i32.add local.tee $6 i32.load @@ -57118,8 +57420,8 @@ local.get $8 i32.eqz if $if_24 - i32.const 16524 - i32.const 16524 + i32.const 16540 + i32.const 16540 i32.load i32.const 1 local.get $2 @@ -57136,7 +57438,7 @@ br $block end ;; $if_24 else - i32.const 16536 + i32.const 16552 i32.load local.get $13 i32.gt_u @@ -57169,7 +57471,7 @@ end ;; $if_26 end ;; $if_25 end ;; $if_23 - i32.const 16536 + i32.const 16552 i32.load local.tee $6 local.get $8 @@ -57205,7 +57507,7 @@ i32.load offset=4 local.tee $2 if $if_30 - i32.const 16536 + i32.const 16552 i32.load local.get $2 i32.gt_u @@ -57280,18 +57582,18 @@ local.get $1 else local.get $7 - i32.const 16544 + i32.const 16560 i32.load i32.eq if $if_35 - i32.const 16532 + i32.const 16548 local.get $1 - i32.const 16532 + i32.const 16548 i32.load i32.add local.tee $0 i32.store - i32.const 16544 + i32.const 16560 local.get $3 i32.store local.get $3 @@ -57299,34 +57601,34 @@ i32.const 1 i32.or i32.store offset=4 - i32.const 16540 + i32.const 16556 i32.load local.get $3 i32.ne if $if_36 return end ;; $if_36 - i32.const 16540 + i32.const 16556 i32.const 0 i32.store - i32.const 16528 + i32.const 16544 i32.const 0 i32.store return end ;; $if_35 local.get $7 - i32.const 16540 + i32.const 16556 i32.load i32.eq if $if_37 - i32.const 16528 + i32.const 16544 local.get $1 - i32.const 16528 + i32.const 16544 i32.load i32.add local.tee $0 i32.store - i32.const 16540 + i32.const 16556 local.get $5 i32.store local.get $3 @@ -57365,12 +57667,12 @@ local.get $6 i32.const 3 i32.shl - i32.const 16560 + i32.const 16576 i32.add local.tee $0 i32.ne if $if_39 - i32.const 16536 + i32.const 16552 i32.load local.get $2 i32.gt_u @@ -57389,8 +57691,8 @@ local.get $2 i32.eq if $if_42 - i32.const 16520 - i32.const 16520 + i32.const 16536 + i32.const 16536 i32.load i32.const 1 local.get $6 @@ -57410,7 +57712,7 @@ i32.add local.set $16 else - i32.const 16536 + i32.const 16552 i32.load local.get $1 i32.gt_u @@ -57493,7 
+57795,7 @@ br $loop_0 end ;; $block_4 end ;; $loop_0 - i32.const 16536 + i32.const 16552 i32.load local.get $1 i32.gt_u @@ -57508,7 +57810,7 @@ end ;; $if_49 end ;; $block_3 else - i32.const 16536 + i32.const 16552 i32.load local.get $7 i32.load offset=8 @@ -57555,7 +57857,7 @@ local.tee $0 i32.const 2 i32.shl - i32.const 16824 + i32.const 16840 i32.add local.tee $1 i32.load @@ -57567,8 +57869,8 @@ local.get $10 i32.eqz if $if_55 - i32.const 16524 - i32.const 16524 + i32.const 16540 + i32.const 16540 i32.load i32.const 1 local.get $0 @@ -57580,7 +57882,7 @@ br $block_2 end ;; $if_55 else - i32.const 16536 + i32.const 16552 i32.load local.get $8 i32.gt_u @@ -57606,7 +57908,7 @@ br_if $block_2 end ;; $if_56 end ;; $if_54 - i32.const 16536 + i32.const 16552 i32.load local.tee $1 local.get $10 @@ -57642,7 +57944,7 @@ i32.load offset=4 local.tee $0 if $if_60 - i32.const 16536 + i32.const 16552 i32.load local.get $0 i32.gt_u @@ -57671,11 +57973,11 @@ local.get $4 i32.store local.get $3 - i32.const 16540 + i32.const 16556 i32.load i32.eq if $if_62 (result i32) - i32.const 16528 + i32.const 16544 local.get $4 i32.store return @@ -57694,10 +57996,10 @@ local.get $1 i32.const 3 i32.shl - i32.const 16560 + i32.const 16576 i32.add local.set $0 - i32.const 16520 + i32.const 16536 i32.load local.tee $5 i32.const 1 @@ -57706,7 +58008,7 @@ local.tee $1 i32.and if $if_64 - i32.const 16536 + i32.const 16552 i32.load local.get $0 i32.const 8 @@ -57724,7 +58026,7 @@ local.set $17 end ;; $if_65 else - i32.const 16520 + i32.const 16536 local.get $1 local.get $5 i32.or @@ -57820,7 +58122,7 @@ local.tee $1 i32.const 2 i32.shl - i32.const 16824 + i32.const 16840 i32.add local.set $0 local.get $3 @@ -57832,7 +58134,7 @@ local.get $3 i32.const 0 i32.store offset=16 - i32.const 16524 + i32.const 16540 i32.load local.tee $4 i32.const 1 @@ -57904,7 +58206,7 @@ unreachable end ;; $if_70 end ;; $loop_1 - i32.const 16536 + i32.const 16552 i32.load local.get $2 i32.gt_u @@ -57927,7 +58229,7 @@ end 
;; $if_72 end ;; $block_6 end ;; $if_69 - i32.const 16536 + i32.const 16552 i32.load local.tee $0 local.get $14 @@ -57962,7 +58264,7 @@ end ;; $if_73 end ;; $block_5 else - i32.const 16524 + i32.const 16540 local.get $2 local.get $4 i32.or @@ -57980,8 +58282,8 @@ local.get $3 i32.store offset=8 end ;; $if_68 - i32.const 16552 - i32.const 16552 + i32.const 16568 + i32.const 16568 i32.load i32.const -1 i32.add @@ -57991,7 +58293,7 @@ if $if_74 return end ;; $if_74 - i32.const 16976 + i32.const 16992 local.set $0 loop $loop_2 local.get $0 @@ -58003,7 +58305,7 @@ local.get $3 br_if $loop_2 end ;; $loop_2 - i32.const 16552 + i32.const 16568 i32.const -1 i32.store ) @@ -58019,7 +58321,7 @@ (func $__ZNKSt3__217bad_function_call4whatEv (type $4) (param $0 i32) (result i32) - i32.const 14363 + i32.const 14379 ) (func $__ZNSt3__212__next_primeEm (type $4) @@ -59640,7 +59942,7 @@ (param $0 i32) (local $1 i32) local.get $0 - call $__ZN7Context17onRequestTrailersEv + call $__ZN7Context17onRequestMetadataEv local.tee $1 if $if call $__ZNKSt3__221__basic_string_commonILb1EE20__throw_length_errorEv @@ -59650,55 +59952,55 @@ (func $__ZNSt3__25mutex6unlockEv (type $0) (param $0 i32) local.get $0 - call $__ZN7Context17onRequestTrailersEv + call $__ZN7Context17onRequestMetadataEv drop ) (func $__ZNSt3__211__call_onceERVmPvPFvS2_E (type $0) (param $0 i32) - i32.const 17016 - call $__ZN7Context17onRequestTrailersEv + i32.const 17032 + call $__ZN7Context17onRequestMetadataEv drop loop $loop - i32.const 16348 + i32.const 16364 i32.load i32.const 1 i32.eq if $if - i32.const 17044 - i32.const 17016 + i32.const 17060 + i32.const 17032 call $_pthread_cond_wait drop br $loop end ;; $if end ;; $loop - i32.const 16348 + i32.const 16364 i32.load if $if_0 - i32.const 17016 - call $__ZN7Context17onRequestTrailersEv + i32.const 17032 + call $__ZN7Context17onRequestMetadataEv drop else - i32.const 16348 + i32.const 16364 i32.const 1 i32.store - i32.const 17016 - call 
$__ZN7Context17onRequestTrailersEv + i32.const 17032 + call $__ZN7Context17onRequestMetadataEv drop local.get $0 i32.const 158 call_indirect $29 (type $0) - i32.const 17016 - call $__ZN7Context17onRequestTrailersEv + i32.const 17032 + call $__ZN7Context17onRequestMetadataEv drop - i32.const 16348 + i32.const 16364 i32.const -1 i32.store - i32.const 17016 - call $__ZN7Context17onRequestTrailersEv + i32.const 17032 + call $__ZN7Context17onRequestMetadataEv drop - i32.const 17044 - call $__ZN7Context17onRequestTrailersEv + i32.const 17060 + call $__ZN7Context17onRequestMetadataEv drop end ;; $if_0 ) @@ -59795,12 +60097,12 @@ (func $__ZNSt11logic_errorC2EPKc (type $0) (param $0 i32) local.get $0 - i32.const 8436 + i32.const 8452 i32.store local.get $0 i32.const 4 i32.add - i32.const 12413 + i32.const 12429 call $__ZNSt3__218__libcpp_refstringC2EPKc ) @@ -61090,7 +61392,7 @@ local.get $4 i32.const 1 i32.add - i32.const 14432 + i32.const 14448 local.get $5 call $_snprintf local.tee $3 @@ -61230,7 +61532,7 @@ local.get $4 i32.const 1 i32.add - i32.const 14435 + i32.const 14451 local.get $5 call $_snprintf local.tee $3 @@ -61340,9 +61642,9 @@ i64.ne if $if_1 local.get $2 - i32.const 14575 + i32.const 14591 i32.store - i32.const 14525 + i32.const 14541 local.get $2 call $_abort_message end ;; $if_1 @@ -61393,7 +61695,7 @@ call_indirect $29 (type $4) local.set $0 local.get $1 - i32.const 14575 + i32.const 14591 i32.store local.get $1 local.get $2 @@ -61401,23 +61703,23 @@ local.get $1 local.get $0 i32.store offset=8 - i32.const 14439 + i32.const 14455 local.get $1 call $_abort_message else local.get $3 - i32.const 14575 + i32.const 14591 i32.store local.get $3 local.get $2 i32.store offset=4 - i32.const 14484 + i32.const 14500 local.get $3 call $_abort_message end ;; $if_3 end ;; $if_0 end ;; $if - i32.const 14563 + i32.const 14579 local.get $5 call $_abort_message ) @@ -61432,15 +61734,15 @@ i32.const 16 i32.add global.set $36 - i32.const 17092 + i32.const 17108 i32.const 5 
call $_pthread_once if $if - i32.const 14714 + i32.const 14730 local.get $0 call $_abort_message else - i32.const 17096 + i32.const 17112 i32.load call $_pthread_getspecific local.set $1 @@ -61465,7 +61767,7 @@ local.get $2 local.get $1 i32.store - i32.const 7700 + i32.const 7716 i32.load local.tee $1 local.get $0 @@ -62348,11 +62650,11 @@ i32.const 16 i32.add global.set $36 - i32.const 17096 + i32.const 17112 i32.const 53 call $_pthread_key_create if $if - i32.const 14763 + i32.const 14779 local.get $0 call $_abort_message else @@ -62372,12 +62674,12 @@ global.set $36 local.get $0 call $_free - i32.const 17096 + i32.const 17112 i32.load i32.const 0 call $_pthread_setspecific if $if - i32.const 14813 + i32.const 14829 local.get $1 call $_abort_message else @@ -62389,7 +62691,7 @@ (func $__ZNSt11logic_errorD2Ev (type $0) (param $0 i32) local.get $0 - i32.const 8436 + i32.const 8452 i32.store local.get $0 i32.const 4 @@ -63223,8 +63525,8 @@ (func $__ZSt15get_new_handlerv (type $12) (result i32) (local $0 i32) - i32.const 17100 - i32.const 17100 + i32.const 17116 + i32.const 17116 i32.load local.tee $0 i32.store @@ -64119,5 +64421,5 @@ call $dynCall_viji ) ;; User section "emscripten_metadata": - ;; "\00\00\00\01\e0\87\01\e3\01" + ;; "\00\00\00\01\f0\87\01\e3\01" ) \ No newline at end of file diff --git a/examples/zipkin-tracing/docker-compose.yml b/examples/zipkin-tracing/docker-compose.yaml similarity index 98% rename from examples/zipkin-tracing/docker-compose.yml rename to examples/zipkin-tracing/docker-compose.yaml index 02673d611ee73..132e752793058 100644 --- a/examples/zipkin-tracing/docker-compose.yml +++ b/examples/zipkin-tracing/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '2' +version: "3.7" services: front-envoy: diff --git a/examples/zipkin-tracing/front-envoy-zipkin.yaml b/examples/zipkin-tracing/front-envoy-zipkin.yaml index e4e5a9cfb7fb4..c5ae662492fad 100644 --- a/examples/zipkin-tracing/front-envoy-zipkin.yaml +++ 
b/examples/zipkin-tracing/front-envoy-zipkin.yaml @@ -7,7 +7,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager generate_request_id: true tracing: operation_name: egress @@ -28,29 +29,40 @@ static_resources: operation: checkAvailability http_filters: - name: envoy.router - config: {} + typed_config: {} clusters: - name: service1 connect_timeout: 0.250s type: strict_dns lb_policy: round_robin http2_protocol_options: {} - hosts: - - socket_address: - address: service1 - port_value: 80 + load_assignment: + cluster_name: service1 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: service1 + port_value: 80 - name: zipkin connect_timeout: 1s type: strict_dns lb_policy: round_robin - hosts: - - socket_address: - address: zipkin - port_value: 9411 + load_assignment: + cluster_name: zipkin + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: zipkin + port_value: 9411 tracing: http: name: envoy.zipkin - config: + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig collector_cluster: zipkin collector_endpoint: "/api/v1/spans" admin: diff --git a/examples/zipkin-tracing/service1-envoy-zipkin.yaml b/examples/zipkin-tracing/service1-envoy-zipkin.yaml index fbc9503c0a666..a8f7f3672b2bf 100644 --- a/examples/zipkin-tracing/service1-envoy-zipkin.yaml +++ b/examples/zipkin-tracing/service1-envoy-zipkin.yaml @@ -7,7 +7,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager tracing: operation_name: ingress codec_type: auto @@ -27,7 +28,7 @@ static_resources: operation: checkAvailability http_filters: - name: envoy.router - config: {} + typed_config: {} - address: 
socket_address: address: 0.0.0.0 @@ -35,7 +36,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager tracing: operation_name: egress codec_type: auto @@ -55,37 +57,53 @@ static_resources: operation: checkStock http_filters: - name: envoy.router - config: {} + typed_config: {} clusters: - name: local_service connect_timeout: 0.250s type: strict_dns lb_policy: round_robin - hosts: - - socket_address: - address: 127.0.0.1 - port_value: 8080 + load_assignment: + cluster_name: local_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8080 - name: service2 connect_timeout: 0.250s type: strict_dns lb_policy: round_robin http2_protocol_options: {} - hosts: - - socket_address: - address: service2 - port_value: 80 + load_assignment: + cluster_name: service2 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: service2 + port_value: 80 - name: zipkin connect_timeout: 1s type: strict_dns lb_policy: round_robin - hosts: - - socket_address: - address: zipkin - port_value: 9411 + load_assignment: + cluster_name: zipkin + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: zipkin + port_value: 9411 tracing: http: name: envoy.zipkin - config: + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig collector_cluster: zipkin collector_endpoint: "/api/v1/spans" admin: diff --git a/examples/zipkin-tracing/service2-envoy-zipkin.yaml b/examples/zipkin-tracing/service2-envoy-zipkin.yaml index b17b3197014bf..fd4ed0c1a51cb 100644 --- a/examples/zipkin-tracing/service2-envoy-zipkin.yaml +++ b/examples/zipkin-tracing/service2-envoy-zipkin.yaml @@ -7,7 +7,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": 
type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager tracing: operation_name: ingress codec_type: auto @@ -27,28 +28,39 @@ static_resources: operation: checkStock http_filters: - name: envoy.router - config: {} + typed_config: {} clusters: - name: local_service connect_timeout: 0.250s type: strict_dns lb_policy: round_robin - hosts: - - socket_address: - address: 127.0.0.1 - port_value: 8080 + load_assignment: + cluster_name: local_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8080 - name: zipkin connect_timeout: 1s type: strict_dns lb_policy: round_robin - hosts: - - socket_address: - address: zipkin - port_value: 9411 + load_assignment: + cluster_name: zipkin + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: zipkin + port_value: 9411 tracing: http: name: envoy.zipkin - config: + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig collector_cluster: zipkin collector_endpoint: "/api/v1/spans" admin: diff --git a/include/envoy/access_log/BUILD b/include/envoy/access_log/BUILD index d05c37b156130..da0469451a0b8 100644 --- a/include/envoy/access_log/BUILD +++ b/include/envoy/access_log/BUILD @@ -12,7 +12,6 @@ envoy_cc_library( name = "access_log_interface", hdrs = ["access_log.h"], deps = [ - "//include/envoy/filesystem:filesystem_interface", "//include/envoy/http:header_map_interface", "//include/envoy/stream_info:stream_info_interface", ], diff --git a/include/envoy/access_log/access_log.h b/include/envoy/access_log/access_log.h index 5f04026a2aae0..952265933065d 100644 --- a/include/envoy/access_log/access_log.h +++ b/include/envoy/access_log/access_log.h @@ -4,13 +4,34 @@ #include #include "envoy/common/pure.h" -#include "envoy/filesystem/filesystem.h" #include "envoy/http/header_map.h" #include "envoy/stream_info/stream_info.h" namespace Envoy { namespace AccessLog { +class AccessLogFile 
{ +public: + virtual ~AccessLogFile() {} + + /** + * Write data to the file. + */ + virtual void write(absl::string_view) PURE; + + /** + * Reopen the file. + */ + virtual void reopen() PURE; + + /** + * Synchronously flush all pending data to disk. + */ + virtual void flush() PURE; +}; + +using AccessLogFileSharedPtr = std::shared_ptr; + class AccessLogManager { public: virtual ~AccessLogManager() {} @@ -25,10 +46,10 @@ class AccessLogManager { * @param file_name specifies the file to create/open. * @return the opened file. */ - virtual Filesystem::FileSharedPtr createAccessLog(const std::string& file_name) PURE; + virtual AccessLogFileSharedPtr createAccessLog(const std::string& file_name) PURE; }; -typedef std::unique_ptr AccessLogManagerPtr; +using AccessLogManagerPtr = std::unique_ptr; /** * Interface for access log filters. @@ -41,11 +62,12 @@ class Filter { * Evaluate whether an access log should be written based on request and response data. * @return TRUE if the log should be written. */ - virtual bool evaluate(const StreamInfo::StreamInfo& info, - const Http::HeaderMap& request_headers) PURE; + virtual bool evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap& request_headers, + const Http::HeaderMap& response_headers, + const Http::HeaderMap& response_trailers) PURE; }; -typedef std::unique_ptr FilterPtr; +using FilterPtr = std::unique_ptr; /** * Abstract access logger for requests and connections. @@ -67,7 +89,7 @@ class Instance { const StreamInfo::StreamInfo& stream_info) PURE; }; -typedef std::shared_ptr InstanceSharedPtr; +using InstanceSharedPtr = std::shared_ptr; /** * Interface for access log formatter. @@ -91,7 +113,7 @@ class Formatter { const StreamInfo::StreamInfo& stream_info) const PURE; }; -typedef std::unique_ptr FormatterPtr; +using FormatterPtr = std::unique_ptr; /** * Interface for access log provider. 
@@ -115,7 +137,7 @@ class FormatterProvider { const StreamInfo::StreamInfo& stream_info) const PURE; }; -typedef std::unique_ptr FormatterProviderPtr; +using FormatterProviderPtr = std::unique_ptr; } // namespace AccessLog } // namespace Envoy diff --git a/include/envoy/api/BUILD b/include/envoy/api/BUILD index 9b902d753c035..bade7600a3eba 100644 --- a/include/envoy/api/BUILD +++ b/include/envoy/api/BUILD @@ -18,7 +18,17 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "io_error_interface", + hdrs = ["io_error.h"], +) + envoy_cc_library( name = "os_sys_calls_interface", - hdrs = ["os_sys_calls.h"], + hdrs = [ + "os_sys_calls.h", + "os_sys_calls_common.h", + "os_sys_calls_hot_restart.h", + "os_sys_calls_linux.h", + ], ) diff --git a/include/envoy/api/api.h b/include/envoy/api/api.h index 26724bc11e742..44af42935e7e4 100644 --- a/include/envoy/api/api.h +++ b/include/envoy/api/api.h @@ -5,7 +5,6 @@ #include "envoy/common/time.h" #include "envoy/event/dispatcher.h" -#include "envoy/event/timer.h" #include "envoy/filesystem/filesystem.h" #include "envoy/stats/store.h" #include "envoy/thread/thread.h" @@ -26,6 +25,14 @@ class Api { */ virtual Event::DispatcherPtr allocateDispatcher() PURE; + /** + * Allocate a dispatcher. + * @param watermark_factory the watermark factory, ownership is transferred to the dispatcher. + * @return Event::DispatcherPtr which is owned by the caller. + */ + virtual Event::DispatcherPtr + allocateDispatcher(Buffer::WatermarkFactoryPtr&& watermark_factory) PURE; + /** * @return a reference to the ThreadFactory */ @@ -37,10 +44,9 @@ class Api { virtual Filesystem::Instance& fileSystem() PURE; /** - * @return a reference to the TimeSystem - * TODO(jmarantz): change this to return a TimeSource. 
+ * @return a reference to the TimeSource */ - virtual Event::TimeSystem& timeSystem() PURE; + virtual TimeSource& timeSource() PURE; }; typedef std::unique_ptr ApiPtr; diff --git a/include/envoy/api/io_error.h b/include/envoy/api/io_error.h new file mode 100644 index 0000000000000..fb5dda090efc5 --- /dev/null +++ b/include/envoy/api/io_error.h @@ -0,0 +1,80 @@ +#pragma once + +#include +#include + +#include "envoy/common/platform.h" +#include "envoy/common/pure.h" + +namespace Envoy { +namespace Api { + +/** + * Base class for any I/O error. + */ +class IoError { +public: + enum class IoErrorCode { + // No data available right now, try again later. + Again, + // Not supported. + NoSupport, + // Address family not supported. + AddressFamilyNoSupport, + // During non-blocking connect, the connection cannot be completed immediately. + InProgress, + // Permission denied. + Permission, + // Other error codes cannot be mapped to any one above in getErrorCode(). + UnknownError + }; + virtual ~IoError() {} + + virtual IoErrorCode getErrorCode() const PURE; + virtual std::string getErrorDetails() const PURE; +}; + +using IoErrorDeleterType = void (*)(IoError*); +using IoErrorPtr = std::unique_ptr; + +/** + * Basic type for return result which has a return code and error code defined + * according to different implementations. + * If the call succeeds, ok() should return true and |rc_| is valid. Otherwise |err_| + * can be passed into IoError::getErrorCode() to extract the error. In this + * case, |rc_| is invalid. + */ +template struct IoCallResult { + IoCallResult(ReturnValue rc, IoErrorPtr err) : rc_(rc), err_(std::move(err)) {} + + IoCallResult(IoCallResult&& result) + : rc_(result.rc_), err_(std::move(result.err_)) {} + + virtual ~IoCallResult() {} + + IoCallResult& operator=(IoCallResult&& result) { + rc_ = result.rc_; + err_ = std::move(result.err_); + return *this; + } + + /** + * @return true if the call succeeds. 
+ */ + bool ok() const { return err_ == nullptr; } + + // TODO(danzh): rename it to be more meaningful, i.e. return_value_. + ReturnValue rc_; + IoErrorPtr err_; +}; + +using IoCallBoolResult = IoCallResult; +using IoCallSizeResult = IoCallResult; +using IoCallUint64Result = IoCallResult; + +inline Api::IoCallUint64Result ioCallUint64ResultNoError() { + return IoCallUint64Result(0, IoErrorPtr(nullptr, [](IoError*) {})); +} + +} // namespace Api +} // namespace Envoy diff --git a/include/envoy/api/os_sys_calls.h b/include/envoy/api/os_sys_calls.h index 51f90a1afa6d4..5528d14f5c782 100644 --- a/include/envoy/api/os_sys_calls.h +++ b/include/envoy/api/os_sys_calls.h @@ -1,40 +1,24 @@ #pragma once +#ifndef WIN32 #include #include // for mode_t #include // for sockaddr #include #include // for iovec +#endif + #include #include +#include "envoy/api/os_sys_calls_common.h" #include "envoy/common/pure.h" +#include "envoy/common/platform.h" namespace Envoy { namespace Api { -/** - * SysCallResult holds the rc and errno values resulting from a system call. - */ -template struct SysCallResult { - - /** - * The return code from the system call. - */ - T rc_; - - /** - * The errno value as captured after the system call. - */ - int errno_; -}; - -typedef SysCallResult SysCallIntResult; -typedef SysCallResult SysCallSizeResult; -typedef SysCallResult SysCallPtrResult; -typedef SysCallResult SysCallStringResult; - class OsSysCalls { public: virtual ~OsSysCalls() {} @@ -49,18 +33,6 @@ class OsSysCalls { */ virtual SysCallIntResult ioctl(int sockfd, unsigned long int request, void* argp) PURE; - /** - * Open file by full_path with given flags and mode. - * @return file descriptor. - */ - virtual SysCallIntResult open(const std::string& full_path, int flags, int mode) PURE; - - /** - * Write num_bytes to fd from buffer. - * @return number of bytes written if non negative, otherwise error code. 
- */ - virtual SysCallSizeResult write(int fd, const void* buffer, size_t num_bytes) PURE; - /** * @see writev (man 2 writev) */ @@ -76,22 +48,17 @@ class OsSysCalls { */ virtual SysCallSizeResult recv(int socket, void* buffer, size_t length, int flags) PURE; + /** + * @see recvfrom (man 2 recvfrom) + */ + virtual SysCallSizeResult recvfrom(int sockfd, void* buffer, size_t length, int flags, + struct sockaddr* addr, socklen_t* addrlen) PURE; /** * Release all resources allocated for fd. * @return zero on success, -1 returned otherwise. */ virtual SysCallIntResult close(int fd) PURE; - /** - * @see shm_open (man 3 shm_open) - */ - virtual SysCallIntResult shmOpen(const char* name, int oflag, mode_t mode) PURE; - - /** - * @see shm_unlink (man 3 shm_unlink) - */ - virtual SysCallIntResult shmUnlink(const char* name) PURE; - /** * @see man 2 ftruncate */ diff --git a/include/envoy/api/os_sys_calls_common.h b/include/envoy/api/os_sys_calls_common.h new file mode 100644 index 0000000000000..3c283e064bbfd --- /dev/null +++ b/include/envoy/api/os_sys_calls_common.h @@ -0,0 +1,31 @@ +#pragma once + +#include +#include + +namespace Envoy { +namespace Api { +/** + * SysCallResult holds the rc and errno values resulting from a system call. + */ +template struct SysCallResult { + + /** + * The return code from the system call. + */ + T rc_; + + /** + * The errno value as captured after the system call. 
+ */ + int errno_; +}; + +typedef SysCallResult SysCallIntResult; +typedef SysCallResult SysCallSizeResult; +typedef SysCallResult SysCallPtrResult; +typedef SysCallResult SysCallStringResult; +typedef SysCallResult SysCallBoolResult; + +} // namespace Api +} // namespace Envoy diff --git a/include/envoy/api/os_sys_calls_hot_restart.h b/include/envoy/api/os_sys_calls_hot_restart.h new file mode 100644 index 0000000000000..7e557cf4d295a --- /dev/null +++ b/include/envoy/api/os_sys_calls_hot_restart.h @@ -0,0 +1,32 @@ +#pragma once + +#ifndef WIN32 +#include // for mode_t + +#endif + +#include "envoy/api/os_sys_calls_common.h" +#include "envoy/common/pure.h" + +namespace Envoy { +namespace Api { + +class HotRestartOsSysCalls { +public: + virtual ~HotRestartOsSysCalls() {} + + /** + * @see shm_open (man 3 shm_open) + */ + virtual SysCallIntResult shmOpen(const char* name, int oflag, mode_t mode) PURE; + + /** + * @see shm_unlink (man 3 shm_unlink) + */ + virtual SysCallIntResult shmUnlink(const char* name) PURE; +}; + +typedef std::unique_ptr HotRestartOsSysCallsPtr; + +} // namespace Api +} // namespace Envoy diff --git a/include/envoy/api/os_sys_calls_linux.h b/include/envoy/api/os_sys_calls_linux.h new file mode 100644 index 0000000000000..cd90daea538df --- /dev/null +++ b/include/envoy/api/os_sys_calls_linux.h @@ -0,0 +1,28 @@ +#pragma once + +#if !defined(__linux__) +#error "Linux platform file is part of non-Linux build." 
+#endif + +#include + +#include "envoy/api/os_sys_calls_common.h" +#include "envoy/common/pure.h" + +namespace Envoy { +namespace Api { + +class LinuxOsSysCalls { +public: + virtual ~LinuxOsSysCalls() {} + + /** + * @see sched_getaffinity (man 2 sched_getaffinity) + */ + virtual SysCallIntResult sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t* mask) PURE; +}; + +typedef std::unique_ptr LinuxOsSysCallsPtr; + +} // namespace Api +} // namespace Envoy diff --git a/include/envoy/buffer/BUILD b/include/envoy/buffer/BUILD index 3f04200863700..2a24c6fa24099 100644 --- a/include/envoy/buffer/BUILD +++ b/include/envoy/buffer/BUILD @@ -13,6 +13,7 @@ envoy_cc_library( hdrs = ["buffer.h"], deps = [ "//include/envoy/api:os_sys_calls_interface", + "//include/envoy/network:io_handle_interface", "//source/common/common:byte_order_lib", ], ) diff --git a/include/envoy/buffer/buffer.h b/include/envoy/buffer/buffer.h index 1c6b7a11749f0..48f5c1842a0f6 100644 --- a/include/envoy/buffer/buffer.h +++ b/include/envoy/buffer/buffer.h @@ -8,6 +8,7 @@ #include "envoy/api/os_sys_calls.h" #include "envoy/common/exception.h" #include "envoy/common/pure.h" +#include "envoy/network/io_handle.h" #include "common/common/byte_order.h" @@ -22,6 +23,8 @@ namespace Buffer { struct RawSlice { void* mem_ = nullptr; size_t len_ = 0; + + bool operator==(const RawSlice& rhs) const { return mem_ == rhs.mem_ && len_ == rhs.len_; } }; /** @@ -98,8 +101,10 @@ class Instance { virtual void prepend(Instance& data) PURE; /** - * Commit a set of slices originally obtained from reserve(). The number of slices can be - * different from the number obtained from reserve(). The size of each slice can also be altered. + * Commit a set of slices originally obtained from reserve(). The number of slices should match + * the number obtained from reserve(). The size of each slice can also be altered. 
Commit must + * occur following a reserve() without any mutating operations in between other than to the iovecs + * len_ fields. * @param iovecs supplies the array of slices to commit. * @param num_iovecs supplies the size of the slices array. */ @@ -161,12 +166,12 @@ class Instance { /** * Read from a file descriptor directly into the buffer. - * @param fd supplies the descriptor to read from. + * @param io_handle supplies the io handle to read from. * @param max_length supplies the maximum length to read. - * @return a Api::SysCallIntResult with rc_ = the number of bytes read if successful, or rc_ = -1 - * for failure. If the call is successful, errno_ shouldn't be used. + * @return a IoCallUint64Result with err_ = nullptr and rc_ = the number of bytes + * read if successful, or err_ = some IoError for failure. If call failed, rc_ shouldn't be used. */ - virtual Api::SysCallIntResult read(int fd, uint64_t max_length) PURE; + virtual Api::IoCallUint64Result read(Network::IoHandle& io_handle, uint64_t max_length) PURE; /** * Reserve space in the buffer. @@ -194,11 +199,12 @@ class Instance { /** * Write the buffer out to a file descriptor. - * @param fd supplies the descriptor to write to. - * @return a Api::SysCallIntResult with rc_ = the number of bytes written if successful, or rc_ = - * -1 for failure. If the call is successful, errno_ shouldn't be used. + * @param io_handle supplies the io_handle to write to. + * @return a IoCallUint64Result with err_ = nullptr and rc_ = the number of bytes + * written if successful, or err_ = some IoError for failure. If call failed, rc_ shouldn't be + * used. */ - virtual Api::SysCallIntResult write(int fd) PURE; + virtual Api::IoCallUint64Result write(Network::IoHandle& io_handle) PURE; /** * Copy an integer out of the buffer. 
diff --git a/include/envoy/common/platform.h b/include/envoy/common/platform.h index e98b9d1365627..12e8f4bca33c2 100644 --- a/include/envoy/common/platform.h +++ b/include/envoy/common/platform.h @@ -2,12 +2,18 @@ // NOLINT(namespace-envoy) #ifdef _MSC_VER -#include +#include #define PACKED_STRUCT(definition, ...) \ __pragma(pack(push, 1)) definition, ##__VA_ARGS__; \ __pragma(pack(pop)) +#ifdef _M_X64 +using ssize_t = int64_t; +#else +#error Envoy is not supported on 32-bit Windows +#endif + #else #define PACKED_STRUCT(definition, ...) definition, ##__VA_ARGS__ __attribute__((packed)) diff --git a/include/envoy/common/token_bucket.h b/include/envoy/common/token_bucket.h index 59a21642213d9..2f04c7c8c461f 100644 --- a/include/envoy/common/token_bucket.h +++ b/include/envoy/common/token_bucket.h @@ -15,19 +15,29 @@ namespace Envoy { */ class TokenBucket { public: - virtual ~TokenBucket() {} + virtual ~TokenBucket() = default; /** - * @param tokens supplies the number of tokens to be consumed. Default is 1. - * @return true if bucket is not empty, otherwise it returns false. + * @param tokens supplies the number of tokens to be consumed. + * @param allow_partial supplies whether the token bucket will allow consumption of less tokens + * than asked for. If allow_partial is true, the bucket contains 3 tokens, + * and the caller asks for 5, the bucket will return 3 tokens and now be + * empty. + * @return the number of tokens actually consumed. */ - virtual bool consume(uint64_t tokens = 1) PURE; + virtual uint64_t consume(uint64_t tokens, bool allow_partial) PURE; /** * @return returns the approximate time until a next token is available. Currently it * returns the upper bound on the amount of time until a next token is available. */ - virtual uint64_t nextTokenAvailableMs() PURE; + virtual std::chrono::milliseconds nextTokenAvailable() PURE; + + /** + * Reset the bucket with a specific number of tokens. 
Refill will begin again from the time that + * this routine is called. + */ + virtual void reset(uint64_t num_tokens) PURE; }; typedef std::unique_ptr TokenBucketPtr; diff --git a/include/envoy/config/BUILD b/include/envoy/config/BUILD index 5c41821d10c80..777cf7a32fb25 100644 --- a/include/envoy/config/BUILD +++ b/include/envoy/config/BUILD @@ -8,6 +8,27 @@ load( envoy_package() +envoy_cc_library( + name = "config_provider_interface", + hdrs = ["config_provider.h"], + external_deps = ["abseil_optional"], + deps = [ + "//include/envoy/common:time_interface", + "//source/common/common:assert_lib", + "//source/common/protobuf", + ], +) + +envoy_cc_library( + name = "config_provider_manager_interface", + hdrs = ["config_provider_manager.h"], + deps = [ + ":config_provider_interface", + "//include/envoy/server:filter_config_interface", + "//source/common/protobuf", + ], +) + envoy_cc_library( name = "grpc_mux_interface", hdrs = ["grpc_mux.h"], @@ -23,6 +44,7 @@ envoy_cc_library( deps = [ "//include/envoy/stats:stats_macros", "//source/common/protobuf", + "@envoy_api//envoy/api/v2:discovery_cc", ], ) @@ -35,21 +57,10 @@ envoy_cc_library( ) envoy_cc_library( - name = "config_provider_interface", - hdrs = ["config_provider.h"], - external_deps = ["abseil_optional"], + name = "xds_grpc_context_interface", + hdrs = ["xds_grpc_context.h"], deps = [ - "//include/envoy/common:time_interface", - "//source/common/protobuf", - ], -) - -envoy_cc_library( - name = "config_provider_manager_interface", - hdrs = ["config_provider_manager.h"], - deps = [ - ":config_provider_interface", - "//include/envoy/server:filter_config_interface", + ":subscription_interface", "//source/common/protobuf", ], ) diff --git a/include/envoy/config/config_provider.h b/include/envoy/config/config_provider.h index a42f512dea4e9..8a8fbecb2efd1 100644 --- a/include/envoy/config/config_provider.h +++ b/include/envoy/config/config_provider.h @@ -4,6 +4,7 @@ #include "envoy/common/time.h" +#include 
"common/common/assert.h" #include "common/protobuf/protobuf.h" #include "absl/types/optional.h" @@ -41,6 +42,23 @@ class ConfigProvider { }; using ConfigConstSharedPtr = std::shared_ptr; + /** + * The type of API represented by a ConfigProvider. + */ + enum class ApiType { + /** + * A "Full" API delivers a complete configuration as part of each resource (top level + * config proto); i.e., each resource contains the whole representation of the config intent. An + * example of this type of API is RDS. + */ + Full, + /** + * A "Delta" API delivers a subset of the config intent as part of each resource (top level + * config proto). Examples of this type of API are CDS, LDS and SRDS. + */ + Delta + }; + /** * Stores the config proto as well as the associated version. */ @@ -51,10 +69,26 @@ class ConfigProvider { std::string version_; }; + using ConfigProtoVector = std::vector; + /** + * Stores the config protos associated with a "Delta" API. + */ + template struct ConfigProtoInfoVector { + const std::vector config_protos_; + + // Only populated by dynamic config providers. + std::string version_; + }; + virtual ~ConfigProvider() = default; /** - * Returns a ConfigProtoInfo associated with the provider. + * The type of API. + */ + virtual ApiType apiType() const PURE; + + /** + * Returns a ConfigProtoInfo associated with a ApiType::Full provider. * @return absl::optional> an optional ConfigProtoInfo; the value is set when a * config is available. */ @@ -69,6 +103,27 @@ class ConfigProvider { return ConfigProtoInfo

{*config_proto, getConfigVersion()}; } + /** + * Returns a ConfigProtoInfoVector associated with a ApiType::Delta provider. + * @return absl::optional an optional ConfigProtoInfoVector; the value is + * set when a config is available. + */ + template absl::optional> configProtoInfoVector() const { + static_assert(std::is_base_of::value, + "Proto type must derive from Protobuf::Message"); + + const ConfigProtoVector config_protos = getConfigProtos(); + if (config_protos.empty()) { + return absl::nullopt; + } + std::vector ret_protos; + ret_protos.reserve(config_protos.size()); + for (const auto* elem : config_protos) { + ret_protos.push_back(static_cast(elem)); + } + return ConfigProtoInfoVector

{ret_protos, getConfigVersion()}; + } + /** * Returns the Config corresponding to the provider. * @return std::shared_ptr a shared pointer to the Config. @@ -92,13 +147,20 @@ class ConfigProvider { * @return Protobuf::Message* the config proto corresponding to the Config instantiated by the * provider. */ - virtual const Protobuf::Message* getConfigProto() const PURE; + virtual const Protobuf::Message* getConfigProto() const { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + + /** + * Returns the config protos associated with the provider. + * @return const ConfigProtoVector the config protos corresponding to the Config instantiated by + * the provider. + */ + virtual ConfigProtoVector getConfigProtos() const { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } /** * Returns the config version associated with the provider. * @return std::string the config version. */ - virtual std::string getConfigVersion() const PURE; + virtual std::string getConfigVersion() const { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } /** * Returns the config implementation associated with the provider. diff --git a/include/envoy/config/config_provider_manager.h b/include/envoy/config/config_provider_manager.h index 3c3eddba9c2f0..81beb1ecb5fbb 100644 --- a/include/envoy/config/config_provider_manager.h +++ b/include/envoy/config/config_provider_manager.h @@ -25,6 +25,17 @@ namespace Config { */ class ConfigProviderManager { public: + class OptionalArg { + public: + virtual ~OptionalArg() = default; + }; + + class NullOptionalArg : public OptionalArg { + public: + NullOptionalArg() = default; + ~NullOptionalArg() override = default; + }; + virtual ~ConfigProviderManager() = default; /** @@ -34,6 +45,7 @@ class ConfigProviderManager { * @param config_source_proto supplies the proto containing the xDS API configuration. * @param factory_context is the context to use for the provider. * @param stat_prefix supplies the prefix to use for statistics. 
+ * @param optarg supplies an optional argument with data specific to the concrete class. * @return ConfigProviderPtr a newly allocated dynamic config provider which shares underlying * data structures with other dynamic providers configured with the same * API source. @@ -41,17 +53,42 @@ class ConfigProviderManager { virtual ConfigProviderPtr createXdsConfigProvider(const Protobuf::Message& config_source_proto, Server::Configuration::FactoryContext& factory_context, - const std::string& stat_prefix) PURE; + const std::string& stat_prefix, const OptionalArg& optarg) PURE; /** * Returns a ConfigProvider associated with a statically specified configuration. * @param config_proto supplies the configuration proto. * @param factory_context is the context to use for the provider. + * @param optarg supplies an optional argument with data specific to the concrete class. * @return ConfigProviderPtr a newly allocated static config provider. */ virtual ConfigProviderPtr createStaticConfigProvider(const Protobuf::Message& config_proto, - Server::Configuration::FactoryContext& factory_context) PURE; + Server::Configuration::FactoryContext& factory_context, + const OptionalArg& optarg) { + UNREFERENCED_PARAMETER(config_proto); + UNREFERENCED_PARAMETER(factory_context); + UNREFERENCED_PARAMETER(optarg); + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } + + /** + * Returns a ConfigProvider associated with a statically specified configuration. This is intended + * to be used when a set of configuration protos is required to build the full configuration. + * @param config_protos supplies a vector of configuration protos. + * @param factory_context is the context to use for the provider. + * @param optarg supplies an optional argument with data specific to the concrete class. + * @return ConfigProviderPtr a newly allocated static config provider. 
+ */ + virtual ConfigProviderPtr + createStaticConfigProvider(std::vector>&& config_protos, + Server::Configuration::FactoryContext& factory_context, + const OptionalArg& optarg) { + UNREFERENCED_PARAMETER(config_protos); + UNREFERENCED_PARAMETER(factory_context); + UNREFERENCED_PARAMETER(optarg); + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } }; } // namespace Config diff --git a/include/envoy/config/grpc_mux.h b/include/envoy/config/grpc_mux.h index 6872a62d875b6..fb66a78abdcbb 100644 --- a/include/envoy/config/grpc_mux.h +++ b/include/envoy/config/grpc_mux.h @@ -82,7 +82,7 @@ class GrpcMux { * Start a configuration subscription asynchronously for some API type and resources. * @param type_url type URL corresponding to xDS API, e.g. * type.googleapis.com/envoy.api.v2.Cluster. - * @param resources vector of resource names to watch for. If this is empty, then all + * @param resources set of resource names to watch for. If this is empty, then all * resources for type_url will result in callbacks. * @param callbacks the callbacks to be notified of configuration updates. These must be valid * until GrpcMuxWatch is destroyed. @@ -90,7 +90,7 @@ class GrpcMux { * away, its EDS updates should be cancelled by destroying the GrpcMuxWatchPtr. 
*/ virtual GrpcMuxWatchPtr subscribe(const std::string& type_url, - const std::vector& resources, + const std::set& resources, GrpcMuxCallbacks& callbacks) PURE; /** diff --git a/include/envoy/config/subscription.h b/include/envoy/config/subscription.h index 6d103c9c01e1d..badd3bad890d3 100644 --- a/include/envoy/config/subscription.h +++ b/include/envoy/config/subscription.h @@ -3,6 +3,7 @@ #include #include +#include "envoy/api/v2/discovery.pb.h" #include "envoy/common/exception.h" #include "envoy/common/pure.h" #include "envoy/stats/stats_macros.h" @@ -12,11 +13,9 @@ namespace Envoy { namespace Config { -template class SubscriptionCallbacks { +class SubscriptionCallbacks { public: - typedef Protobuf::RepeatedPtrField ResourceVector; - - virtual ~SubscriptionCallbacks() {} + virtual ~SubscriptionCallbacks() = default; /** * Called when a configuration update is received. @@ -26,9 +25,25 @@ template class SubscriptionCallbacks { * is accepted. Accepted configurations have their version_info reflected in subsequent * requests. */ - virtual void onConfigUpdate(const ResourceVector& resources, + virtual void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, const std::string& version_info) PURE; + // TODO(fredlas) it is a HACK that there are two of these. After delta CDS is merged, + // I intend to reimplement all state-of-the-world xDSes' use of onConfigUpdate + // in terms of this delta-style one (and remove the original). + /** + * Called when a delta configuration update is received. + * @param added_resources resources newly added since the previous fetch. + * @param removed_resources names of resources that this fetch instructed to be removed. + * @param system_version_info aggregate response data "version", for debugging. + * @throw EnvoyException with reason if the config changes are rejected. Otherwise the changes + * are accepted. Accepted changes have their version_info reflected in subsequent requests. 
+ */ + virtual void + onConfigUpdate(const Protobuf::RepeatedPtrField& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) PURE; + /** * Called when either the Subscription is unable to fetch a config update or when onConfigUpdate * invokes an exception. @@ -45,28 +60,27 @@ /** * Common abstraction for subscribing to versioned config updates. This may be implemented via bidi - * gRPC streams, periodic/long polling REST or inotify filesystem updates. ResourceType is expected - * to be a protobuf serializable object. + * gRPC streams, periodic/long polling REST or inotify filesystem updates. */ -template class Subscription { +class Subscription { public: - virtual ~Subscription() {} + virtual ~Subscription() = default; /** * Start a configuration subscription asynchronously. This should be called once and will continue * to fetch throughout the lifetime of the Subscription object. - * @param resources vector of resource names to fetch. + * @param resources set of resource names to fetch. * @param callbacks the callbacks to be notified of configuration updates. The callback must not * result in the deletion of the Subscription object. */ - virtual void start(const std::vector& resources, - SubscriptionCallbacks& callbacks) PURE; + virtual void start(const std::set& resources, SubscriptionCallbacks& callbacks) PURE; /** * Update the resources to fetch. - * @param resources vector of resource names to fetch. + * @param update_to_these_names set of resource names to fetch. It's a (not unordered_)set so + that it can be passed to std::set_difference, which must be given sorted collections. 
*/ - virtual void updateResources(const std::vector& resources) PURE; + virtual void updateResources(const std::set& update_to_these_names) PURE; }; /** diff --git a/include/envoy/config/xds_grpc_context.h b/include/envoy/config/xds_grpc_context.h new file mode 100644 index 0000000000000..aba3a824a67ce --- /dev/null +++ b/include/envoy/config/xds_grpc_context.h @@ -0,0 +1,42 @@ +#pragma once + +#include "envoy/common/pure.h" +#include "envoy/config/subscription.h" + +#include "common/protobuf/protobuf.h" + +namespace Envoy { +namespace Config { + +/** + * A grouping of callbacks that an XdsGrpcContext should provide to its GrpcStream. + */ +template class GrpcStreamCallbacks { +public: + virtual ~GrpcStreamCallbacks() {} + + /** + * For the GrpcStream to prompt the context to take appropriate action in response to the + * gRPC stream having been successfully established. + */ + virtual void onStreamEstablished() PURE; + + /** + * For the GrpcStream to prompt the context to take appropriate action in response to + * failure to establish the gRPC stream. + */ + virtual void onEstablishmentFailure() PURE; + + /** + * For the GrpcStream to pass received protos to the context. + */ + virtual void onDiscoveryResponse(std::unique_ptr&& message) PURE; + + /** + * For the GrpcStream to call when its rate limiting logic allows more requests to be sent. 
+ */ + virtual void onWriteable() PURE; +}; + +} // namespace Config +} // namespace Envoy diff --git a/include/envoy/event/BUILD b/include/envoy/event/BUILD index 75352fad95c4c..962e290e38086 100644 --- a/include/envoy/event/BUILD +++ b/include/envoy/event/BUILD @@ -48,6 +48,5 @@ envoy_cc_library( hdrs = ["timer.h"], deps = [ "//include/envoy/common:time_interface", - "//source/common/event:libevent_lib", ], ) diff --git a/include/envoy/event/dispatcher.h b/include/envoy/event/dispatcher.h index 43916a8762cc2..0024b3a2e7795 100644 --- a/include/envoy/event/dispatcher.h +++ b/include/envoy/event/dispatcher.h @@ -17,11 +17,29 @@ #include "envoy/network/listen_socket.h" #include "envoy/network/listener.h" #include "envoy/network/transport_socket.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" #include "envoy/thread/thread.h" namespace Envoy { namespace Event { +/** + * All dispatcher stats. @see stats_macros.h + */ +// clang-format off +#define ALL_DISPATCHER_STATS(HISTOGRAM) \ + HISTOGRAM(loop_duration_us) \ + HISTOGRAM(poll_delay_us) +// clang-format on + +/** + * Struct definition for all dispatcher stats. @see stats_macros.h + */ +struct DispatcherStats { + ALL_DISPATCHER_STATS(GENERATE_HISTOGRAM_STRUCT) +}; + /** * Callback invoked when a dispatcher post() runs. */ @@ -37,7 +55,16 @@ class Dispatcher { /** * Returns a time-source to use with this dispatcher. */ - virtual TimeSystem& timeSystem() PURE; + virtual TimeSource& timeSource() PURE; + + /** + * Initialize stats for this dispatcher. Note that this can't generally be done at construction + * time, since the main and worker thread dispatchers are constructed before + * ThreadLocalStoreImpl::initializeThreading. + * @param scope the scope to contain the new per-dispatcher stats created here. + * @param prefix the stats prefix to identify this dispatcher. 
+ */ + virtual void initializeStats(Stats::Scope& scope, const std::string& prefix) PURE; /** * Clear any items in the deferred deletion queue. @@ -157,7 +184,14 @@ class Dispatcher { * called) or non-blocking mode where only active events will be executed and then * run() will return. */ - enum class RunType { Block, NonBlock }; + enum class RunType { + Block, // Executes any events that have been activated, then exit. + NonBlock, // Waits for any pending events to activate, executes them, + // then exits. Exits immediately if there are no pending or + // active events. + RunUntilExit // Runs the event-loop until loopExit() is called, blocking + // until there are pending or active events. + }; virtual void run(RunType type) PURE; /** diff --git a/include/envoy/event/timer.h b/include/envoy/event/timer.h index d6d90d1da39d1..f26d58c22f2c8 100644 --- a/include/envoy/event/timer.h +++ b/include/envoy/event/timer.h @@ -7,8 +7,6 @@ #include "envoy/common/pure.h" #include "envoy/common/time.h" -#include "common/event/libevent.h" - namespace Envoy { namespace Event { @@ -33,6 +31,11 @@ class Timer { * Enable a pending timeout. If a timeout is already pending, it will be reset to the new timeout. */ virtual void enableTimer(const std::chrono::milliseconds& d) PURE; + + /** + * Return whether the timer is currently armed. + */ + virtual bool enabled() PURE; }; typedef std::unique_ptr TimerPtr; @@ -63,7 +66,7 @@ class TimeSystem : public TimeSource { * Creates a timer factory. This indirection enables thread-local timer-queue management, * so servers can have a separate timer-factory in each thread. 
*/ - virtual SchedulerPtr createScheduler(Libevent::BasePtr&) PURE; + virtual SchedulerPtr createScheduler(Scheduler& base_scheduler) PURE; }; } // namespace Event diff --git a/include/envoy/filesystem/BUILD b/include/envoy/filesystem/BUILD index 022c770c1c4ae..e740d2ee25e40 100644 --- a/include/envoy/filesystem/BUILD +++ b/include/envoy/filesystem/BUILD @@ -12,6 +12,8 @@ envoy_cc_library( name = "filesystem_interface", hdrs = ["filesystem.h"], deps = [ + "//include/envoy/api:io_error_interface", + "//include/envoy/api:os_sys_calls_interface", "//include/envoy/event:dispatcher_interface", ], ) diff --git a/include/envoy/filesystem/filesystem.h b/include/envoy/filesystem/filesystem.h index 63915bee12f28..a866cff3afb79 100644 --- a/include/envoy/filesystem/filesystem.h +++ b/include/envoy/filesystem/filesystem.h @@ -4,9 +4,9 @@ #include #include +#include "envoy/api/io_error.h" +#include "envoy/common/platform.h" #include "envoy/common/pure.h" -#include "envoy/event/dispatcher.h" -#include "envoy/thread/thread.h" #include "absl/strings/string_view.h" @@ -14,58 +14,59 @@ namespace Envoy { namespace Filesystem { /** - * Abstraction for a file on disk. + * Abstraction for a basic file on disk. */ class File { public: virtual ~File() {} /** - * Write data to the file. + * Open the file with O_RDWR | O_APPEND | O_CREAT + * The file will be closed when this object is destructed + * + * @return bool whether the open succeeded + */ + virtual Api::IoCallBoolResult open() PURE; + + /** + * Write the buffer to the file. The file must be explicitly opened before writing. + * + * @return ssize_t number of bytes written, or -1 for failure */ - virtual void write(absl::string_view) PURE; + virtual Api::IoCallSizeResult write(absl::string_view buffer) PURE; /** - * Reopen the file. + * Close the file. + * + * @return bool whether the close succeeded */ - virtual void reopen() PURE; + virtual Api::IoCallBoolResult close() PURE; /** - * Synchronously flush all pending data to disk. 
+ * @return bool is the file open */ - virtual void flush() PURE; + virtual bool isOpen() const PURE; + + /** + * @return string the file path + */ + virtual std::string path() const PURE; }; -typedef std::shared_ptr FileSharedPtr; +using FilePtr = std::unique_ptr; /** - * Captures state, properties, and stats of a file-system. + * Abstraction for some basic filesystem operations */ class Instance { public: virtual ~Instance() {} /** - * Creates a file, overriding the flush-interval set in the class. - * - * @param path The path of the file to open. - * @param dispatcher The dispatcher used for set up timers to run flush(). - * @param lock The lock. - * @param file_flush_interval_msec Number of milliseconds to delay before flushing. + * @param path The path of the File + * @return a FilePtr. The file is not opened. */ - virtual FileSharedPtr createFile(const std::string& path, Event::Dispatcher& dispatcher, - Thread::BasicLockable& lock, - std::chrono::milliseconds file_flush_interval_msec) PURE; - - /** - * Creates a file, using the default flush-interval for the class. - * - * @param path The path of the file to open. - * @param dispatcher The dispatcher used for set up timers to run flush(). - * @param lock The lock. - */ - virtual FileSharedPtr createFile(const std::string& path, Event::Dispatcher& dispatcher, - Thread::BasicLockable& lock) PURE; + virtual FilePtr createFile(const std::string& path) PURE; /** * @return bool whether a file exists on disk and can be opened for read. @@ -91,12 +92,6 @@ class Instance { */ virtual std::string fileReadToEnd(const std::string& path) PURE; - /** - * @param path some filesystem path. - * @return SysCallStringResult containing the canonical path (see realpath(3)). - */ - virtual Api::SysCallStringResult canonicalPath(const std::string& path) PURE; - /** * Determine if the path is on a list of paths Envoy will refuse to access. This * is a basic sanity check for users, blacklisting some clearly bad paths. 
Paths @@ -109,8 +104,6 @@ class Instance { virtual bool illegalPath(const std::string& path) PURE; }; -typedef std::unique_ptr WatcherPtr; - enum class FileType { Regular, Directory, Other }; struct DirectoryEntry { diff --git a/include/envoy/filesystem/watcher.h b/include/envoy/filesystem/watcher.h index 25359be51c693..d50264254e4a6 100644 --- a/include/envoy/filesystem/watcher.h +++ b/include/envoy/filesystem/watcher.h @@ -20,6 +20,7 @@ class Watcher { struct Events { static const uint32_t MovedTo = 0x1; + static const uint32_t Modified = 0x2; }; virtual ~Watcher() {} @@ -36,4 +37,4 @@ class Watcher { using WatcherPtr = std::unique_ptr; } // namespace Filesystem -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/include/envoy/grpc/BUILD b/include/envoy/grpc/BUILD index 521179147796d..37a16ae1eea50 100644 --- a/include/envoy/grpc/BUILD +++ b/include/envoy/grpc/BUILD @@ -14,6 +14,7 @@ envoy_cc_library( external_deps = ["abseil_optional"], deps = [ ":status", + "//include/envoy/buffer:buffer_interface", "//include/envoy/http:header_map_interface", "//include/envoy/tracing:http_tracer_interface", "//source/common/protobuf", diff --git a/include/envoy/grpc/status.h b/include/envoy/grpc/status.h index 24c200eb438a5..dbc7c0a016f1c 100644 --- a/include/envoy/grpc/status.h +++ b/include/envoy/grpc/status.h @@ -5,6 +5,8 @@ namespace Grpc { class Status { public: + // If this enum is changed, then the std::unordered_map in Envoy::Grpc::Utility::nameToGrpcStatus + // located at: //source/common/access_log/grpc/status.cc must also be changed. enum GrpcStatus { // The RPC completed successfully. 
Ok = 0, diff --git a/include/envoy/http/BUILD b/include/envoy/http/BUILD index bdd112be4e1ed..4f0355015ce3c 100644 --- a/include/envoy/http/BUILD +++ b/include/envoy/http/BUILD @@ -73,7 +73,10 @@ envoy_cc_library( envoy_cc_library( name = "header_map_interface", hdrs = ["header_map.h"], - deps = ["//source/common/common:hash_lib"], + deps = [ + "//source/common/common:assert_lib", + "//source/common/common:hash_lib", + ], ) envoy_cc_library( diff --git a/include/envoy/http/codec.h b/include/envoy/http/codec.h index 568a735339e7f..4bcceeaca978f 100644 --- a/include/envoy/http/codec.h +++ b/include/envoy/http/codec.h @@ -134,8 +134,10 @@ class StreamCallbacks { /** * Fires when a stream has been remote reset. * @param reason supplies the reset reason. + * @param transport_failure_reason supplies underlying transport failure reason. */ - virtual void onResetStream(StreamResetReason reason) PURE; + virtual void onResetStream(StreamResetReason reason, + absl::string_view transport_failure_reason) PURE; /** * Fires when a stream, or the connection the stream is sending to, goes over its high watermark. @@ -179,6 +181,12 @@ class Stream { * Cessation of data may not be immediate. For example, for HTTP/2 this may stop further flow * control window updates which will result in the peer eventually stopping sending data. * @param disable informs if reads should be disabled (true) or re-enabled (false). + * + * Note that this function reference counts calls. For example + * readDisable(true); // Disables data + * readDisable(true); // Notes the stream is blocked by two sources + * readDisable(false); // Notes the stream is blocked by one source + * readDisable(false); // Marks the stream as unblocked, so resumes reading. */ virtual void readDisable(bool disable) PURE; @@ -322,13 +330,19 @@ class DownstreamWatermarkCallbacks { virtual ~DownstreamWatermarkCallbacks() {} /** - * Called when the downstream connection or stream goes over its high watermark. 
+ * Called when the downstream connection or stream goes over its high watermark. Note that this + * may be called separately for both the stream going over and the connection going over. It + * is the responsibility of the DownstreamWatermarkCallbacks implementation to handle unwinding + * multiple high and low watermark calls. */ virtual void onAboveWriteBufferHighWatermark() PURE; /** * Called when the downstream connection or stream goes from over its high watermark to under its - * low watermark. + * low watermark. As with onAboveWriteBufferHighWatermark above, this may be called independently + * when both the stream and the connection go under the low watermark limit, and the callee must + * ensure that the flow of data does not resume until all callers which were above their high + * watermarks have gone below. */ virtual void onBelowWriteBufferLowWatermark() PURE; }; diff --git a/include/envoy/http/conn_pool.h b/include/envoy/http/conn_pool.h index bd5cb462e625f..41017293ad718 100644 --- a/include/envoy/http/conn_pool.h +++ b/include/envoy/http/conn_pool.h @@ -46,10 +46,11 @@ class Callbacks { /** * Called when a pool error occurred and no connection could be acquired for making the request. * @param reason supplies the failure reason. + * @param transport_failure_reason supplies the details of the transport failure reason. * @param host supplies the description of the host that caused the failure. This may be nullptr * if no host was involved in the failure (for example overflow). */ - virtual void onPoolFailure(PoolFailureReason reason, + virtual void onPoolFailure(PoolFailureReason reason, absl::string_view transport_failure_reason, Upstream::HostDescriptionConstSharedPtr host) PURE; /** @@ -95,6 +96,12 @@ class Instance : public Event::DeferredDeletable { */ virtual void drainConnections() PURE; + /** + * Determines whether the connection pool is actively processing any requests. 
+ * @return true if the connection pool has any pending requests or any active requests. + */ + virtual bool hasActiveConnections() const PURE; + /** * Create a new stream on the pool. * @param response_decoder supplies the decoder events to fire when the response is diff --git a/include/envoy/http/filter.h b/include/envoy/http/filter.h index 01ea75b131d15..4ffa535542006 100644 --- a/include/envoy/http/filter.h +++ b/include/envoy/http/filter.h @@ -33,7 +33,30 @@ enum class FilterHeadersStatus { StopIteration, // Continue iteration to remaining filters, but ignore any subsequent data or trailers. This // results in creating a header only request/response. - ContinueAndEndStream + ContinueAndEndStream, + // Do not iterate for headers as well as data and trailers for the current filter and the filters + // following, and buffer body data for later dispatching. ContinueDecoding() MUST + // be called if continued filter iteration is desired. + // + // Used when a filter wants to stop iteration on data and trailers while waiting for headers' + // iteration to resume. + // + // If buffering the request causes buffered data to exceed the configured buffer limit, a 413 will + // be sent to the user. On the response path exceeding buffer limits will result in a 500. + // + // TODO(soya3129): stop metadata parsing when StopAllIterationAndBuffer is set. + StopAllIterationAndBuffer, + // Do not iterate for headers as well as data and trailers for the current filter and the filters + // following, and buffer body data for later dispatching. continueDecoding() MUST + // be called if continued filter iteration is desired. + // + // Used when a filter wants to stop iteration on data and trailers while waiting for headers' + // iteration to resume. + // + // This will cause the flow of incoming data to cease until continueDecoding() function is called. + // + // TODO(soya3129): stop metadata parsing when StopAllIterationAndWatermark is set. 
+ StopAllIterationAndWatermark, }; /** @@ -189,6 +212,12 @@ class StreamDecoderFilterCallbacks : public virtual StreamFilterCallbacks { */ virtual const Buffer::Instance* decodingBuffer() PURE; + /** + * Allows modifying the decoding buffer. May only be called before any data has been continued + * past the calling filter. + */ + virtual void modifyDecodingBuffer(std::function callback) PURE; + /** * Add buffered body data. This method is used in advanced cases where returning * StopIterationAndBuffer from decodeData() is not sufficient. @@ -211,11 +240,38 @@ class StreamDecoderFilterCallbacks : public virtual StreamFilterCallbacks { * * It is an error to call this method in any other case. * + * See also injectDecodedDataToFilterChain() for a different way of passing data to further + * filters and also how the two methods are different. + * * @param data Buffer::Instance supplies the data to be decoded. * @param streaming_filter boolean supplies if this filter streams data or buffers the full body. */ virtual void addDecodedData(Buffer::Instance& data, bool streaming_filter) PURE; + /** + * Decode data directly to subsequent filters in the filter chain. This method is used in + * advanced cases in which a filter needs full control over how subsequent filters view data, + * and does not want to make use of HTTP connection manager buffering. Using this method allows + * a filter to buffer data (or not) and then periodically inject data to subsequent filters, + * indicating end_stream at an appropriate time. This can be used to implement rate limiting, + * periodic data emission, etc. + * + * This method should only be called outside of callback context. I.e., do not call this method + * from within a filter's decodeData() call. 
+ * + * When using this callback, filters should generally only return + * FilterDataStatus::StopIterationNoBuffer from their decodeData() call, since use of this method + * indicates that a filter does not wish to participate in standard HTTP connection manager + * buffering and continuation and will perform any necessary buffering and continuation on its + * own. + * + * This callback is different from addDecodedData() in that the specified data and end_stream + * status will be propagated directly to further filters in the filter chain. This is different + * from addDecodedData() where data is added to the HTTP connection manager's buffered data with + * the assumption that standard HTTP connection manager buffering and continuation are being used. + */ + virtual void injectDecodedDataToFilterChain(Buffer::Instance& data, bool end_stream) PURE; + /** * Adds decoded trailers. May only be called in decodeData when end_stream is set to true. * If called in any other context, an assertion will be triggered. @@ -227,6 +283,18 @@ class StreamDecoderFilterCallbacks : public virtual StreamFilterCallbacks { */ virtual HeaderMap& addDecodedTrailers() PURE; + /** + * A wrapper for legacy sendLocalReply replies without the details parameter. + * See sendLocalReply below for usage + */ + // TODO(alyssawilk) send an email to envoy-dev for API change, add for all other filters, and + // delete this placeholder. + void sendLocalReply(Code response_code, absl::string_view body_text, + std::function modify_headers, + const absl::optional grpc_status) { + sendLocalReply(response_code, body_text, modify_headers, grpc_status, ""); + } + /** * Create a locally generated response using the provided response_code and body_text parameters. 
* If the request was a gRPC request the local reply will be encoded as a gRPC response with a 200 @@ -239,10 +307,12 @@ class StreamDecoderFilterCallbacks : public virtual StreamFilterCallbacks { * @param modify_headers supplies an optional callback function that can modify the * response headers. * @param grpc_status the gRPC status code to override the httpToGrpcStatus mapping with. + * @param details a string detailing why this local reply was sent. */ virtual void sendLocalReply(Code response_code, absl::string_view body_text, std::function modify_headers, - const absl::optional grpc_status) PURE; + const absl::optional grpc_status, + absl::string_view details) PURE; /** * Called with 100-Continue headers to be encoded. @@ -348,6 +418,20 @@ class StreamDecoderFilterCallbacks : public virtual StreamFilterCallbacks { // Note that HttpConnectionManager sanitization will *not* be performed on the // recreated stream, as it is assumed that sanitization has already been done. virtual bool recreateStream() PURE; + + /** + * Adds socket options to be applied to any connections used for upstream requests. Note that + * unique values for the options will likely lead to many connection pools being created. The + * added options are appended to any previously added. + * + * @param options The options to be added. + */ + virtual void addUpstreamSocketOptions(const Network::Socket::OptionsSharedPtr& options) PURE; + + /** + * @return The socket options to be applied to the upstream request. + */ + virtual Network::Socket::OptionsSharedPtr getUpstreamSocketOptions() const PURE; }; /** @@ -401,6 +485,11 @@ class StreamDecoderFilter : public StreamFilterBase { * filter should use. Callbacks will not be invoked by the filter after onDestroy() is called. */ virtual void setDecoderFilterCallbacks(StreamDecoderFilterCallbacks& callbacks) PURE; + + /** + * Called at the end of the stream, when all data has been decoded. 
+ */ + virtual void decodeComplete() {} }; typedef std::shared_ptr StreamDecoderFilterSharedPtr; @@ -430,6 +519,12 @@ class StreamEncoderFilterCallbacks : public virtual StreamFilterCallbacks { */ virtual const Buffer::Instance* encodingBuffer() PURE; + /** + * Allows modifying the encoding buffer. May only be called before any data has been continued + * past the calling filter. + */ + virtual void modifyEncodingBuffer(std::function callback) PURE; + /** * Add buffered body data. This method is used in advanced cases where returning * StopIterationAndBuffer from encodeData() is not sufficient. @@ -452,11 +547,38 @@ class StreamEncoderFilterCallbacks : public virtual StreamFilterCallbacks { * * It is an error to call this method in any other case. * + * See also injectEncodedDataToFilterChain() for a different way of passing data to further + * filters and also how the two methods are different. + * * @param data Buffer::Instance supplies the data to be encoded. * @param streaming_filter boolean supplies if this filter streams data or buffers the full body. */ virtual void addEncodedData(Buffer::Instance& data, bool streaming_filter) PURE; + /** + * Encode data directly to subsequent filters in the filter chain. This method is used in + * advanced cases in which a filter needs full control over how subsequent filters view data, + * and does not want to make use of HTTP connection manager buffering. Using this method allows + * a filter to buffer data (or not) and then periodically inject data to subsequent filters, + * indicating end_stream at an appropriate time. This can be used to implement rate limiting, + * periodic data emission, etc. + * + * This method should only be called outside of callback context. I.e., do not call this method + * from within a filter's encodeData() call. 
+ * + * When using this callback, filters should generally only return + * FilterDataStatus::StopIterationNoBuffer from their encodeData() call, since use of this method + * indicates that a filter does not wish to participate in standard HTTP connection manager + * buffering and continuation and will perform any necessary buffering and continuation on its + * own. + * + * This callback is different from addEncodedData() in that the specified data and end_stream + * status will be propagated directly to further filters in the filter chain. This is different + * from addEncodedData() where data is added to the HTTP connection manager's buffered data with + * the assumption that standard HTTP connection manager buffering and continuation are being used. + */ + virtual void injectEncodedDataToFilterChain(Buffer::Instance& data, bool end_stream) PURE; + /** * Adds encoded trailers. May only be called in encodeData when end_stream is set to true. * If called in any other context, an assertion will be triggered. @@ -500,7 +622,7 @@ class StreamEncoderFilterCallbacks : public virtual StreamFilterCallbacks { */ class StreamEncoderFilter : public StreamFilterBase { public: - /* + /** * Called with 100-continue headers. * * This is not folded into encodeHeaders because most Envoy users and filters @@ -549,6 +671,11 @@ class StreamEncoderFilter : public StreamFilterBase { * use. Callbacks will not be invoked by the filter after onDestroy() is called. */ virtual void setEncoderFilterCallbacks(StreamEncoderFilterCallbacks& callbacks) PURE; + + /** + * Called at the end of the stream, when all data has been encoded. 
+ */ + virtual void encodeComplete() {} }; typedef std::shared_ptr StreamEncoderFilterSharedPtr; diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index 780fc61b0ae49..3e3ee05422c53 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -12,6 +12,7 @@ #include "envoy/common/pure.h" +#include "common/common/assert.h" #include "common/common/hash.h" #include "absl/strings/string_view.h" @@ -19,15 +20,29 @@ namespace Envoy { namespace Http { +// Used by ASSERTs to validate internal consistency. E.g. valid HTTP header keys/values should +// never contain embedded NULLs. +static inline bool validHeaderString(absl::string_view s) { + for (const char c : {'\0', '\r', '\n'}) { + if (s.find(c) != absl::string_view::npos) { + return false; + } + } + return true; +} + /** * Wrapper for a lower case string used in header operations to generally avoid needless case * insensitive compares. */ class LowerCaseString { public: - LowerCaseString(LowerCaseString&& rhs) : string_(std::move(rhs.string_)) {} - LowerCaseString(const LowerCaseString& rhs) : string_(rhs.string_) {} - explicit LowerCaseString(const std::string& new_string) : string_(new_string) { lower(); } + LowerCaseString(LowerCaseString&& rhs) : string_(std::move(rhs.string_)) { ASSERT(valid()); } + LowerCaseString(const LowerCaseString& rhs) : string_(rhs.string_) { ASSERT(valid()); } + explicit LowerCaseString(const std::string& new_string) : string_(new_string) { + ASSERT(valid()); + lower(); + } const std::string& get() const { return string_; } bool operator==(const LowerCaseString& rhs) const { return string_ == rhs.string_; } @@ -36,6 +51,7 @@ class LowerCaseString { private: void lower() { std::transform(string_.begin(), string_.end(), string_.begin(), tolower); } + bool valid() const { return validHeaderString(string_); } std::string string_; }; @@ -102,11 +118,6 @@ class HeaderString { */ char* buffer() { return buffer_.dynamic_; } - /** - * @return a 
null terminated C string. - */ - const char* c_str() const { return buffer_.ref_; } - /** * @return an absl::string_view. */ @@ -125,15 +136,17 @@ class HeaderString { */ bool empty() const { return string_length_ == 0; } + // Looking for find? Use getStringView().find() + /** - * @return whether a substring exists in the string. + * Set the value of the string by copying data into it. This overwrites any existing string. */ - bool find(const char* str) const { return strstr(c_str(), str); } + void setCopy(const char* data, uint32_t size); /** * Set the value of the string by copying data into it. This overwrites any existing string. */ - void setCopy(const char* data, uint32_t size); + void setCopy(absl::string_view view); /** * Set the value of the string to an integer. This overwrites any existing string. @@ -157,8 +170,10 @@ class HeaderString { */ Type type() const { return type_; } - bool operator==(const char* rhs) const { return 0 == strcmp(c_str(), rhs); } - bool operator!=(const char* rhs) const { return 0 != strcmp(c_str(), rhs); } + bool operator==(const char* rhs) const { return getStringView() == absl::string_view(rhs); } + bool operator==(absl::string_view rhs) const { return getStringView() == rhs; } + bool operator!=(const char* rhs) const { return getStringView() != absl::string_view(rhs); } + bool operator!=(absl::string_view rhs) const { return getStringView() != rhs; } private: union Buffer { @@ -176,6 +191,7 @@ class HeaderString { }; void freeDynamic(); + bool valid() const; uint32_t string_length_; Type type_; @@ -494,6 +510,11 @@ class HeaderMap { */ virtual size_t size() const PURE; + /** + * @return true if the map is empty, false otherwise. + */ + virtual bool empty() const PURE; + /** * Allow easy pretty-printing of the key/value pairs in HeaderMap * @param os supplies the ostream to print to. 
@@ -502,8 +523,8 @@ class HeaderMap { friend std::ostream& operator<<(std::ostream& os, const HeaderMap& headers) { headers.iterate( [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - *static_cast(context) - << "'" << header.key().c_str() << "', '" << header.value().c_str() << "'\n"; + *static_cast(context) << "'" << header.key().getStringView() << "', '" + << header.value().getStringView() << "'\n"; return HeaderMap::Iterate::Continue; }, &os); diff --git a/include/envoy/init/BUILD b/include/envoy/init/BUILD index cfa069239b96f..2229d7c7a12e4 100644 --- a/include/envoy/init/BUILD +++ b/include/envoy/init/BUILD @@ -9,6 +9,23 @@ load( envoy_package() envoy_cc_library( - name = "init_interface", - hdrs = ["init.h"], + name = "watcher_interface", + hdrs = ["watcher.h"], +) + +envoy_cc_library( + name = "target_interface", + hdrs = ["target.h"], + deps = [ + ":watcher_interface", + ], +) + +envoy_cc_library( + name = "manager_interface", + hdrs = ["manager.h"], + deps = [ + ":target_interface", + ":watcher_interface", + ], ) diff --git a/include/envoy/init/init.h b/include/envoy/init/init.h deleted file mode 100644 index 0ecc37625cad5..0000000000000 --- a/include/envoy/init/init.h +++ /dev/null @@ -1,60 +0,0 @@ -#pragma once - -#include - -#include "envoy/common/pure.h" - -namespace Envoy { -namespace Init { - -/** - * A single initialization target. - */ -class Target { -public: - virtual ~Target() {} - - /** - * Called when the target should begin its own initialization. - * @param callback supplies the callback to invoke when the target has completed its - * initialization. - */ - virtual void initialize(std::function callback) PURE; -}; - -/** - * A manager that initializes multiple targets. - */ -class Manager { -public: - virtual ~Manager() {} - - /** - * Register a target to be initialized in the future. The manager will call initialize() on - * each target at some point in the future. 
- */ - virtual void registerTarget(Target& target) PURE; - - enum class State { - /** - * Targets have not been initialized. - */ - NotInitialized, - /** - * Targets are currently being initialized. - */ - Initializing, - /** - * All targets have been initialized. - */ - Initialized - }; - - /** - * Returns the current state of the init manager. - */ - virtual State state() const PURE; -}; - -} // namespace Init -} // namespace Envoy diff --git a/include/envoy/init/manager.h b/include/envoy/init/manager.h new file mode 100644 index 0000000000000..94cf0dbb25e1a --- /dev/null +++ b/include/envoy/init/manager.h @@ -0,0 +1,79 @@ +#pragma once + +#include "envoy/common/pure.h" +#include "envoy/init/target.h" +#include "envoy/init/watcher.h" + +namespace Envoy { +namespace Init { + +/** + * Init::Manager coordinates initialization of one or more "targets." A typical flow would be: + * + * - One or more initialization targets are registered with a manager using `add`. + * - The manager is told to `initialize` all its targets, given a Watcher to notify when all + * registered targets are initialized. + * - Each target will initialize, either immediately or asynchronously, and will signal + * `ready` to the manager when initialized. + * - When all targets are initialized, the manager signals `ready` to the watcher it was given + * previously. + * + * Since there are several entities involved in this flow -- the owner of the manager, the targets + * registered with the manager, and the manager itself -- it may be difficult or impossible in some + * cases to guarantee that their lifetimes line up correctly to avoid use-after-free errors. The + * interface design here in Init allows implementations to avoid the issue: + * + * - A Target can only be initialized via a TargetHandle, which acts as a weak reference. + * Attempting to initialize a destroyed Target via its handle has no ill effects. 
+ * - Likewise, a Watcher can only be notified that initialization was complete via a + * WatcherHandle, which acts as a weak reference as well. + * + * See target.h and watcher.h, as well as implementation in source/common/init for details. + */ +struct Manager { + virtual ~Manager() = default; + + /** + * The manager's state, used e.g. for reporting in the admin server. + */ + enum class State { + /** + * Targets have not been initialized. + */ + Uninitialized, + /** + * Targets are currently being initialized. + */ + Initializing, + /** + * All targets have been initialized. + */ + Initialized + }; + + /** + * @return the current state of the manager. + */ + virtual State state() const PURE; + + /** + * Register an initialization target. If the manager's current state is uninitialized, the target + * will be saved for invocation later, when `initialize` is called. If the current state is + * initializing, the target will be invoked immediately. It is an error to register a target with + * a manager that is already in initialized state. + * @param target the target to be invoked when initialization begins. + */ + virtual void add(const Target& target) PURE; + + /** + * Start initialization of all previously registered targets, and notify the given Watcher when + * initialization is complete. It is an error to call initialize on a manager that is already in + * initializing or initialized state. If the manager contains no targets, initialization completes + * immediately. + * @param watcher the watcher to notify when initialization is complete. 
+ */ + virtual void initialize(const Watcher& watcher) PURE; +}; + +} // namespace Init +} // namespace Envoy diff --git a/include/envoy/init/target.h b/include/envoy/init/target.h new file mode 100644 index 0000000000000..9ab46d38aff48 --- /dev/null +++ b/include/envoy/init/target.h @@ -0,0 +1,52 @@ +#pragma once + +#include + +#include "envoy/common/pure.h" +#include "envoy/init/watcher.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Init { + +/** + * A TargetHandle functions as a weak reference to a Target. It is how an implementation of + * Init::Manager would safely tell a target to `initialize` with no guarantees about the + * target's lifetime. Typical usage (outside of Init::ManagerImpl) does not require touching + * TargetHandles at all. + */ +struct TargetHandle { + virtual ~TargetHandle() = default; + + /** + * Tell the target to begin initialization, if it is still available. + * @param watcher A Watcher for the target to notify when it has initialized. + * @return true if the target received this call, false if the target was already destroyed. + */ + virtual bool initialize(const Watcher& watcher) const PURE; +}; +using TargetHandlePtr = std::unique_ptr; + +/** + * An initialization Target is an entity that can be registered with a Manager for initialization. + * It can only be invoked through a TargetHandle. + */ +struct Target { + virtual ~Target() = default; + + /** + * @return a human-readable target name, for logging / debugging. + */ + virtual absl::string_view name() const PURE; + + /** + * Create a new handle that can initialize this target. + * @param name a human readable handle name, for logging / debugging. + * @return a new handle that can initialize this target. 
+ */ + virtual TargetHandlePtr createHandle(absl::string_view name) const PURE; +}; + +} // namespace Init +} // namespace Envoy diff --git a/include/envoy/init/watcher.h b/include/envoy/init/watcher.h new file mode 100644 index 0000000000000..ccf17adfcbafc --- /dev/null +++ b/include/envoy/init/watcher.h @@ -0,0 +1,53 @@ +#pragma once + +#include + +#include "envoy/common/pure.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Init { + +/** + * A WatcherHandle functions as a weak reference to a Watcher. It is how an implementation of + * Init::Target would safely notify a Manager that it has initialized, and likewise it's how + * an implementation of Init::Manager would safely tell its client that all registered targets + * have initialized, with no guarantees about the lifetimes of the manager or client. Typical usage + * (outside of Init::TargetImpl and ManagerImpl) does not require touching WatcherHandles at + * all. + */ +struct WatcherHandle { + virtual ~WatcherHandle() = default; + + /** + * Tell the watcher that initialization has completed, if it is still available. + * @return true if the watcher received this call, false if the watcher was already destroyed. + */ + virtual bool ready() const PURE; +}; +using WatcherHandlePtr = std::unique_ptr; + +/** + * A Watcher is an entity that listens for notifications that either an initialization target or + * all targets registered with a manager have initialized. It can only be invoked through a + * WatcherHandle. + */ +struct Watcher { + virtual ~Watcher() = default; + + /** + * @return a human-readable target name, for logging / debugging. + */ + virtual absl::string_view name() const PURE; + + /** + * Create a new handle that can notify this watcher. + * @param name a human readable handle name, for logging / debugging. + * @return a new handle that can notify this watcher. 
+ */ + virtual WatcherHandlePtr createHandle(absl::string_view name) const PURE; +}; + +} // namespace Init +} // namespace Envoy diff --git a/include/envoy/local_info/local_info.h b/include/envoy/local_info/local_info.h index 2fe5131b8ee50..1c9c152c8c46c 100644 --- a/include/envoy/local_info/local_info.h +++ b/include/envoy/local_info/local_info.h @@ -24,17 +24,17 @@ class LocalInfo { /** * Human readable zone name. E.g., "us-east-1a". */ - virtual const std::string zoneName() const PURE; + virtual const std::string& zoneName() const PURE; /** * Human readable cluster name. E.g., "eta". */ - virtual const std::string clusterName() const PURE; + virtual const std::string& clusterName() const PURE; /** * Human readable individual node name. E.g., "i-123456". */ - virtual const std::string nodeName() const PURE; + virtual const std::string& nodeName() const PURE; /** * v2 API Node protobuf. This is the full node identity presented to management servers. diff --git a/include/envoy/network/BUILD b/include/envoy/network/BUILD index 15ba05a6a4f95..2c1a20f5e90c6 100644 --- a/include/envoy/network/BUILD +++ b/include/envoy/network/BUILD @@ -67,6 +67,10 @@ envoy_cc_library( envoy_cc_library( name = "io_handle_interface", hdrs = ["io_handle.h"], + deps = [ + "//include/envoy/api:io_error_interface", + "//source/common/common:assert_lib", + ], ) envoy_cc_library( diff --git a/include/envoy/network/connection.h b/include/envoy/network/connection.h index 1545d4b30e9f6..86d407bb3e485 100644 --- a/include/envoy/network/connection.h +++ b/include/envoy/network/connection.h @@ -144,6 +144,12 @@ class Connection : public Event::DeferredDeletable, public FilterManager { * enabled again if there is data still in the input buffer it will be redispatched through * the filter chain. * @param disable supplies TRUE is reads should be disabled, FALSE if they should be enabled. + * + * Note that this function reference counts calls. 
For example + * readDisable(true); // Disables data + * readDisable(true); // Notes the connection is blocked by two sources + * readDisable(false); // Notes the connection is blocked by one source + * readDisable(false); // Marks the connection as unblocked, so resumes reading. */ virtual void readDisable(bool disable) PURE; @@ -166,6 +172,30 @@ class Connection : public Event::DeferredDeletable, public FilterManager { */ virtual const Network::Address::InstanceConstSharedPtr& remoteAddress() const PURE; + /** + * Credentials of the peer of a socket as decided by SO_PEERCRED. + */ + struct UnixDomainSocketPeerCredentials { + /** + * The process id of the peer. + */ + int32_t pid; + /** + * The user id of the peer. + */ + uint32_t uid; + /** + * The group id of the peer. + */ + uint32_t gid; + }; + + /** + * @return The unix socket peer credentials of the the remote client. Note that this is only + * supported for unix socket connections. + */ + virtual absl::optional unixSocketPeerCredentials() const PURE; + /** * @return the local address of the connection. For client connections, this is the origin * address. For server connections, this is the local destination address. For server connections @@ -184,7 +214,8 @@ class Connection : public Event::DeferredDeletable, public FilterManager { /** * @return the const SSL connection data if this is an SSL connection, or nullptr if it is not. */ - virtual const Ssl::Connection* ssl() const PURE; + // TODO(snowp): Remove this in favor of StreamInfo::downstreamSslConnection. + virtual const Ssl::ConnectionInfo* ssl() const PURE; /** * @return requested server name (e.g. SNI in TLS), if any. @@ -253,6 +284,7 @@ class Connection : public Event::DeferredDeletable, public FilterManager { /** * Set the timeout for delayed connection close()s. + * This can only be called prior to issuing a close() on the connection. 
* @param timeout The timeout value in milliseconds */ virtual void setDelayedCloseTimeout(std::chrono::milliseconds timeout) PURE; @@ -263,20 +295,10 @@ class Connection : public Event::DeferredDeletable, public FilterManager { virtual std::chrono::milliseconds delayedCloseTimeout() const PURE; /** - * Set the order of the write filters, indicating whether it is reversed to the filter chain - * config. - */ - // TODO(qiannawang): this method is deprecated and to be moved soon. See - // https://github.com/envoyproxy/envoy/pull/4889 for more details. - virtual void setWriteFilterOrder(bool reversed) PURE; - - /** - * @return bool indicates whether write filters should be in the reversed order of the filter - * chain config. + * @return std::string the failure reason of the underlying transport socket, if no failure + * occurred an empty string is returned. */ - // TODO(qiannawang): this method is deprecated and to be moved soon. See - // https://github.com/envoyproxy/envoy/pull/4889 for more details. - virtual bool reverseWriteFilterOrder() const PURE; + virtual absl::string_view transportFailureReason() const PURE; }; typedef std::unique_ptr ConnectionPtr; diff --git a/include/envoy/network/filter.h b/include/envoy/network/filter.h index 87b982969fca1..d9f3c3a409233 100644 --- a/include/envoy/network/filter.h +++ b/include/envoy/network/filter.h @@ -28,6 +28,46 @@ enum class FilterStatus { StopIteration }; +/** + * Callbacks used by individual filter instances to communicate with the filter manager. + */ +class NetworkFilterCallbacks { +public: + virtual ~NetworkFilterCallbacks() {} + + /** + * @return the connection that owns this filter. + */ + virtual Connection& connection() PURE; +}; + +/** + * Callbacks used by individual write filter instances to communicate with the filter manager. 
+ */ +class WriteFilterCallbacks : public virtual NetworkFilterCallbacks { +public: + virtual ~WriteFilterCallbacks() {} + + /** + * Pass data directly to subsequent filters in the filter chain. This method is used in + * advanced cases in which a filter needs full control over how subsequent filters view data. + * Using this method allows a filter to buffer data (or not) and then periodically inject data + * to subsequent filters, indicating end_stream at an appropriate time. + * This can be used to implement rate limiting, periodic data emission, etc. + * + * When using this callback, filters should generally move passed in buffer and return + * FilterStatus::StopIteration from their onWrite() call, since use of this method + * indicates that a filter does not wish to participate in a standard write flow + * and will perform any necessary buffering and continuation on its own. + * + * @param data supplies the write data to be propagated directly to further filters in the filter + * chain. + * @param end_stream supplies the end_stream status to be propagated directly to further filters + * in the filter chain. + */ + virtual void injectWriteDataToFilterChain(Buffer::Instance& data, bool end_stream) PURE; +}; + /** * A write path binary connection filter. */ @@ -42,6 +82,19 @@ class WriteFilter { * @return status used by the filter manager to manage further filter iteration. */ virtual FilterStatus onWrite(Buffer::Instance& data, bool end_stream) PURE; + + /** + * Initializes the write filter callbacks used to interact with the filter manager. It will be + * called by the filter manager a single time when the filter is first registered. Thus, any + * construction that requires the backing connection should take place in the context of this + * function. + * + * IMPORTANT: No outbound networking or complex processing should be done in this function. + * That should be done in the context of ReadFilter::onNewConnection() if needed. 
+ * + * @param callbacks supplies the callbacks. + */ + virtual void initializeWriteFilterCallbacks(WriteFilterCallbacks&) {} }; typedef std::shared_ptr WriteFilterSharedPtr; @@ -49,15 +102,10 @@ typedef std::shared_ptr WriteFilterSharedPtr; /** * Callbacks used by individual read filter instances to communicate with the filter manager. */ -class ReadFilterCallbacks { +class ReadFilterCallbacks : public virtual NetworkFilterCallbacks { public: virtual ~ReadFilterCallbacks() {} - /** - * @return the connection that owns this read filter. - */ - virtual Connection& connection() PURE; - /** * If a read filter stopped filter iteration, continueReading() can be called to continue the * filter chain. The next filter will be called with all currently available data in the read @@ -65,6 +113,31 @@ class ReadFilterCallbacks { */ virtual void continueReading() PURE; + /** + * Pass data directly to subsequent filters in the filter chain. This method is used in + * advanced cases in which a filter needs full control over how subsequent filters view data, + * and does not want to make use of connection-level buffering. Using this method allows + * a filter to buffer data (or not) and then periodically inject data to subsequent filters, + * indicating end_stream at an appropriate time. This can be used to implement rate limiting, + * periodic data emission, etc. + * + * When using this callback, filters should generally move passed in buffer and return + * FilterStatus::StopIteration from their onData() call, since use of this method + * indicates that a filter does not wish to participate in standard connection-level + * buffering and continuation and will perform any necessary buffering and continuation on its + * own. 
+ * + * This callback is different from continueReading() in that the specified data and end_stream + * status will be propagated verbatim to further filters in the filter chain + * (while continueReading() propagates connection-level read buffer and end_stream status). + * + * @param data supplies the read data to be propagated directly to further filters in the filter + * chain. + * @param end_stream supplies the end_stream status to be propagated directly to further filters + * in the filter chain. + */ + virtual void injectReadDataToFilterChain(Buffer::Instance& data, bool end_stream) PURE; + /** * Return the currently selected upstream host, if any. This can be used for communication * between multiple network level filters, for example the TCP proxy filter communicating its diff --git a/include/envoy/network/io_handle.h b/include/envoy/network/io_handle.h index c00c2288278dd..37ecec5d07344 100644 --- a/include/envoy/network/io_handle.h +++ b/include/envoy/network/io_handle.h @@ -1,33 +1,64 @@ #pragma once -#include - +#include "envoy/api/io_error.h" #include "envoy/common/pure.h" namespace Envoy { +namespace Buffer { +struct RawSlice; +} // namespace Buffer + namespace Network { +namespace Address { +class Instance; +} // namespace Address /** * IoHandle: an abstract interface for all I/O operations */ class IoHandle { public: - IoHandle() {} - virtual ~IoHandle() {} /** * Return data associated with IoHandle. * - * TODO(sbelair2) remove fd() method + * TODO(danzh) move it to IoSocketHandle after replacing the calls to it with + * calls to IoHandle API's everywhere. */ virtual int fd() const PURE; /** * Clean up IoHandle resources */ - virtual void close() PURE; + virtual Api::IoCallUint64Result close() PURE; + + /** + * Return true if close() hasn't been called. + */ + virtual bool isOpen() const PURE; + + /** + * Read data into given slices. + * @param max_length supplies the maximum length to read. + * @param slices points to the output location. 
+ * @param num_slice indicates the number of slices |slices| contains. + * @return a Api::IoCallUint64Result with err_ = an Api::IoError instance or + * err_ = nullptr and rc_ = the bytes read for success. + */ + virtual Api::IoCallUint64Result readv(uint64_t max_length, Buffer::RawSlice* slices, + uint64_t num_slice) PURE; + + /** + * Write the data in slices out. + * @param slices points to the location of data to be written. + * @param num_slice indicates number of slices |slices| contains. + * @return a Api::IoCallUint64Result with err_ = an Api::IoError instance or + * err_ = nullptr and rc_ = the bytes written for success. + */ + virtual Api::IoCallUint64Result writev(const Buffer::RawSlice* slices, uint64_t num_slice) PURE; }; + typedef std::unique_ptr IoHandlePtr; } // namespace Network diff --git a/include/envoy/network/listener.h b/include/envoy/network/listener.h index 06a9d7cc22637..886c50ef4e70b 100644 --- a/include/envoy/network/listener.h +++ b/include/envoy/network/listener.h @@ -80,14 +80,6 @@ class ListenerConfig { * @return const std::string& the listener's name. */ virtual const std::string& name() const PURE; - - /** - * @return bool indicates whether write filters should be in the reversed order of the filter - * chain config. - */ - // TODO(qiannawang): this method is deprecated and to be moved soon. See - // https://github.com/envoyproxy/envoy/pull/4889 for more details. - virtual bool reverseWriteFilterOrder() const PURE; }; /** @@ -100,9 +92,9 @@ class ListenerCallbacks { /** * Called when a new connection is accepted. * @param socket supplies the socket that is moved into the callee. - * @param redirected is true when the socket was first accepted by another listener - * and is redirected to a new listener. The recipient should not redirect - * the socket any further. + * @param hand_off_restored_destination_connections is true when the socket was first accepted by + * another listener and is redirected to a new listener. 
The recipient should not redirect the + * socket any further. */ virtual void onAccept(ConnectionSocketPtr&& socket, bool hand_off_restored_destination_connections = true) PURE; diff --git a/include/envoy/network/transport_socket.h b/include/envoy/network/transport_socket.h index 289f17c28bf2b..5d8430126949b 100644 --- a/include/envoy/network/transport_socket.h +++ b/include/envoy/network/transport_socket.h @@ -105,6 +105,12 @@ class TransportSocket { */ virtual std::string protocol() const PURE; + /** + * @return std::string the last failure reason occurred on the transport socket. If no failure + * has been occurred the empty string is returned. + */ + virtual absl::string_view failureReason() const PURE; + /** * @return bool whether the socket can be flushed and closed. */ @@ -138,7 +144,7 @@ class TransportSocket { /** * @return the const SSL connection data if this is an SSL connection, or nullptr if it is not. */ - virtual const Ssl::Connection* ssl() const PURE; + virtual const Ssl::ConnectionInfo* ssl() const PURE; }; typedef std::unique_ptr TransportSocketPtr; diff --git a/include/envoy/router/BUILD b/include/envoy/router/BUILD index f47a6db92ec04..73c4e8bee63a5 100644 --- a/include/envoy/router/BUILD +++ b/include/envoy/router/BUILD @@ -20,7 +20,6 @@ envoy_cc_library( deps = [ ":rds_interface", "//include/envoy/event:dispatcher_interface", - "//include/envoy/init:init_interface", "//include/envoy/json:json_object_interface", "//include/envoy/local_info:local_info_interface", "//include/envoy/runtime:runtime_interface", @@ -32,6 +31,17 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "route_config_update_info_interface", + hdrs = ["route_config_update_receiver.h"], + external_deps = ["abseil_optional"], + deps = [ + ":rds_interface", + "//include/envoy/common:time_interface", + "//source/common/protobuf", + ], +) + envoy_cc_library( name = "router_interface", hdrs = ["router.h"], diff --git a/include/envoy/router/rds.h b/include/envoy/router/rds.h 
index 8ff43f213f4f5..67a82fd45140f 100644 --- a/include/envoy/router/rds.h +++ b/include/envoy/router/rds.h @@ -43,6 +43,11 @@ class RouteConfigProvider { * @return the last time this RouteConfigProvider was updated. Used for config dumps. */ virtual SystemTime lastUpdated() const PURE; + + /** + * Callback used to notify RouteConfigProvider about configuration changes. + */ + virtual void onConfigUpdate() PURE; }; typedef std::unique_ptr RouteConfigProviderPtr; diff --git a/include/envoy/router/route_config_provider_manager.h b/include/envoy/router/route_config_provider_manager.h index 3070a8dc407d9..9bf02066a6009 100644 --- a/include/envoy/router/route_config_provider_manager.h +++ b/include/envoy/router/route_config_provider_manager.h @@ -4,7 +4,6 @@ #include "envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.pb.h" #include "envoy/event/dispatcher.h" -#include "envoy/init/init.h" #include "envoy/json/json_object.h" #include "envoy/local_info/local_info.h" #include "envoy/router/rds.h" diff --git a/include/envoy/router/route_config_update_receiver.h b/include/envoy/router/route_config_update_receiver.h new file mode 100644 index 0000000000000..8ac284fae6d4f --- /dev/null +++ b/include/envoy/router/route_config_update_receiver.h @@ -0,0 +1,79 @@ +#pragma once + +#include + +#include "envoy/common/pure.h" +#include "envoy/common/time.h" +#include "envoy/router/rds.h" + +#include "common/protobuf/protobuf.h" + +#include "absl/types/optional.h" + +namespace Envoy { +namespace Router { + +/** + * A primitive that keeps track of updates to a RouteConfiguration. + */ +class RouteConfigUpdateReceiver { +public: + virtual ~RouteConfigUpdateReceiver() = default; + + /** + * Called on updates via RDS. + * @param rc supplies the RouteConfiguration. + * @param version_info supplies RouteConfiguration version. + * @return bool whether RouteConfiguration has been updated. 
+ */ + virtual bool onRdsUpdate(const envoy::api::v2::RouteConfiguration& rc, + const std::string& version_info) PURE; + /** + * Called on updates via VHDS. + * @param added_resources supplies Resources (each containing a VirtualHost) that have been + * added. + * @param removed_resources supplies names of VirtualHosts that have been removed. + * @param version_info supplies RouteConfiguration version. + * @return bool whether RouteConfiguration has been updated. + */ + virtual bool + onVhdsUpdate(const Protobuf::RepeatedPtrField& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& version_info) PURE; + + /** + * @return std::string& the name of RouteConfiguration. + */ + virtual const std::string& routeConfigName() const PURE; + + /** + * @return std::string& the version of RouteConfiguration. + */ + virtual const std::string& configVersion() PURE; + + /** + * @return uint64_t the hash value of RouteConfiguration. + */ + virtual uint64_t configHash() const PURE; + + /** + * @return absl::optional containing an instance of + * RouteConfigProvider::ConfigInfo if RouteConfiguration has been updated at least once. Otherwise + * returns an empty absl::optional. + */ + virtual absl::optional configInfo() const PURE; + + /** + * @return envoy::api::v2::RouteConfiguration& current RouteConfiguration. + */ + virtual const envoy::api::v2::RouteConfiguration& routeConfiguration() PURE; + + /** + * @return SystemTime the time of the last update. + */ + virtual SystemTime lastUpdated() const PURE; +}; + +using RouteConfigUpdatePtr = std::unique_ptr; +} // namespace Router +} // namespace Envoy diff --git a/include/envoy/router/router.h b/include/envoy/router/router.h index efc3025331a74..d2652df8d466c 100644 --- a/include/envoy/router/router.h +++ b/include/envoy/router/router.h @@ -201,6 +201,16 @@ class RetryPolicy { * policy is enabled. 
*/ virtual const std::vector& retriableStatusCodes() const PURE; + + /** + * @return absl::optional base retry interval + */ + virtual absl::optional baseInterval() const PURE; + + /** + * @return absl::optional maximum retry interval + */ + virtual absl::optional maxInterval() const PURE; }; /** @@ -228,9 +238,8 @@ class RetryState { virtual bool enabled() PURE; /** - * Determine whether a request should be retried based on the response. - * @param response_headers supplies the response headers if available. - * @param reset_reason supplies the reset reason if available. + * Determine whether a request should be retried based on the response headers. + * @param response_headers supplies the response headers. * @param callback supplies the callback that will be invoked when the retry should take place. * This is used to add timed backoff, etc. The callback will never be called * inline. @@ -238,9 +247,21 @@ class RetryState { * in the future. Otherwise a retry should not take place and the callback will never be * called. Calling code should proceed with error handling. */ - virtual RetryStatus shouldRetry(const Http::HeaderMap* response_headers, - const absl::optional& reset_reason, - DoRetryCallback callback) PURE; + virtual RetryStatus shouldRetryHeaders(const Http::HeaderMap& response_headers, + DoRetryCallback callback) PURE; + + /** + * Determine whether a request should be retried after a reset based on the reason for the reset. + * @param reset_reason supplies the reset reason. + * @param callback supplies the callback that will be invoked when the retry should take place. + * This is used to add timed backoff, etc. The callback will never be called + * inline. + * @return RetryStatus if a retry should take place. @param callback will be called at some point + * in the future. Otherwise a retry should not take place and the callback will never be + * called. Calling code should proceed with error handling. 
+ */ + virtual RetryStatus shouldRetryReset(const Http::StreamResetReason reset_reason, + DoRetryCallback callback) PURE; /** * Called when a host was attempted but the request failed and is eligible for another retry. @@ -411,6 +432,32 @@ class HashPolicy { AddCookieCallback add_cookie) const PURE; }; +/** + * Route level hedging policy. + */ +class HedgePolicy { +public: + virtual ~HedgePolicy() {} + + /** + * @return number of upstream requests that should be sent initially. + */ + virtual uint32_t initialRequests() const PURE; + + /** + * @return percent chance that an additional upstream request should be sent + * on top of the value from initialRequests(). + */ + virtual const envoy::type::FractionalPercent& additionalRequestChance() const PURE; + + /** + * @return bool indicating whether request hedging should occur when a request + * is retried due to a per try timeout. The alternative is the original request + * will be canceled immediately. + */ + virtual bool hedgeOnPerTryTimeout() const PURE; +}; + class MetadataMatchCriterion { public: virtual ~MetadataMatchCriterion() {} @@ -528,6 +575,12 @@ class RouteEntry : public ResponseEntry { */ virtual const HashPolicy* hashPolicy() const PURE; + /** + * @return const HedgePolicy& the hedge policy for the route. All routes have a hedge policy even + * if it is empty and does not allow for hedged requests. + */ + virtual const HedgePolicy& hedgePolicy() const PURE; + /** * @return the priority of the route. */ @@ -568,6 +621,13 @@ class RouteEntry : public ResponseEntry { */ virtual absl::optional maxGrpcTimeout() const PURE; + /** + * @return absl::optional the timeout offset to apply to the timeout + * provided by the 'grpc-timeout' header of a gRPC request. This value will be positive and should + * be subtracted from the value provided by the header. 
+ */ + virtual absl::optional grpcTimeoutOffset() const PURE; + /** * Determine whether a specific request path belongs to a virtual cluster for use in stats, etc. * @param headers supplies the request headers. @@ -743,6 +803,11 @@ class Config { * @return const std::string the RouteConfiguration name. */ virtual const std::string& name() const PURE; + + /** + * @return whether router configuration uses VHDS. + */ + virtual bool usesVhds() const PURE; }; typedef std::shared_ptr ConfigConstSharedPtr; diff --git a/include/envoy/runtime/BUILD b/include/envoy/runtime/BUILD index 073bc85591873..8e510aeb09ddb 100644 --- a/include/envoy/runtime/BUILD +++ b/include/envoy/runtime/BUILD @@ -13,6 +13,8 @@ envoy_cc_library( hdrs = ["runtime.h"], external_deps = ["abseil_optional"], deps = [ + "//source/common/common:assert_lib", + "//source/common/singleton:threadsafe_singleton", "@envoy_api//envoy/type:percent_cc", ], ) diff --git a/include/envoy/runtime/runtime.h b/include/envoy/runtime/runtime.h index ef535cf8ffd3e..b5e308b14d4ac 100644 --- a/include/envoy/runtime/runtime.h +++ b/include/envoy/runtime/runtime.h @@ -9,6 +9,10 @@ #include "envoy/common/pure.h" #include "envoy/type/percent.pb.h" +#include "common/common/assert.h" +#include "common/singleton/threadsafe_singleton.h" + +#include "absl/container/flat_hash_map.h" #include "absl/types/optional.h" namespace Envoy { @@ -46,9 +50,10 @@ class Snapshot { std::string raw_string_value_; absl::optional uint_value_; absl::optional fractional_percent_value_; + absl::optional bool_value_; }; - typedef std::unordered_map EntryMap; + typedef absl::flat_hash_map EntryMap; /** * A provider of runtime values. One or more of these compose the snapshot's source of values, @@ -59,9 +64,9 @@ class Snapshot { virtual ~OverrideLayer() {} /** - * @return const std::unordered_map& the values in this layer. + * @return const absl::flat_hash_map& the values in this layer. 
*/ - virtual const std::unordered_map& values() const PURE; + virtual const EntryMap& values() const PURE; /** * @return const std::string& a user-friendly alias for this layer, e.g. "admin" or "disk". @@ -71,6 +76,21 @@ class Snapshot { typedef std::unique_ptr OverrideLayerConstPtr; + // Returns true if a deprecated feature is allowed. + // + // Fundamentally, deprecated features are boolean values. + // They are allowed by default or with explicit configuration to "true" via runtime configuration. + // They can be disallowed either by inclusion in the hard-coded disallowed_features[] list, or by + // configuration of "false" in runtime config. + virtual bool deprecatedFeatureEnabled(const std::string& key) const PURE; + + // Returns true if a runtime feature is enabled. + // + // Runtime features are used to easily allow switching between old and new code paths for high + // risk changes. The intent is for the old code path to be short lived - the old code path is + // deprecated as the feature is defaulted true, and removed with the following Envoy release. + virtual bool runtimeFeatureEnabled(absl::string_view key) const PURE; + /** * Test if a feature is enabled using the built in random generator. This is done by generating * a random number in the range 0-99 and seeing if this number is < the value stored in the @@ -201,7 +221,17 @@ class Loader { virtual void mergeValues(const std::unordered_map& values) PURE; }; -typedef std::unique_ptr LoaderPtr; +using LoaderPtr = std::unique_ptr; + +// To make the runtime generally accessible, we make use of the dreaded +// singleton class. For Envoy, the runtime will be created and cleaned up by the +// Server::InstanceImpl initialize() and destructor, respectively. +// +// This makes it possible for call sites to easily make use of runtime values to +// determine if a given feature is on or off, as well as various deprecated configuration +// protos being enabled or disabled by default. 
+using LoaderSingleton = InjectableSingleton; +using ScopedLoaderSingleton = ScopedInjectableLoader; } // namespace Runtime } // namespace Envoy diff --git a/include/envoy/server/BUILD b/include/envoy/server/BUILD index adcf536c8b12e..5baa1b5d7472b 100644 --- a/include/envoy/server/BUILD +++ b/include/envoy/server/BUILD @@ -80,6 +80,7 @@ envoy_cc_library( deps = [ "//include/envoy/event:dispatcher_interface", "//include/envoy/thread:thread_interface", + "//source/server:hot_restart_cc", ], ) @@ -90,6 +91,7 @@ envoy_cc_library( ":admin_interface", ":drain_manager_interface", ":hot_restart_interface", + ":lifecycle_notifier_interface", ":listener_manager_interface", ":options_interface", "//include/envoy/access_log:access_log_interface", @@ -98,7 +100,7 @@ envoy_cc_library( "//include/envoy/event:timer_interface", "//include/envoy/http:context_interface", "//include/envoy/http:query_params_interface", - "//include/envoy/init:init_interface", + "//include/envoy/init:manager_interface", "//include/envoy/local_info:local_info_interface", "//include/envoy/runtime:runtime_interface", "//include/envoy/secret:secret_manager_interface", @@ -144,12 +146,13 @@ envoy_cc_library( hdrs = ["filter_config.h"], deps = [ ":admin_interface", + ":lifecycle_notifier_interface", "//include/envoy/access_log:access_log_interface", "//include/envoy/api:api_interface", "//include/envoy/http:codes_interface", "//include/envoy/http:context_interface", "//include/envoy/http:filter_interface", - "//include/envoy/init:init_interface", + "//include/envoy/init:manager_interface", "//include/envoy/json:json_object_interface", "//include/envoy/local_info:local_info_interface", "//include/envoy/network:drain_decision_interface", @@ -166,6 +169,14 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "lifecycle_notifier_interface", + hdrs = ["lifecycle_notifier.h"], + deps = [ + "//include/envoy/event:dispatcher_interface", + ], +) + envoy_cc_library( name = "listener_manager_interface", hdrs = 
["listener_manager.h"], @@ -186,7 +197,7 @@ envoy_cc_library( hdrs = ["transport_socket_config.h"], deps = [ "//include/envoy/event:dispatcher_interface", - "//include/envoy/init:init_interface", + "//include/envoy/init:manager_interface", "//include/envoy/local_info:local_info_interface", "//include/envoy/network:transport_socket_interface", "//include/envoy/runtime:runtime_interface", diff --git a/include/envoy/server/admin.h b/include/envoy/server/admin.h index 7249e65cc2bdd..03500ac84aa32 100644 --- a/include/envoy/server/admin.h +++ b/include/envoy/server/admin.h @@ -72,7 +72,7 @@ class Admin { /** * Callback for admin URL handlers. - * @param path_and_query supplies the the path and query of the request URL. + * @param path_and_query supplies the path and query of the request URL. * @param response_headers enables setting of http headers (eg content-type, cache-control) in the * handler. * @param response supplies the buffer to fill in with the response body. @@ -134,7 +134,7 @@ class Admin { * * @param path_and_query the path and query of the admin URL. * @param method the HTTP method (POST or GET). - * @param response_headers populated the the response headers from executing the request, + * @param response_headers populated the response headers from executing the request, * most notably content-type. * @param body populated with the response-body from the admin request. * @return Http::Code The HTTP response code from the admin request. diff --git a/include/envoy/server/configuration.h b/include/envoy/server/configuration.h index e076b94070f01..c45fe156220ca 100644 --- a/include/envoy/server/configuration.h +++ b/include/envoy/server/configuration.h @@ -20,9 +20,9 @@ namespace Configuration { /** * Configuration for local disk runtime support. */ -class Runtime { +class DiskRuntime { public: - virtual ~Runtime() {} + virtual ~DiskRuntime() {} /** * @return const std::string& the root symlink to watch for swapping. 
@@ -136,9 +136,15 @@ class Initial { virtual absl::optional flagsPath() PURE; /** - * @return Runtime* the local disk runtime configuration or nullptr if there is no configuration. + * @return const ProtobufWkt::Struct& base runtime snapshot. */ - virtual Runtime* runtime() PURE; + virtual const ProtobufWkt::Struct& baseRuntime() PURE; + + /** + * @return DiskRuntime* the local disk runtime configuration or nullptr if there is no + * configuration. + */ + virtual DiskRuntime* diskRuntime() PURE; }; } // namespace Configuration diff --git a/include/envoy/server/filter_config.h b/include/envoy/server/filter_config.h index 9231e2bd4b721..2f433b1e6b656 100644 --- a/include/envoy/server/filter_config.h +++ b/include/envoy/server/filter_config.h @@ -7,12 +7,13 @@ #include "envoy/http/codes.h" #include "envoy/http/context.h" #include "envoy/http/filter.h" -#include "envoy/init/init.h" +#include "envoy/init/manager.h" #include "envoy/json/json_object.h" #include "envoy/network/drain_decision.h" #include "envoy/network/filter.h" #include "envoy/runtime/runtime.h" #include "envoy/server/admin.h" +#include "envoy/server/lifecycle_notifier.h" #include "envoy/server/overload_manager.h" #include "envoy/singleton/manager.h" #include "envoy/stats/scope.h" @@ -79,6 +80,11 @@ class FactoryContext { */ virtual Init::Manager& initManager() PURE; + /** + * @return ServerLifecycleNotifier& the lifecycle notifier for the server. + */ + virtual ServerLifecycleNotifier& lifecycleNotifier() PURE; + /** * @return information about the local environment the server is running in. 
*/ diff --git a/include/envoy/server/hot_restart.h b/include/envoy/server/hot_restart.h index 458187668e2cc..f9d80729bf2b1 100644 --- a/include/envoy/server/hot_restart.h +++ b/include/envoy/server/hot_restart.h @@ -6,8 +6,11 @@ #include "envoy/common/pure.h" #include "envoy/event/dispatcher.h" #include "envoy/stats/stat_data_allocator.h" +#include "envoy/stats/store.h" #include "envoy/thread/thread.h" +#include "source/server/hot_restart.pb.h" + namespace Envoy { namespace Server { @@ -20,13 +23,9 @@ class Instance; */ class HotRestart { public: - struct GetParentStatsInfo { - uint64_t memory_allocated_; - uint64_t num_connections_; - }; - - struct ShutdownParentAdminInfo { - time_t original_start_time_; + struct ServerStatsFromParent { + uint64_t parent_memory_allocated_ = 0; + uint64_t parent_connections_ = 0; }; virtual ~HotRestart() {} @@ -46,32 +45,37 @@ class HotRestart { virtual int duplicateParentListenSocket(const std::string& address) PURE; /** - * Retrieve stats from our parent process. - * @param info will be filled with information from our parent if it can be retrieved. - */ - virtual void getParentStats(GetParentStatsInfo& info) PURE; - - /** - * Initialize the restarter after primary server initialization begins. The hot restart - * implementation needs to be created early to deal with shared memory, logging, etc. so - * late initialization of needed interfaces is done here. + * Initialize the parent logic of our restarter. Meant to be called after initialization of a + * new child has begun. The hot restart implementation needs to be created early to deal with + * shared memory, logging, etc. so late initialization of needed interfaces is done here. */ virtual void initialize(Event::Dispatcher& dispatcher, Server::Instance& server) PURE; /** * Shutdown admin processing in the parent process if applicable. This allows admin processing * to start up in the new process. 
- * @param info will be filled with information from our parent if it can be retrieved. + * @param original_start_time will be filled with information from our parent, if retrieved. */ - virtual void shutdownParentAdmin(ShutdownParentAdminInfo& info) PURE; + virtual void sendParentAdminShutdownRequest(time_t& original_start_time) PURE; /** - * Tell our parent to gracefully terminate itself. + * Tell our parent process to gracefully terminate itself. */ - virtual void terminateParent() PURE; + virtual void sendParentTerminateRequest() PURE; /** - * Shutdown the hot restarter. + * Retrieve stats from our parent process and merge them into stats_store, taking into account + * the stats values we've already seen transferred. + * Skips all of the above and returns 0s if there is not currently a parent. + * @param stats_store the store whose stats will be updated. + * @return special values relating to the "server" stats scope, whose + * merging has to be handled by Server::InstanceImpl. + */ + virtual ServerStatsFromParent mergeParentStatsIfAny(Stats::StoreRoot& stats_store) PURE; + + /** + * Shutdown the half of our hot restarter that acts as a parent. */ virtual void shutdown() PURE; @@ -90,11 +94,6 @@ class HotRestart { * @return Thread::BasicLockable& a lock for access logs. */ virtual Thread::BasicLockable& accessLogLock() PURE; - - /** - * @returns an allocator for stats. 
- */ - virtual Stats::StatDataAllocator& statsAllocator() PURE; }; } // namespace Server diff --git a/include/envoy/server/instance.h b/include/envoy/server/instance.h index 14e8dd2ffd537..223aa410522c0 100644 --- a/include/envoy/server/instance.h +++ b/include/envoy/server/instance.h @@ -9,7 +9,7 @@ #include "envoy/common/mutex_tracer.h" #include "envoy/event/timer.h" #include "envoy/http/context.h" -#include "envoy/init/init.h" +#include "envoy/init/manager.h" #include "envoy/local_info/local_info.h" #include "envoy/network/listen_socket.h" #include "envoy/runtime/runtime.h" @@ -17,6 +17,7 @@ #include "envoy/server/admin.h" #include "envoy/server/drain_manager.h" #include "envoy/server/hot_restart.h" +#include "envoy/server/lifecycle_notifier.h" #include "envoy/server/listener_manager.h" #include "envoy/server/options.h" #include "envoy/server/overload_manager.h" @@ -86,12 +87,6 @@ class Instance { */ virtual void failHealthcheck(bool fail) PURE; - /** - * Fetch server stats specific to this process vs. global shared stats in a hot restart scenario. - * @param info supplies the stats structure to fill. - */ - virtual void getParentStats(HotRestart::GetParentStatsInfo& info) PURE; - /** * @return whether external healthchecks are currently failed or not. */ @@ -147,6 +142,11 @@ class Instance { */ virtual Runtime::Loader& runtime() PURE; + /** + * @return ServerLifecycleNotifier& the singleton lifecycle notifier for the server. + */ + virtual ServerLifecycleNotifier& lifecycleNotifier() PURE; + /** * Shutdown the server gracefully. */ @@ -199,9 +199,9 @@ class Instance { virtual const LocalInfo::LocalInfo& localInfo() PURE; /** - * @return the time system used for the server. + * @return the time source used for the server. */ - virtual Event::TimeSystem& timeSystem() PURE; + virtual TimeSource& timeSource() PURE; /** * @return the flush interval of stats sinks. 
diff --git a/include/envoy/server/lifecycle_notifier.h b/include/envoy/server/lifecycle_notifier.h new file mode 100644 index 0000000000000..32a5ae4c5da58 --- /dev/null +++ b/include/envoy/server/lifecycle_notifier.h @@ -0,0 +1,57 @@ +#pragma once + +#include <functional> + +#include "envoy/common/pure.h" +#include "envoy/event/dispatcher.h" + +namespace Envoy { +namespace Server { + +class ServerLifecycleNotifier { +public: + virtual ~ServerLifecycleNotifier() = default; + + /** + * Stages of the envoy server instance lifecycle. + */ + enum class Stage { + /** + * The server instance main thread is about to enter the dispatcher loop. + */ + Startup, + + /** + * The server instance is being shut down and the dispatcher is about to exit. + * This provides listeners a last chance to run a callback on the main dispatcher. + * Note: the server will wait for callbacks that registered with a completion callback + * before exiting the dispatcher loop. + */ + ShutdownExit + }; + + /** + * Callback invoked when the server reaches a certain lifecycle stage. + * + * Instances of the second type which take an Event::PostCb parameter must post + * that callback to the main dispatcher when they have finished processing of + * the new lifecycle state. This is useful when the main dispatcher needs to + * wait for registered callbacks to finish their work before continuing, e.g. + * during server shutdown. + */ + using StageCallback = std::function<void()>; + using StageCallbackWithCompletion = std::function<void(Event::PostCb)>; + + /** + * Register a callback function that will be invoked on the main thread when + * the specified stage is reached. + * + * The second version, which takes a completion callback, is currently only supported + * for the ShutdownExit stage. 
+ */ + virtual void registerCallback(Stage stage, StageCallback callback) PURE; + virtual void registerCallback(Stage stage, StageCallbackWithCompletion callback) PURE; +}; + +} // namespace Server +} // namespace Envoy diff --git a/include/envoy/server/options.h b/include/envoy/server/options.h index 89ee2d132690d..0848fa37dcaf2 100644 --- a/include/envoy/server/options.h +++ b/include/envoy/server/options.h @@ -7,7 +7,6 @@ #include "envoy/admin/v2alpha/server_info.pb.h" #include "envoy/common/pure.h" #include "envoy/network/address.h" -#include "envoy/stats/stats_options.h" #include "spdlog/spdlog.h" @@ -148,17 +147,6 @@ class Options { */ virtual const std::string& serviceZone() const PURE; - /** - * @return uint64_t the maximum number of stats gauges and counters. - */ - virtual uint64_t maxStats() const PURE; - - /** - * @return StatsOptions& the max stat name / suffix lengths for stats. - * router/cluster/listener. - */ - virtual const Stats::StatsOptions& statsOptions() const PURE; - /** * @return bool indicating whether the hot restart functionality has been disabled via cli flags. */ @@ -174,6 +162,16 @@ class Options { */ virtual bool mutexTracingEnabled() const PURE; + /** + * @return whether to use the old libevent evbuffer-based Buffer implementation. + */ + virtual bool libeventBufferEnabled() const PURE; + + /** + * @return bool indicating whether cpuset size should determine the number of worker threads. + */ + virtual bool cpusetThreadsEnabled() const PURE; + /** * Converts the Options in to CommandLineOptions proto message defined in server_info.proto. * @return CommandLineOptionsPtr the protobuf representation of the options. 
diff --git a/include/envoy/server/overload_manager.h b/include/envoy/server/overload_manager.h index 81b7042ca337e..8ee920548e208 100644 --- a/include/envoy/server/overload_manager.h +++ b/include/envoy/server/overload_manager.h @@ -67,6 +67,9 @@ class OverloadActionNameValues { // Overload action to stop accepting new connections. const std::string StopAcceptingConnections = "envoy.overload_actions.stop_accepting_connections"; + + // Overload action to try to shrink the heap by releasing free memory. + const std::string ShrinkHeap = "envoy.overload_actions.shrink_heap"; }; typedef ConstSingleton OverloadActionNames; @@ -93,8 +96,9 @@ class OverloadManager { * @param dispatcher Event::Dispatcher& the dispatcher on which callbacks will be posted * @param callback OverloadActionCb the callback to post when the overload action * changes state + * @returns true if action was registered and false if no such action has been configured */ - virtual void registerForAction(const std::string& action, Event::Dispatcher& dispatcher, + virtual bool registerForAction(const std::string& action, Event::Dispatcher& dispatcher, OverloadActionCb callback) PURE; /** diff --git a/include/envoy/server/transport_socket_config.h b/include/envoy/server/transport_socket_config.h index 15236bd5700ec..939846e08b23a 100644 --- a/include/envoy/server/transport_socket_config.h +++ b/include/envoy/server/transport_socket_config.h @@ -3,7 +3,7 @@ #include #include "envoy/event/dispatcher.h" -#include "envoy/init/init.h" +#include "envoy/init/manager.h" #include "envoy/local_info/local_info.h" #include "envoy/network/transport_socket.h" #include "envoy/runtime/runtime.h" diff --git a/include/envoy/server/worker.h b/include/envoy/server/worker.h index cb845b318158c..255209531f577 100644 --- a/include/envoy/server/worker.h +++ b/include/envoy/server/worker.h @@ -43,6 +43,14 @@ class Worker { */ virtual void start(GuardDog& guard_dog) PURE; + /** + * Initialize stats for this worker's dispatcher, if 
available. The worker will output + * thread-specific stats under the given scope. + * @param scope the scope to contain the new per-dispatcher stats created here. + * @param prefix the stats prefix to identify this dispatcher. + */ + virtual void initializeStats(Stats::Scope& scope, const std::string& prefix) PURE; + /** * Stop the worker thread. */ diff --git a/include/envoy/ssl/BUILD b/include/envoy/ssl/BUILD index f1c2f45d79b5c..d5537579a0427 100644 --- a/include/envoy/ssl/BUILD +++ b/include/envoy/ssl/BUILD @@ -11,6 +11,10 @@ envoy_package() envoy_cc_library( name = "connection_interface", hdrs = ["connection.h"], + external_deps = ["abseil_optional"], + deps = [ + "//include/envoy/common:time_interface", + ], ) envoy_cc_library( diff --git a/include/envoy/ssl/connection.h b/include/envoy/ssl/connection.h index b0e4a86e6ad0f..fb598f560aa12 100644 --- a/include/envoy/ssl/connection.h +++ b/include/envoy/ssl/connection.h @@ -4,6 +4,9 @@ #include #include "envoy/common/pure.h" +#include "envoy/common/time.h" + +#include "absl/types/optional.h" namespace Envoy { namespace Ssl { @@ -11,9 +14,9 @@ namespace Ssl { /** * Base connection interface for all SSL connections. */ -class Connection { +class ConnectionInfo { public: - virtual ~Connection() {} + virtual ~ConnectionInfo() {} /** * @return bool whether the peer certificate is presented. @@ -21,10 +24,10 @@ class Connection { virtual bool peerCertificatePresented() const PURE; /** - * @return std::string the URI in the SAN field of the local certificate. Returns "" if there is + * @return std::string the URIs in the SAN field of the local certificate. Returns {} if there is * no local certificate, or no SAN field, or no URI. **/ - virtual std::string uriSanLocalCertificate() const PURE; + virtual std::vector uriSanLocalCertificate() const PURE; /** * @return std::string the subject field of the local certificate in RFC 2253 format. 
Returns "" @@ -51,10 +54,10 @@ class Connection { virtual std::string subjectPeerCertificate() const PURE; /** - * @return std::string the URI in the SAN field of the peer certificate. Returns "" if there is no - * peer certificate, or no SAN field, or no URI. + * @return std::vector<std::string> the URIs in the SAN field of the peer certificate. Returns {} if there is + * no peer certificate, or no SAN field, or no URI. **/ - virtual std::string uriSanPeerCertificate() const PURE; + virtual std::vector<std::string> uriSanPeerCertificate() const PURE; /** * @return std::string the URL-encoded PEM-encoded representation of the peer certificate. Returns @@ -62,6 +65,13 @@ class Connection { **/ virtual const std::string& urlEncodedPemEncodedPeerCertificate() const PURE; + /** + * @return std::string the URL-encoded PEM-encoded representation of the full peer certificate + * chain including the leaf certificate. Returns "" if there is no peer certificate or + * encoding fails. + **/ + virtual const std::string& urlEncodedPemEncodedPeerCertificateChain() const PURE; + /** * @return std::vector<std::string> the DNS entries in the SAN field of the peer certificate. * Returns {} if there is no peer certificate, or no SAN field, or no DNS. @@ -73,6 +83,18 @@ class Connection { * Returns {} if there is no local certificate, or no SAN field, or no DNS. **/ virtual std::vector<std::string> dnsSansLocalCertificate() const PURE; + + /** + * @return absl::optional<SystemTime> the time that the peer certificate was issued and should be + * considered valid from. Returns empty absl::optional if there is no peer certificate. + **/ + virtual absl::optional<SystemTime> validFromPeerCertificate() const PURE; + + /** + * @return absl::optional<SystemTime> the time that the peer certificate expires and should not be + * considered valid after. Returns empty absl::optional if there is no peer certificate. 
+ **/ + virtual absl::optional expirationPeerCertificate() const PURE; }; } // namespace Ssl diff --git a/include/envoy/stats/BUILD b/include/envoy/stats/BUILD index 9f1e513135b60..420ad3883f52a 100644 --- a/include/envoy/stats/BUILD +++ b/include/envoy/stats/BUILD @@ -19,18 +19,23 @@ envoy_cc_library( "stat_data_allocator.h", "stats.h", "stats_matcher.h", - "stats_options.h", "store.h", "tag.h", "tag_extractor.h", "tag_producer.h", ], - deps = ["//include/envoy/common:interval_set_interface"], + deps = [ + ":symbol_table_interface", + "//include/envoy/common:interval_set_interface", + ], ) envoy_cc_library( name = "symbol_table_interface", hdrs = ["symbol_table.h"], + deps = [ + "//source/common/common:hash_lib", + ], ) envoy_cc_library( diff --git a/include/envoy/stats/histogram.h b/include/envoy/stats/histogram.h index bebba3ff976e5..0cd2d6a474f33 100644 --- a/include/envoy/stats/histogram.h +++ b/include/envoy/stats/histogram.h @@ -56,7 +56,7 @@ class HistogramStatistics { * of the number of samples in the histogram, it is not guaranteed that this will be * 100% the number of samples observed. */ - virtual double sampleCount() const PURE; + virtual uint64_t sampleCount() const PURE; /** * Returns sum of all values during the period. 
diff --git a/include/envoy/stats/scope.h b/include/envoy/stats/scope.h index ef913edea6e02..a593827c7c54d 100644 --- a/include/envoy/stats/scope.h +++ b/include/envoy/stats/scope.h @@ -2,11 +2,10 @@ #include #include -#include #include "envoy/common/pure.h" #include "envoy/stats/histogram.h" -#include "envoy/stats/stats_options.h" +#include "envoy/stats/symbol_table.h" namespace Envoy { namespace Stats { @@ -15,7 +14,7 @@ class Counter; class Gauge; class Histogram; class Scope; -class StatsOptions; +class NullGaugeImpl; typedef std::unique_ptr ScopePtr; typedef std::shared_ptr ScopeSharedPtr; @@ -42,25 +41,54 @@ class Scope { virtual void deliverHistogramToSinks(const Histogram& histogram, uint64_t value) PURE; /** + * @param name The name of the stat, obtained from the SymbolTable. + * @return a counter within the scope's namespace. + */ + virtual Counter& counterFromStatName(StatName name) PURE; + + /** + * TODO(#6667): this variant is deprecated: use counterFromStatName. + * @param name The name, expressed as a string. * @return a counter within the scope's namespace. */ virtual Counter& counter(const std::string& name) PURE; /** + * @param name The name of the stat, obtained from the SymbolTable. + * @return a gauge within the scope's namespace. + */ + virtual Gauge& gaugeFromStatName(StatName name) PURE; + + /** + * TODO(#6667): this variant is deprecated: use gaugeFromStatName. + * @param name The name, expressed as a string. * @return a gauge within the scope's namespace. */ virtual Gauge& gauge(const std::string& name) PURE; /** + * @return a null gauge within the scope's namespace. + */ + virtual NullGaugeImpl& nullGauge(const std::string& name) PURE; + + /** + * @param name The name of the stat, obtained from the SymbolTable. + * @return a histogram within the scope's namespace with a particular value type. + */ + virtual Histogram& histogramFromStatName(StatName name) PURE; + + /** + * TODO(#6667): this variant is deprecated: use histogramFromStatName. 
+ * @param name The name, expressed as a string. * @return a histogram within the scope's namespace with a particular value type. */ virtual Histogram& histogram(const std::string& name) PURE; /** - * @return a reference to the top-level StatsOptions struct, containing information about the - * maximum allowable object name length and stat suffix length. + * @return a reference to the symbol table. */ - virtual const Stats::StatsOptions& statsOptions() const PURE; + virtual const SymbolTable& symbolTable() const PURE; + virtual SymbolTable& symbolTable() PURE; }; } // namespace Stats diff --git a/include/envoy/stats/stat_data_allocator.h b/include/envoy/stats/stat_data_allocator.h index f0ea93e266d04..8d3ec57409150 100644 --- a/include/envoy/stats/stat_data_allocator.h +++ b/include/envoy/stats/stat_data_allocator.h @@ -10,6 +10,7 @@ #include "envoy/common/pure.h" #include "envoy/stats/stats.h" +#include "envoy/stats/symbol_table.h" #include "envoy/stats/tag.h" #include "absl/strings/string_view.h" @@ -22,6 +23,7 @@ namespace Stats { * be created utilizing a single fixed-size block suitable for * shared-memory, or in the heap, allowing for pointers and sharing of * substrings, with an opportunity for reduced memory consumption. + * TODO(fredlas) this interface can be deleted now that the shared memory version is gone. */ class StatDataAllocator { public: @@ -34,8 +36,8 @@ class StatDataAllocator { * @return CounterSharedPtr a counter, or nullptr if allocation failed, in which case * tag_extracted_name and tags are not moved. */ - virtual CounterSharedPtr makeCounter(absl::string_view name, std::string&& tag_extracted_name, - std::vector&& tags) PURE; + virtual CounterSharedPtr makeCounter(StatName name, absl::string_view tag_extracted_name, + const std::vector& tags) PURE; /** * @param name the full name of the stat. 
@@ -44,13 +46,11 @@ class StatDataAllocator { * @return GaugeSharedPtr a gauge, or nullptr if allocation failed, in which case * tag_extracted_name and tags are not moved. */ - virtual GaugeSharedPtr makeGauge(absl::string_view name, std::string&& tag_extracted_name, - std::vector&& tags) PURE; + virtual GaugeSharedPtr makeGauge(StatName name, absl::string_view tag_extracted_name, + const std::vector& tags) PURE; - /** - * Determines whether this stats allocator requires bounded stat-name size. - */ - virtual bool requiresBoundedStatNameSize() const PURE; + virtual const SymbolTable& symbolTable() const PURE; + virtual SymbolTable& symbolTable() PURE; // TODO(jmarantz): create a parallel mechanism to instantiate histograms. At // the moment, histograms don't fit the same pattern of counters and gauges diff --git a/include/envoy/stats/stats.h b/include/envoy/stats/stats.h index 6d191bf24f9fb..5405a37eb7925 100644 --- a/include/envoy/stats/stats.h +++ b/include/envoy/stats/stats.h @@ -6,12 +6,15 @@ #include #include "envoy/common/pure.h" +#include "envoy/stats/symbol_table.h" #include "absl/strings/string_view.h" +#include "absl/types/optional.h" namespace Envoy { namespace Stats { +class StatDataAllocator; struct Tag; /** @@ -32,32 +35,77 @@ class Metric { virtual std::string name() const PURE; /** - * Returns the full name of the Metric as a nul-terminated string. The - * intention is use this as a hash-map key, so that the stat name storage - * is not duplicated in every map. You cannot use name() above for this, - * as it returns a std::string by value, as not all stat implementations - * contain the name as a std::string. - * - * Note that in the future, the plan is to replace this method with one that - * returns a reference to a symbolized representation of the elaborated string - * (see source/common/stats/symbol_table_impl.h). + * Returns the full name of the Metric as an encoded array of symbols. 
*/ - virtual const char* nameCStr() const PURE; + virtual StatName statName() const PURE; /** * Returns a vector of configurable tags to identify this Metric. */ - virtual const std::vector& tags() const PURE; + virtual std::vector tags() const PURE; + + /** + * Returns the name of the Metric with the portions designated as tags removed + * as a string. For example, The stat name "vhost.foo.vcluster.bar.c1" would + * have "foo" extracted as the value of tag "vhost" and "bar" extracted as the + * value of tag "vcluster". Thus the tagExtractedName is simply + * "vhost.vcluster.c1". + * + * @return The stat name with all tag values extracted. + */ + virtual std::string tagExtractedName() const PURE; + + /** + * Returns the name of the Metric with the portions designated as tags + * removed as a StatName + */ + virtual StatName tagExtractedStatName() const PURE; + + // Function to be called from iterateTagStatNames passing name and value as StatNames. + using TagStatNameIterFn = std::function; + + /** + * Iterates over all tags, calling a functor for each name/value pair. The + * functor can return 'true' to continue or 'false' to stop the + * iteration. + * + * @param fn The functor to call for StatName pair. + */ + virtual void iterateTagStatNames(const TagStatNameIterFn& fn) const PURE; + + // Function to be called from iterateTags passing name and value as const Tag&. + using TagIterFn = std::function; /** - * Returns the name of the Metric with the portions designated as tags removed. + * Iterates over all tags, calling a functor for each one. The + * functor can return 'true' to continue or 'false' to stop the + * iteration. + * + * @param fn The functor to call for each Tag. */ - virtual const std::string& tagExtractedName() const PURE; + virtual void iterateTags(const TagIterFn& fn) const PURE; /** * Indicates whether this metric has been updated since the server was started. 
*/ virtual bool used() const PURE; + + /** + * Flags: + * Used: used by all stats types to figure out whether they have been used. + * Logic...: used by gauges to cache how they should be combined with a parent's value. + */ + struct Flags { + static const uint8_t Used = 0x01; + // TODO(fredlas) these logic flags should be removed if we move to indicating combine logic in + // the stat declaration macros themselves. (Now that stats no longer use shared memory, it's + // safe to mess with what these flag bits mean whenever we want). + static const uint8_t LogicAccumulate = 0x02; + static const uint8_t LogicNeverImport = 0x04; + static const uint8_t LogicCached = LogicAccumulate | LogicNeverImport; + }; + virtual SymbolTable& symbolTable() PURE; + virtual const SymbolTable& symbolTable() const PURE; }; /** @@ -90,6 +138,16 @@ class Gauge : public virtual Metric { virtual void set(uint64_t value) PURE; virtual void sub(uint64_t amount) PURE; virtual uint64_t value() const PURE; + + /** + * Returns the stat's combine logic, if known. + */ + virtual absl::optional<bool> cachedShouldImport() const PURE; + + /** + * Sets the value to be returned by cachedShouldImport(). 
+ */ + virtual void setShouldImport(bool should_import) PURE; }; typedef std::shared_ptr GaugeSharedPtr; diff --git a/include/envoy/stats/stats_macros.h b/include/envoy/stats/stats_macros.h index eb1c89557c664..66a39eac171fb 100644 --- a/include/envoy/stats/stats_macros.h +++ b/include/envoy/stats/stats_macros.h @@ -40,4 +40,8 @@ namespace Envoy { #define POOL_COUNTER(POOL) POOL_COUNTER_PREFIX(POOL, "") #define POOL_GAUGE(POOL) POOL_GAUGE_PREFIX(POOL, "") #define POOL_HISTOGRAM(POOL) POOL_HISTOGRAM_PREFIX(POOL, "") + +#define NULL_STAT_DECL_(X) std::string(#X)), + +#define NULL_POOL_GAUGE(POOL) (POOL).nullGauge(NULL_STAT_DECL_ } // namespace Envoy diff --git a/include/envoy/stats/stats_options.h b/include/envoy/stats/stats_options.h deleted file mode 100644 index 4f0b3f64f3616..0000000000000 --- a/include/envoy/stats/stats_options.h +++ /dev/null @@ -1,50 +0,0 @@ -#pragma once - -#include - -#include "envoy/common/pure.h" - -namespace Envoy { -namespace Stats { - -/** - * Struct stored under Server::Options to hold information about the maximum - * object name length and maximum stat suffix length of a stat. These have - * defaults in StatsOptionsImpl, and the maximum object name length can be - * overridden. The default initialization is used in IsolatedStatImpl, and the - * user-overridden struct is stored in Options. - * - * As noted in the comment above StatsOptionsImpl in - * source/common/stats/stats_options_impl.h, a stat name often contains both a - * string whose length is user-defined (cluster_name in the below example), and - * a specific statistic name generated by Envoy. To make room for growth on both - * fronts, we limit the max allowed length of each separately. 
- * - * name / stat name - * |----------------------------------------------------------------| - * cluster..outlier_detection.ejections_consecutive_5xx - * |--------------------------------------| |-----------------------| - * object name suffix - */ -class StatsOptions { -public: - virtual ~StatsOptions() {} - - /** - * The max allowed length of a complete stat name, including suffix. - */ - virtual size_t maxNameLength() const PURE; - - /** - * The max allowed length of the object part of a stat name. - */ - virtual size_t maxObjNameLength() const PURE; - - /** - * The max allowed length of a stat suffix. - */ - virtual size_t maxStatSuffixLength() const PURE; -}; - -} // namespace Stats -} // namespace Envoy diff --git a/include/envoy/stats/symbol_table.h b/include/envoy/stats/symbol_table.h index b0efd1cbd75d1..4b4c8a4c4fe9e 100644 --- a/include/envoy/stats/symbol_table.h +++ b/include/envoy/stats/symbol_table.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -19,15 +20,7 @@ namespace Stats { */ class StatName; -/** - * Intermediate representation for a stat-name. This helps store multiple names - * in a single packed allocation. First we encode each desired name, then sum - * their sizes for the single packed allocation. This is used to store - * MetricImpl's tags and tagExtractedName. Like StatName, we don't want to pay - * a vptr overhead per object, and the representation is shared between the - * SymbolTable implementations, so this is just a pre-declare. - */ -class SymbolEncoding; +class StatNameList; /** * SymbolTable manages a namespace optimized for stat names, exploiting their @@ -59,22 +52,6 @@ class SymbolTable { virtual ~SymbolTable() = default; - /** - * Encodes a stat name using the symbol table, returning a SymbolEncoding. The - * SymbolEncoding is not intended for long-term storage, but is used to help - * allocate a StatName with the correct amount of storage. 
- * - * When a name is encoded, it bumps reference counts held in the table for - * each symbol. The caller is responsible for creating a StatName using this - * SymbolEncoding and ultimately disposing of it by calling - * SymbolTable::free(). Users are protected from leaking symbols into the pool - * by ASSERTions in the SymbolTable destructor. - * - * @param name The name to encode. - * @return SymbolEncoding the encoded symbols. - */ - virtual SymbolEncoding encode(absl::string_view name) PURE; - /** * @return uint64_t the number of symbols in the symbol table. */ @@ -116,9 +93,9 @@ class SymbolTable { * decode/encode into the elaborated form, and does not require locking the * SymbolTable. * - * The caveat is that this representation does not bump reference counts on - * the referenced Symbols in the SymbolTable, so it's only valid as long for - * the lifetime of the joined StatNames. + * Note that this method does not bump reference counts on the referenced + * Symbols in the SymbolTable, so it's only valid as long for the lifetime of + * the joined StatNames. * * This is intended for use doing cached name lookups of scoped stats, where * the scope prefix and the names to combine it with are already in StatName @@ -130,14 +107,50 @@ class SymbolTable { */ virtual StoragePtr join(const std::vector& stat_names) const PURE; + /** + * Populates a StatNameList from a list of encodings. This is not done at + * construction time to enable StatNameList to be instantiated directly in + * a class that doesn't have a live SymbolTable when it is constructed. + * + * @param names A pointer to the first name in an array, allocated by the caller. + * @param num_names The number of names. + * @param symbol_table The symbol table in which to encode the names. 
+ */ + virtual void populateList(const absl::string_view* names, uint32_t num_names, + StatNameList& list) PURE; + #ifndef ENVOY_CONFIG_COVERAGE virtual void debugPrint() const PURE; #endif + /** + * Calls the provided function with a string-view representation of the + * elaborated name. This is useful during the interim period when we + * are using FakeSymbolTableImpl, to avoid an extra allocation. Once + * we migrate to using SymbolTableImpl, this interface will no longer + * be helpful and can be removed. The reason it's useful now is that + * it makes up, in part, for some extra runtime overhead that is spent + * on the SymbolTable abstraction and API, without getting full benefit + * from the improved representation. + * + * TODO(#6307): Remove this when the transition from FakeSymbolTableImpl to + * SymbolTableImpl is complete. + * + * @param stat_name The stat name. + * @param fn The function to call with the elaborated stat name as a string_view. + */ + virtual void callWithStringView(StatName stat_name, + const std::function& fn) const PURE; + private: + friend struct HeapStatData; friend class StatNameStorage; friend class StatNameList; + // The following methods are private, but are called by friend classes + // StatNameStorage and StatNameList, which must be friendly with SymbolTable + // in order to manage the reference-counted symbols they own. + /** * Since SymbolTable does manual reference counting, a client of SymbolTable * must manually call free(symbol_vec) when it is freeing the backing store @@ -158,6 +171,17 @@ class SymbolTable { * @param stat_name the stat name. */ virtual void incRefCount(const StatName& stat_name) PURE; + + /** + * Encodes 'name' into the symbol table. Bumps reference counts for referenced + * symbols. The caller must manage the storage, and is responsible for calling + * SymbolTable::free() to release the reference counts. + * + * @param name The name to encode. 
+ * @return The encoded name, transferring ownership to the caller. + * + */ + virtual StoragePtr encode(absl::string_view name) PURE; }; using SharedSymbolTable = std::shared_ptr; diff --git a/include/envoy/stats/tag_extractor.h b/include/envoy/stats/tag_extractor.h index 270b782386333..361cda3e22660 100644 --- a/include/envoy/stats/tag_extractor.h +++ b/include/envoy/stats/tag_extractor.h @@ -40,7 +40,7 @@ class TagExtractor { * @param remove_characters set of intervals of character-indices to be removed from name. * @return bool indicates whether a tag was found in the name. */ - virtual bool extractTag(const std::string& stat_name, std::vector& tags, + virtual bool extractTag(absl::string_view stat_name, std::vector& tags, IntervalSet& remove_characters) const PURE; /** diff --git a/include/envoy/stats/tag_producer.h b/include/envoy/stats/tag_producer.h index 3e7986cd69d5e..5ec72877981bc 100644 --- a/include/envoy/stats/tag_producer.h +++ b/include/envoy/stats/tag_producer.h @@ -7,6 +7,8 @@ #include "envoy/common/pure.h" #include "envoy/stats/tag.h" +#include "absl/strings/string_view.h" + namespace Envoy { namespace Stats { @@ -16,12 +18,18 @@ class TagProducer { /** * Take a metric name and a vector then add proper tags into the vector and - * return an extracted metric name. + * return an extracted metric name. The tags array will be populated with + * name/value pairs extracted from the full metric name, using the regular + * expressions in source/common/config/well_known_names.cc. For example, the + * stat name "vhost.foo.vcluster.bar.c1" would have "foo" extracted as the + * value of tag "vhost" and "bar" extracted as the value of tag + * "vcluster", so this will populate tags with {"vhost", "foo"} and + * {"vcluster", "bar"}, and return "vhost.vcluster.c1". + * * @param metric_name std::string a name of Stats::Metric (Counter, Gauge, Histogram). * @param tags std::vector a set of Stats::Tag. 
*/ - virtual std::string produceTags(const std::string& metric_name, - std::vector& tags) const PURE; + virtual std::string produceTags(absl::string_view metric_name, std::vector& tags) const PURE; }; typedef std::unique_ptr TagProducerPtr; diff --git a/include/envoy/stats/timespan.h b/include/envoy/stats/timespan.h index a204ab8d06935..20910847db280 100644 --- a/include/envoy/stats/timespan.h +++ b/include/envoy/stats/timespan.h @@ -11,13 +11,27 @@ namespace Envoy { namespace Stats { /** - * An individual timespan that flushes its measured value (in milliseconds) to a histogram. The - * initial time is captured on construction. A timespan must be completed via complete() for it to - * be stored. If the timespan is deleted this will be treated as a cancellation. + * An abstraction of timespan which can be completed. */ -class Timespan { +class CompletableTimespan { public: - Timespan(Histogram& histogram, TimeSource& time_source) + virtual ~CompletableTimespan() {} + + /** + * Complete the timespan. + */ + virtual void complete() PURE; +}; + +/** + * An individual timespan that flushes its measured value in time unit (e.g + * std::chrono::milliseconds). The initial time is captured on construction. A timespan must be + * completed via complete() for it to be stored. If the timespan is deleted this will be treated as + * a cancellation. + */ +template class TimespanWithUnit : public CompletableTimespan { +public: + TimespanWithUnit(Histogram& histogram, TimeSource& time_source) : time_source_(time_source), histogram_(histogram), start_(time_source.monotonicTime()) {} /** @@ -26,11 +40,10 @@ class Timespan { void complete() { histogram_.recordValue(getRawDuration().count()); } /** - * Get duration since the creation of the span. + * Get duration in the time unit since the creation of the span. 
*/ - std::chrono::milliseconds getRawDuration() { - return std::chrono::duration_cast(time_source_.monotonicTime() - - start_); + TimeUnit getRawDuration() { + return std::chrono::duration_cast(time_source_.monotonicTime() - start_); } private: @@ -39,7 +52,9 @@ class Timespan { const MonotonicTime start_; }; +typedef TimespanWithUnit Timespan; typedef std::unique_ptr TimespanPtr; +typedef std::unique_ptr CompletableTimespanPtr; } // namespace Stats } // namespace Envoy diff --git a/include/envoy/stream_info/BUILD b/include/envoy/stream_info/BUILD index 8ec784d22d93c..68a28a8ac6c61 100644 --- a/include/envoy/stream_info/BUILD +++ b/include/envoy/stream_info/BUILD @@ -16,8 +16,11 @@ envoy_cc_library( ":filter_state_interface", "//include/envoy/common:time_interface", "//include/envoy/http:protocol_interface", + "//include/envoy/ssl:connection_interface", "//include/envoy/upstream:host_description_interface", + "//source/common/common:assert_lib", "//source/common/protobuf", + "//source/common/singleton:const_singleton", ], ) diff --git a/include/envoy/stream_info/stream_info.h b/include/envoy/stream_info/stream_info.h index 14c01cc4486c8..6be5724e3ecbe 100644 --- a/include/envoy/stream_info/stream_info.h +++ b/include/envoy/stream_info/stream_info.h @@ -8,10 +8,13 @@ #include "envoy/common/pure.h" #include "envoy/common/time.h" #include "envoy/http/protocol.h" +#include "envoy/ssl/connection.h" #include "envoy/stream_info/filter_state.h" #include "envoy/upstream/host_description.h" +#include "common/common/assert.h" #include "common/protobuf/protobuf.h" +#include "common/singleton/const_singleton.h" #include "absl/types/optional.h" @@ -56,8 +59,116 @@ enum ResponseFlag { DownstreamConnectionTermination = 0x4000, // Exceeded upstream retry limit. UpstreamRetryLimitExceeded = 0x8000, + // Request hit the stream idle timeout, triggering a 408. + StreamIdleTimeout = 0x10000, // ATTENTION: MAKE SURE THIS REMAINS EQUAL TO THE LAST FLAG. 
-  LastFlag = UpstreamRetryLimitExceeded
+  LastFlag = StreamIdleTimeout
+};
+
+/**
+ * Constants for the response code details field of StreamInfo for details sent
+ * by core (non-extension) code.
+ *
+ * These provide details about the stream state such as whether the
+ * response is from the upstream or from envoy (in case of a local reply).
+ * Custom extensions can define additional values provided they are appropriately
+ * scoped to avoid collisions.
+ */
+struct ResponseCodeDetailValues {
+  // Response code was set by the upstream.
+  const std::string ViaUpstream = "via_upstream";
+  // Envoy is doing non-streaming proxying, and the request payload exceeded
+  // configured limits.
+  const std::string RequestPayloadTooLarge = "request_payload_too_large";
+  // Envoy is doing non-streaming proxying, and the response payload exceeded
+  // configured limits.
+  const std::string ResponsePayloadTooLarge = "response_payload_too_large";
+  // The per-stream keepalive timeout was exceeded.
+  const std::string StreamIdleTimeout = "stream_idle_timeout";
+  // The per-stream total request timeout was exceeded.
+  const std::string RequestOverallTimeout = "request_overall_timeout";
+  // The request was rejected due to the Overload Manager reaching configured resource limits.
+  const std::string Overload = "overload";
+  // The HTTP/1.0 or HTTP/0.9 request was rejected due to HTTP/1.0 support not being configured.
+  const std::string LowVersion = "low_version";
+  // The request was rejected due to the Host: or :authority field missing.
+  const std::string MissingHost = "missing_host_header";
+  // The request was rejected due to the request headers being larger than the configured limit.
+  const std::string RequestHeadersTooLarge = "request_headers_too_large";
+  // The request was rejected due to the Path or :path header field missing.
+ const std::string MissingPath = "missing_path_rejected"; + // The request was rejected due to using an absolute path on a route not supporting them. + const std::string AbsolutePath = "absolute_path_rejected"; + // The request was rejected because path normalization was configured on and failed, probably due + // to an invalid path. + const std::string PathNormalizationFailed = "path_normalization_failed"; + // The request was rejected because it attempted an unsupported upgrade. + const std::string UpgradeFailed = "upgrade_failed"; + + // The request was rejected by the router filter because there was no route found. + const std::string RouteNotFound = "route_not_found"; + // A direct response was generated by the router filter. + const std::string DirectResponse = "direct_response"; + // The request was rejected by the router filter because there was no cluster found for the + // selected route. + const std::string ClusterNotFound = "cluster_not_found"; + // The request was rejected by the router filter because the cluster was in maintenance mode. + const std::string MaintenanceMode = "maintenance_mode"; + // The request was rejected by the router filter because there was no healthy upstream found. + const std::string NoHealthyUpstream = "no_healthy_upstream"; + // The upstream response timed out + const std::string UpstreamTimeout = "upstream_response_timeout"; + // The final upstream try timed out + const std::string UpstreamPerTryTimeout = "upstream_per_try_timeout"; + // The upstream connection was reset before a response was started. This + // will generally be accompanied by details about why the reset occurred. + const std::string EarlyUpstreamReset = "upstream_reset_before_response_started"; + // The upstream connection was reset after a response was started. This + // will generally be accompanied by details about why the reset occurred but + // indicates that original "success" headers may have been sent downstream + // despite the subsequent failure. 
+  const std::string LateUpstreamReset = "upstream_reset_after_response_started";
+};
+
+typedef ConstSingleton<ResponseCodeDetailValues> ResponseCodeDetails;
+
+struct UpstreamTiming {
+  /**
+   * Sets the time when the first byte of the request was sent upstream.
+   */
+  void onFirstUpstreamTxByteSent(TimeSource& time_source) {
+    ASSERT(!first_upstream_tx_byte_sent_);
+    first_upstream_tx_byte_sent_ = time_source.monotonicTime();
+  }
+
+  /**
+   * Sets the time when the last byte of the request was sent upstream.
+   */
+  void onLastUpstreamTxByteSent(TimeSource& time_source) {
+    ASSERT(!last_upstream_tx_byte_sent_);
+    last_upstream_tx_byte_sent_ = time_source.monotonicTime();
+  }
+
+  /**
+   * Sets the time when the first byte of the response is received from upstream.
+   */
+  void onFirstUpstreamRxByteReceived(TimeSource& time_source) {
+    ASSERT(!first_upstream_rx_byte_received_);
+    first_upstream_rx_byte_received_ = time_source.monotonicTime();
+  }
+
+  /**
+   * Sets the time when the last byte of the response is received from upstream.
+   */
+  void onLastUpstreamRxByteReceived(TimeSource& time_source) {
+    ASSERT(!last_upstream_rx_byte_received_);
+    last_upstream_rx_byte_received_ = time_source.monotonicTime();
+  }
+
+  absl::optional<MonotonicTime> first_upstream_tx_byte_sent_;
+  absl::optional<MonotonicTime> last_upstream_tx_byte_sent_;
+  absl::optional<MonotonicTime> first_upstream_rx_byte_received_;
+  absl::optional<MonotonicTime> last_upstream_rx_byte_received_;
 };
 
 /**
@@ -73,6 +184,12 @@ class StreamInfo {
    */
   virtual void setResponseFlag(ResponseFlag response_flag) PURE;
 
+  /**
+   * @param rc_details the response code details string to set for this request.
+   * See ResponseCodeDetailValues above for well-known constants.
+   */
+  virtual void setResponseCodeDetails(absl::string_view rc_details) PURE;
+
   /**
    * @param response_flags the response_flags to intersect with.
* @return true if the intersection of the response_flags argument and the currently set response @@ -110,6 +227,11 @@ class StreamInfo { */ virtual absl::optional responseCode() const PURE; + /** + * @return the response code details. + */ + virtual const absl::optional& responseCodeDetails() const PURE; + /** * @return the time that the first byte of the request was received. */ @@ -132,6 +254,13 @@ class StreamInfo { */ virtual void onLastDownstreamRxByteReceived() PURE; + /** + * Sets the upstream timing information for this stream. This is useful for + * when multiple upstream requests are issued and we want to save timing + * information for the one that "wins". + */ + virtual void setUpstreamTiming(const UpstreamTiming& upstream_timing) PURE; + /** * @return the duration between the first byte of the request was sent upstream and the start of * the request. There may be a considerable delta between lastDownstreamByteReceived and this @@ -139,44 +268,23 @@ class StreamInfo { */ virtual absl::optional firstUpstreamTxByteSent() const PURE; - /** - * Sets the time when the first byte of the request was sent upstream. - */ - virtual void onFirstUpstreamTxByteSent() PURE; - /** * @return the duration between the last byte of the request was sent upstream and the start of * the request. */ virtual absl::optional lastUpstreamTxByteSent() const PURE; - /** - * Sets the time when the last byte of the request was sent upstream. - */ - virtual void onLastUpstreamTxByteSent() PURE; - /** * @return the duration between the first byte of the response is received from upstream and the * start of the request. */ virtual absl::optional firstUpstreamRxByteReceived() const PURE; - /** - * Sets the time when the first byte of the response is received from upstream. - */ - virtual void onFirstUpstreamRxByteReceived() PURE; - /** * @return the duration between the last byte of the response is received from upstream and the * start of the request. 
*/ virtual absl::optional lastUpstreamRxByteReceived() const PURE; - - /** - * Sets the time when the last byte of the response is received from upstream. - */ - virtual void onLastUpstreamRxByteReceived() PURE; - /** * @return the duration between the first byte of the response is sent downstream and the start of * the request. There may be a considerable delta between lastUpstreamByteReceived and this value @@ -212,11 +320,6 @@ class StreamInfo { */ virtual void onRequestComplete() PURE; - /** - * Resets all timings related to the upstream in the event of a retry. - */ - virtual void resetUpstreamTimings() PURE; - /** * @param bytes_sent denotes the number of bytes to add to total sent bytes. */ @@ -303,6 +406,17 @@ class StreamInfo { */ virtual const Network::Address::InstanceConstSharedPtr& downstreamRemoteAddress() const PURE; + /** + * @param connection_info sets the downstream ssl connection. + */ + virtual void setDownstreamSslConnection(const Ssl::ConnectionInfo* ssl_connection_info) PURE; + + /** + * @return the downstream SSL connection. This will be nullptr if the downstream + * connection does not use SSL. + */ + virtual const Ssl::ConnectionInfo* downstreamSslConnection() const PURE; + /** * @return const Router::RouteEntry* Get the route entry selected for this request. Note: this * will be nullptr if no route was selected. @@ -341,6 +455,17 @@ class StreamInfo { * @return SNI value for downstream host. */ virtual const std::string& requestedServerName() const PURE; + + /** + * @param failure_reason the upstream transport failure reason. + */ + virtual void setUpstreamTransportFailureReason(absl::string_view failure_reason) PURE; + + /** + * @return const std::string& the upstream transport failure reason, e.g. certificate validation + * failed. 
+ */ + virtual const std::string& upstreamTransportFailureReason() const PURE; }; } // namespace StreamInfo diff --git a/include/envoy/thread/thread.h b/include/envoy/thread/thread.h index 6bde21178f849..e9078afa476ba 100644 --- a/include/envoy/thread/thread.h +++ b/include/envoy/thread/thread.h @@ -51,34 +51,6 @@ class ThreadFactory { virtual ThreadIdPtr currentThreadId() PURE; }; -/** - * A static singleton to the ThreadFactory corresponding to the build platform. - * - * The singleton must be initialized via set() early in main() with the appropriate ThreadFactory - * (see source/exe/{posix,win32}/platform_impl.h). - * - * This static singleton is an exception to Envoy's established practice for handling of singletons, - * which are typically registered with and accessed via the Envoy::Singleton::Manager. Reasons for - * the exception include drastic simplification of thread safety assertions; e.g.: - * ASSERT(ThreadFactorySingleton::get()->currentThreadId() == original_thread_id_); - */ -class ThreadFactorySingleton { -public: - /** - * Returns a reference to the platform dependent ThreadFactory. - */ - static ThreadFactory& get() { return *thread_factory_; } - - /** - * Sets the singleton to the supplied thread_factory. - * @param thread_factory the ThreadFactory instance to be pointed to by this singleton. - */ - static void set(ThreadFactory* thread_factory); - -private: - static ThreadFactory* thread_factory_; -}; - /** * Like the C++11 "basic lockable concept" but a pure virtual interface vs. a template, and * with thread annotations. diff --git a/include/envoy/tracing/http_tracer.h b/include/envoy/tracing/http_tracer.h index 74dcc86e916cc..efb81375dc0ed 100644 --- a/include/envoy/tracing/http_tracer.h +++ b/include/envoy/tracing/http_tracer.h @@ -53,6 +53,11 @@ class Config { * @return list of headers to populate tags on the active span. 
*/ virtual const std::vector& requestHeadersForTags() const PURE; + + /** + * @return true if spans should be annotated with more detailed information. + */ + virtual bool verbose() const PURE; }; class Span; @@ -69,14 +74,21 @@ class Span { * Set the operation name. * @param operation the operation name */ - virtual void setOperation(const std::string& operation) PURE; + virtual void setOperation(absl::string_view operation) PURE; /** * Attach metadata to a Span, to be handled in an implementation-dependent fashion. * @param name the name of the tag * @param value the value to associate with the tag */ - virtual void setTag(const std::string& name, const std::string& value) PURE; + virtual void setTag(absl::string_view name, absl::string_view value) PURE; + + /** + * Record an event associated with a span, to be handled in an implementation-dependent fashion. + * @param timestamp the time of the event. + * @param event the name of the event. + */ + virtual void log(SystemTime timestamp, const std::string& event) PURE; /** * Capture the final duration for this Span and carry out any work necessary to complete it. 
diff --git a/include/envoy/upstream/BUILD b/include/envoy/upstream/BUILD index bc4b714035d5c..e20c85e1d24ec 100644 --- a/include/envoy/upstream/BUILD +++ b/include/envoy/upstream/BUILD @@ -145,5 +145,28 @@ envoy_cc_library( "//include/envoy/runtime:runtime_interface", "//include/envoy/ssl:context_interface", "//include/envoy/ssl:context_manager_interface", + "//include/envoy/upstream:types_interface", + ], +) + +envoy_cc_library( + name = "cluster_factory_interface", + hdrs = ["cluster_factory.h"], + deps = [ + ":cluster_manager_interface", + ":health_check_host_monitor_interface", + ":load_balancer_type_interface", + ":locality_lib", + ":resource_manager_interface", + ":upstream_interface", + "//include/envoy/common:callback", + "//include/envoy/config:typed_metadata_interface", + "//include/envoy/http:codec_interface", + "//include/envoy/network:connection_interface", + "//include/envoy/network:transport_socket_interface", + "//include/envoy/runtime:runtime_interface", + "//include/envoy/ssl:context_interface", + "//include/envoy/ssl:context_manager_interface", + "@envoy_api//envoy/api/v2:cds_cc", ], ) diff --git a/include/envoy/upstream/cluster_factory.h b/include/envoy/upstream/cluster_factory.h new file mode 100644 index 0000000000000..86ff93c2cb810 --- /dev/null +++ b/include/envoy/upstream/cluster_factory.h @@ -0,0 +1,143 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "envoy/access_log/access_log.h" +#include "envoy/api/api.h" +#include "envoy/api/v2/core/base.pb.h" +#include "envoy/event/dispatcher.h" +#include "envoy/local_info/local_info.h" +#include "envoy/network/dns.h" +#include "envoy/runtime/runtime.h" +#include "envoy/server/admin.h" +#include "envoy/singleton/manager.h" +#include "envoy/ssl/context.h" +#include "envoy/ssl/context_manager.h" +#include "envoy/stats/stats.h" +#include "envoy/stats/store.h" +#include "envoy/thread_local/thread_local.h" +#include "envoy/upstream/cluster_manager.h" 
+#include "envoy/upstream/outlier_detection.h" + +namespace Envoy { +namespace Upstream { + +/** + * Context passed to cluster factory to access envoy resources. Cluster factory should only access + * the rest of the server through this context object. + */ +class ClusterFactoryContext { +public: + virtual ~ClusterFactoryContext() = default; + + /** + * @return bool flag indicating whether the cluster is added via api. + */ + virtual bool addedViaApi() PURE; + + /** + * @return Server::Admin& the server's admin interface. + */ + virtual Server::Admin& admin() PURE; + + /** + * @return Api::Api& a reference to the api object. + */ + virtual Api::Api& api() PURE; + + /** + * @return Upstream::ClusterManager& singleton for use by the entire server. + */ + virtual ClusterManager& clusterManager() PURE; + + /** + * @return Event::Dispatcher& the main thread's dispatcher. This dispatcher should be used + * for all singleton processing. + */ + virtual Event::Dispatcher& dispatcher() PURE; + + /** + * @return Network::DnsResolverSharedPtr the dns resolver for the server. + */ + virtual Network::DnsResolverSharedPtr dnsResolver() PURE; + + /** + * @return information about the local environment the server is running in. + */ + virtual const LocalInfo::LocalInfo& localInfo() PURE; + + /** + * @return AccessLogManager for use by the entire server. + */ + virtual AccessLog::AccessLogManager& logManager() PURE; + + /** + * @return RandomGenerator& the random generator for the server. + */ + virtual Runtime::RandomGenerator& random() PURE; + + /** + * @return Runtime::Loader& the singleton runtime loader for the server. + */ + virtual Runtime::Loader& runtime() PURE; + + /** + * @return Singleton::Manager& the server-wide singleton manager. + */ + virtual Singleton::Manager& singletonManager() PURE; + + /** + * @return Ssl::ContextManager& the SSL context manager. 
+ */ + virtual Ssl::ContextManager& sslContextManager() PURE; + + /** + * TODO(hyang): Remove this and only expose the scope, this would require refactoring + * TransportSocketFactoryContext + * @return the server-wide stats store. + */ + virtual Stats::Store& stats() PURE; + + /** + * @return the server's TLS slot allocator. + */ + virtual ThreadLocal::SlotAllocator& tls() PURE; + + /** + * @return Outlier::EventLoggerSharedPtr sink for outlier detection event logs. + */ + virtual Outlier::EventLoggerSharedPtr outlierEventLogger() PURE; +}; + +/** + * Implemented by cluster and registered via Registry::registerFactory() or the convenience class + * RegisterFactory. + */ +class ClusterFactory { +public: + virtual ~ClusterFactory() = default; + + /** + * Create a new instance of cluster. If the implementation is unable to produce a cluster instance + * with the provided parameters, it should throw an EnvoyException in the case of general error. + * @param cluster supplies the general protobuf configuration for the cluster. + * @param context supplies the cluster's context. + * @return ClusterSharedPtr the cluster instance. + */ + virtual ClusterSharedPtr create(const envoy::api::v2::Cluster& cluster, + ClusterFactoryContext& context) PURE; + + /** + * @return std::string the identifying name for a particular implementation of a cluster factory. + */ + virtual std::string name() PURE; +}; + +} // namespace Upstream +} // namespace Envoy diff --git a/include/envoy/upstream/cluster_manager.h b/include/envoy/upstream/cluster_manager.h index f5cc750738cf6..20a69617bfdf2 100644 --- a/include/envoy/upstream/cluster_manager.h +++ b/include/envoy/upstream/cluster_manager.h @@ -75,6 +75,25 @@ class ClusterManager { public: virtual ~ClusterManager() {} + /** + * Warming state a cluster is currently in. Used as an argument for the ClusterWarmingCallback. + */ + enum class ClusterWarmingState { + // Sent after cluster warming has finished. 
+ Finished = 0, + // Sent just before cluster warming is about to start. + Starting = 1, + }; + + /** + * Called by the ClusterManager when cluster's warming state changes + * + * @param cluster_name name of the cluster. + * @param warming_state state the cluster transitioned to. + */ + typedef std::function + ClusterWarmingCallback; + /** * Add or update a cluster via API. The semantics of this API are: * 1) The hash of the config is used to determine if an already existing cluster has changed. @@ -86,7 +105,8 @@ class ClusterManager { * @return true if the action results in an add/update of a cluster. */ virtual bool addOrUpdateCluster(const envoy::api::v2::Cluster& cluster, - const std::string& version_info) PURE; + const std::string& version_info, + ClusterWarmingCallback cluster_warming_cb) PURE; /** * Set a callback that will be invoked when all owned clusters have been initialized. @@ -214,6 +234,8 @@ class ClusterManager { addThreadLocalClusterUpdateCallbacks(ClusterUpdateCallbacks& callbacks) PURE; virtual ClusterManagerFactory& clusterManagerFactory() PURE; + + virtual std::size_t warmingClusterCount() const PURE; }; typedef std::unique_ptr ClusterManagerPtr; diff --git a/include/envoy/upstream/health_checker.h b/include/envoy/upstream/health_checker.h index a625bd5a1bd75..9d8041235366d 100644 --- a/include/envoy/upstream/health_checker.h +++ b/include/envoy/upstream/health_checker.h @@ -40,7 +40,8 @@ class HealthChecker { * @param changed_state supplies whether the health check resulted in a host moving from healthy * to not healthy or vice versa. 
*/ - typedef std::function HostStatusCb; + typedef std::function + HostStatusCb; /** * Install a callback that will be invoked every time a health check round is completed for diff --git a/include/envoy/upstream/load_balancer.h b/include/envoy/upstream/load_balancer.h index 6906990074e73..a62d2ac3e309a 100644 --- a/include/envoy/upstream/load_balancer.h +++ b/include/envoy/upstream/load_balancer.h @@ -71,6 +71,11 @@ class LoadBalancerContext { * ignored. */ virtual uint32_t hostSelectionRetryCount() const PURE; + + /** + * Returns the set of socket options which should be applied on upstream connections + */ + virtual Network::Socket::OptionsSharedPtr upstreamSocketOptions() const PURE; }; /** diff --git a/include/envoy/upstream/load_balancer_type.h b/include/envoy/upstream/load_balancer_type.h index 5d8c02afe6926..b7bb058c61f84 100644 --- a/include/envoy/upstream/load_balancer_type.h +++ b/include/envoy/upstream/load_balancer_type.h @@ -58,6 +58,12 @@ class LoadBalancerSubsetInfo { * fraction of hosts removed from the original host set. */ virtual bool scaleLocalityWeight() const PURE; + + /* + * @return bool whether to attempt to select a host from the entire cluster if host + * selection from the fallback subset fails. 
+ */ + virtual bool panicModeAny() const PURE; }; } // namespace Upstream diff --git a/include/envoy/upstream/locality.h b/include/envoy/upstream/locality.h index 95c30cc121044..f4ff0948cfbd7 100644 --- a/include/envoy/upstream/locality.h +++ b/include/envoy/upstream/locality.h @@ -24,8 +24,7 @@ struct LocalityEqualTo { struct LocalityLess { bool operator()(const envoy::api::v2::core::Locality& lhs, const envoy::api::v2::core::Locality& rhs) const { - using LocalityTuple = std::tuple; + using LocalityTuple = std::tuple; const LocalityTuple lhs_tuple = LocalityTuple(lhs.region(), lhs.zone(), lhs.sub_zone()); const LocalityTuple rhs_tuple = LocalityTuple(rhs.region(), rhs.zone(), rhs.sub_zone()); return lhs_tuple < rhs_tuple; diff --git a/include/envoy/upstream/outlier_detection.h b/include/envoy/upstream/outlier_detection.h index d9b1c9b4c8786..d5ee2fbcd938c 100644 --- a/include/envoy/upstream/outlier_detection.h +++ b/include/envoy/upstream/outlier_detection.h @@ -98,7 +98,7 @@ class Detector { /** * Outlier detection change state callback. */ - typedef std::function ChangeStateCb; + typedef std::function ChangeStateCb; /** * Add a changed state callback to the detector. The callback will be called whenever any host diff --git a/include/envoy/upstream/resource_manager.h b/include/envoy/upstream/resource_manager.h index 4bd45feed3ddd..902ce75955f68 100644 --- a/include/envoy/upstream/resource_manager.h +++ b/include/envoy/upstream/resource_manager.h @@ -37,6 +37,11 @@ class Resource { */ virtual void dec() PURE; + /** + * Decrement the resource count by a specific amount. + */ + virtual void decBy(uint64_t amount) PURE; + /** * @return the current maximum allowed number of this resource. */ @@ -73,6 +78,11 @@ class ResourceManager { * @return Resource& active retries. */ virtual Resource& retries() PURE; + + /** + * @return Resource& active connection pools. 
+ */ + virtual Resource& connectionPools() PURE; }; } // namespace Upstream diff --git a/include/envoy/upstream/types.h b/include/envoy/upstream/types.h index 59bd3de65ffa8..202a4d136a17c 100644 --- a/include/envoy/upstream/types.h +++ b/include/envoy/upstream/types.h @@ -51,5 +51,12 @@ struct HealthyAvailability : PriorityAvailability { using PriorityAvailability::PriorityAvailability; }; +// Phantom type indicating that the type is related to healthy hosts. +struct Healthy {}; +// Phantom type indicating that the type is related to degraded hosts. +struct Degraded {}; +// Phantom type indicating that the type is related to excluded hosts. +struct Excluded {}; + } // namespace Upstream } // namespace Envoy diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index 20028c32669a3..bf995d6e2c2cb 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -22,6 +22,7 @@ #include "envoy/upstream/locality.h" #include "envoy/upstream/outlier_detection.h" #include "envoy/upstream/resource_manager.h" +#include "envoy/upstream/types.h" #include "absl/types/optional.h" @@ -50,7 +51,12 @@ class Host : virtual public HostDescription { /* The host is currently marked as degraded through active health checking. */ \ m(DEGRADED_ACTIVE_HC, 0x08) \ /* The host is currently marked as degraded by EDS. */ \ - m(DEGRADED_EDS_HEALTH, 0x10) + m(DEGRADED_EDS_HEALTH, 0x10) \ + /* The host is pending removal from discovery but is stabilized due to */ \ + /* active HC. */ \ + m(PENDING_DYNAMIC_REMOVAL, 0x20) \ + /* The host is pending its initial active health check. 
*/ \ + m(PENDING_ACTIVE_HC, 0x40) // clang-format on #define DECLARE_ENUM(name, value) name = value, @@ -191,10 +197,17 @@ class Host : virtual public HostDescription { typedef std::shared_ptr HostConstSharedPtr; typedef std::vector HostVector; +typedef Phantom HealthyHostVector; +typedef Phantom DegradedHostVector; +typedef Phantom ExcludedHostVector; typedef std::unordered_map HostMap; typedef std::shared_ptr HostVectorSharedPtr; typedef std::shared_ptr HostVectorConstSharedPtr; +typedef std::shared_ptr HealthyHostVectorConstSharedPtr; +typedef std::shared_ptr DegradedHostVectorConstSharedPtr; +typedef std::shared_ptr ExcludedHostVectorConstSharedPtr; + typedef std::unique_ptr HostListPtr; typedef std::unordered_map LocalityWeightsMap; @@ -221,20 +234,21 @@ class HostsPerLocality { virtual const std::vector& get() const PURE; /** - * Clone object with a filter predicate. - * @param predicate on Host entries. - * @return HostsPerLocalityConstSharedPtr clone of the HostsPerLocality with only - * hosts according to predicate. + * Clone object with multiple filter predicates. Returns a vector of clones, each with host that + * match the provided predicates. + * @param predicates vector of predicates on Host entries. + * @return vector of HostsPerLocalityConstSharedPtr clones of the HostsPerLocality that match + * hosts according to predicates. */ - virtual std::shared_ptr - filter(std::function predicate) const PURE; + virtual std::vector> + filter(const std::vector>& predicates) const PURE; /** * Clone object. * @return HostsPerLocalityConstSharedPtr clone of the HostsPerLocality. */ std::shared_ptr clone() const { - return filter([](const Host&) { return true; }); + return filter({[](const Host&) { return true; }})[0]; } }; @@ -250,6 +264,7 @@ typedef std::shared_ptr LocalityWeightsConstSharedPtr; * Base host set interface. This contains all of the endpoints for a given LocalityLbEndpoints * priority level. 
*/ +// TODO(snowp): Remove the const ref accessors in favor of the shared_ptr ones. class HostSet { public: virtual ~HostSet() {} @@ -259,6 +274,11 @@ class HostSet { */ virtual const HostVector& hosts() const PURE; + /** + * @return a shared ptr to the vector returned by hosts(). + */ + virtual HostVectorConstSharedPtr hostsPtr() const PURE; + /** * @return all healthy hosts contained in the set at the current time. NOTE: This set is * eventually consistent. There is a time window where a host in this set may become @@ -267,6 +287,11 @@ class HostSet { */ virtual const HostVector& healthyHosts() const PURE; + /** + * @return a shared ptr to the vector returned by healthyHosts(). + */ + virtual HealthyHostVectorConstSharedPtr healthyHostsPtr() const PURE; + /** * @return all degraded hosts contained in the set at the current time. NOTE: This set is * eventually consistent. There is a time window where a host in this set may become @@ -275,56 +300,78 @@ class HostSet { */ virtual const HostVector& degradedHosts() const PURE; + /** + * @return a shared ptr to the vector returned by degradedHosts(). + */ + virtual DegradedHostVectorConstSharedPtr degradedHostsPtr() const PURE; + + /* + * @return all excluded hosts contained in the set at the current time. Excluded hosts should be + * ignored when computing load balancing weights, but may overlap with hosts in hosts(). + */ + virtual const HostVector& excludedHosts() const PURE; + + /** + * @return a shared ptr to the vector returned by excludedHosts(). + */ + virtual ExcludedHostVectorConstSharedPtr excludedHostsPtr() const PURE; + /** * @return hosts per locality. */ virtual const HostsPerLocality& hostsPerLocality() const PURE; + /** + * @return a shared ptr to the HostsPerLocality returned by hostsPerLocality(). + */ + virtual HostsPerLocalityConstSharedPtr hostsPerLocalityPtr() const PURE; + /** * @return same as hostsPerLocality but only contains healthy hosts. 
*/ virtual const HostsPerLocality& healthyHostsPerLocality() const PURE; + /** + * @return a shared ptr to the HostsPerLocality returned by healthyHostsPerLocality(). + */ + virtual HostsPerLocalityConstSharedPtr healthyHostsPerLocalityPtr() const PURE; + /** * @return same as hostsPerLocality but only contains degraded hosts. */ virtual const HostsPerLocality& degradedHostsPerLocality() const PURE; /** - * @return weights for each locality in the host set. + * @return a shared ptr to the HostsPerLocality returned by degradedHostsPerLocality(). */ - virtual LocalityWeightsConstSharedPtr localityWeights() const PURE; + virtual HostsPerLocalityConstSharedPtr degradedHostsPerLocalityPtr() const PURE; /** - * @return next locality index to route to if performing locality weighted balancing. + * @return same as hostsPerLocality but only contains excluded hosts. */ - virtual absl::optional chooseLocality() PURE; + virtual const HostsPerLocality& excludedHostsPerLocality() const PURE; /** - * Parameter class for updateHosts. + * @return a shared ptr to the HostsPerLocality returned by excludedHostsPerLocality(). */ - struct UpdateHostsParams { - HostVectorConstSharedPtr hosts; - HostVectorConstSharedPtr healthy_hosts; - HostVectorConstSharedPtr degraded_hosts; - HostsPerLocalityConstSharedPtr hosts_per_locality; - HostsPerLocalityConstSharedPtr healthy_hosts_per_locality; - HostsPerLocalityConstSharedPtr degraded_hosts_per_locality; - }; + virtual HostsPerLocalityConstSharedPtr excludedHostsPerLocalityPtr() const PURE; /** - * Updates the hosts in a given host set. - * - * @param update_hosts_param supplies the list of hosts and hosts per locality. - * @param locality_weights supplies a map from locality to associated weight. - * @param hosts_added supplies the hosts added since the last update. - * @param hosts_removed supplies the hosts removed since the last update. - * @param overprovisioning_factor if presents, overwrites the current overprovisioning_factor. 
+ * @return weights for each locality in the host set. */ - virtual void updateHosts(UpdateHostsParams&& update_host_params, - LocalityWeightsConstSharedPtr locality_weights, - const HostVector& hosts_added, const HostVector& hosts_removed, - absl::optional overprovisioning_factor) PURE; + virtual LocalityWeightsConstSharedPtr localityWeights() const PURE; + + /** + * @return next locality index to route to if performing locality weighted balancing + * against healthy hosts. + */ + virtual absl::optional chooseHealthyLocality() PURE; + + /** + * @return next locality index to route to if performing locality weighted balancing + * against degraded hosts. + */ + virtual absl::optional chooseDegradedLocality() PURE; /** * @return uint32_t the priority of this host set. @@ -376,17 +423,83 @@ class PrioritySet { virtual Common::CallbackHandle* addPriorityUpdateCb(PriorityUpdateCb callback) const PURE; /** - * Returns the host sets for this priority set, ordered by priority. - * The first element in the vector is the host set for priority 0, and so on. + * @return const std::vector& the host sets, ordered by priority. + */ + virtual const std::vector& hostSetsPerPriority() const PURE; + + /** + * Parameter class for updateHosts. + */ + struct UpdateHostsParams { + HostVectorConstSharedPtr hosts; + HealthyHostVectorConstSharedPtr healthy_hosts; + DegradedHostVectorConstSharedPtr degraded_hosts; + ExcludedHostVectorConstSharedPtr excluded_hosts; + HostsPerLocalityConstSharedPtr hosts_per_locality; + HostsPerLocalityConstSharedPtr healthy_hosts_per_locality; + HostsPerLocalityConstSharedPtr degraded_hosts_per_locality; + HostsPerLocalityConstSharedPtr excluded_hosts_per_locality; + }; + + /** + * Updates the hosts in a given host set. * - * @return std::vector& the host sets for this priority set. + * @param priority the priority of the host set to update. + * @param update_hosts_param supplies the list of hosts and hosts per locality. 
+ * @param locality_weights supplies a map from locality to associated weight. + * @param hosts_added supplies the hosts added since the last update. + * @param hosts_removed supplies the hosts removed since the last update. + * @param overprovisioning_factor if present, overwrites the current overprovisioning_factor. */ - virtual std::vector& hostSetsPerPriority() PURE; + virtual void updateHosts(uint32_t priority, UpdateHostsParams&& update_host_params, + LocalityWeightsConstSharedPtr locality_weights, + const HostVector& hosts_added, const HostVector& hosts_removed, + absl::optional overprovisioning_factor) PURE; /** - * @return const std::vector& the host sets, ordered by priority. + * Callback provided during batch updates that can be used to update hosts. */ - virtual const std::vector& hostSetsPerPriority() const PURE; + class HostUpdateCb { + public: + virtual ~HostUpdateCb() {} + /** + * Updates the hosts in a given host set. + * + * @param priority the priority of the host set to update. + * @param update_hosts_param supplies the list of hosts and hosts per locality. + * @param locality_weights supplies a map from locality to associated weight. + * @param hosts_added supplies the hosts added since the last update. + * @param hosts_removed supplies the hosts removed since the last update. + * @param overprovisioning_factor if present, overwrites the current overprovisioning_factor. + */ + virtual void updateHosts(uint32_t priority, UpdateHostsParams&& update_host_params, + LocalityWeightsConstSharedPtr locality_weights, + const HostVector& hosts_added, const HostVector& hosts_removed, + absl::optional overprovisioning_factor) PURE; + }; + + /** + * Callback that provides the mechanism for performing batch host updates for a PrioritySet. + */ + class BatchUpdateCb { + public: + virtual ~BatchUpdateCb() {} + + /** + * Performs a batch host update. Implementors should use the provided callback to update hosts + * in the PrioritySet. 
+ */ + virtual void batchUpdate(HostUpdateCb& host_update_cb) PURE; + }; + + /** + * Allows updating hosts for multiple priorities at once, deferring the MemberUpdateCb from + * triggering until all priorities have been updated. The resulting callback will take into + * account hosts moved from one priority to another. + * + * @param callback callback to use to add hosts. + */ + virtual void batchHostUpdate(BatchUpdateCb& callback) PURE; }; /** @@ -408,6 +521,7 @@ class PrioritySet { COUNTER (lb_subsets_removed) \ COUNTER (lb_subsets_selected) \ COUNTER (lb_subsets_fallback) \ + COUNTER (lb_subsets_fallback_panic) \ COUNTER (original_dst_host_invalid) \ COUNTER (upstream_cx_total) \ GAUGE (upstream_cx_active) \ @@ -434,6 +548,7 @@ class PrioritySet { COUNTER (upstream_cx_protocol_error) \ COUNTER (upstream_cx_max_requests) \ COUNTER (upstream_cx_none_healthy) \ + COUNTER (upstream_cx_pool_overflow) \ COUNTER (upstream_rq_total) \ GAUGE (upstream_rq_active) \ COUNTER (upstream_rq_completed) \ @@ -461,6 +576,7 @@ class PrioritySet { COUNTER (membership_change) \ GAUGE (membership_healthy) \ GAUGE (membership_degraded) \ + GAUGE (membership_excluded) \ GAUGE (membership_total) \ COUNTER (retry_or_shadow_abandoned) \ COUNTER (update_attempt) \ @@ -468,6 +584,8 @@ class PrioritySet { COUNTER (update_failure) \ COUNTER (update_empty) \ COUNTER (update_no_rebuild) \ + COUNTER (assignment_timeout_received) \ + COUNTER (assignment_stale) \ GAUGE (version) // clang-format on @@ -482,14 +600,21 @@ class PrioritySet { // clang-format on /** - * Cluster circuit breakers stats. + * Cluster circuit breakers stats. Open circuit breaker stats and remaining resource stats + * can be handled differently by passing in different macros. 
*/ // clang-format off -#define ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(GAUGE) \ - GAUGE (cx_open) \ - GAUGE (rq_pending_open) \ - GAUGE (rq_open) \ - GAUGE (rq_retry_open) +#define ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(OPEN_GAUGE, REMAINING_GAUGE) \ + OPEN_GAUGE (cx_open) \ + OPEN_GAUGE (rq_pending_open) \ + OPEN_GAUGE (rq_open) \ + OPEN_GAUGE (rq_retry_open) \ + OPEN_GAUGE (cx_pool_open) \ + REMAINING_GAUGE (remaining_cx) \ + REMAINING_GAUGE (remaining_pending) \ + REMAINING_GAUGE (remaining_rq) \ + REMAINING_GAUGE (remaining_retries) \ + REMAINING_GAUGE (remaining_cx_pools) // clang-format on /** @@ -510,7 +635,7 @@ struct ClusterLoadReportStats { * Struct definition for cluster circuit breakers stats. @see stats_macros.h */ struct ClusterCircuitBreakersStats { - ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(GENERATE_GAUGE_STRUCT) + ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(GENERATE_GAUGE_STRUCT, GENERATE_GAUGE_STRUCT) }; /** @@ -709,6 +834,17 @@ class ClusterInfo { */ virtual bool drainConnectionsOnHostRemoval() const PURE; + /** + * @return true if this cluster is configured to ignore hosts for the purpose of load balancing + * computations until they have been health checked for the first time. + */ + virtual bool warmHosts() const PURE; + + /** + * @return eds cluster service_name of the cluster. + */ + virtual absl::optional eds_service_name() const PURE; + protected: /** * Invoked by extensionProtocolOptionsTyped. diff --git a/security/email-templates.md b/security/email-templates.md new file mode 100644 index 0000000000000..99175d1f64e8a --- /dev/null +++ b/security/email-templates.md @@ -0,0 +1,131 @@ +# Envoy Security Process Email Templates + +This is a collection of email templates to handle various situations the security team encounters. 
+ +## Upcoming security release to envoy-announce@googlegroups.com + +``` +Subject: Upcoming security release of Envoy $VERSION +To: envoy-announce@googlegroups.com +Cc: envoy-security@googlegroups.com, envoy-maintainers@googlegroups.com + +Hello Envoy Community, + +The Envoy maintainers would like to announce the forthcoming release of Envoy +$VERSION. + +This release will be made available on the $ORDINALDAY of $MONTH $YEAR at +$PDTHOUR PDT ($GMTHOUR GMT). This release will fix $NUMDEFECTS security +defect(s). The highest rated security defect is considered $SEVERITY severity. + +No further details or patches will be made available in advance of the release. + +Thanks, +$PERSON (on behalf of the Envoy maintainers) +``` + +## Upcoming security release to cncf-envoy-distributors-announce@lists.cncf.io + +``` +Subject: [CONFIDENTIAL] Further details on security release of Envoy $VERSION +To: cncf-envoy-distributors-announce@lists.cncf.io +Cc: envoy-security@googlegroups.com + +Hello Envoy Distributors, + +The Envoy security team would like to provide advance notice to the Envoy +Private Distributors List of some details on the pending Envoy $VERSION +security release, following the process described at +https://github.com/envoyproxy/envoy/blob/master/SECURITY_RELEASE_PROCESS.md. + +This release will be made available on the $ORDINALDAY of $MONTH $YEAR at +$PDTHOUR PDT ($GMTHOUR GMT). This release will fix $NUMDEFECTS security +defect(s). The highest rated security defect is considered $SEVERITY severity. + +Below we provide details of these vulnerabilities under our embargo policy +(https://github.com/envoyproxy/envoy/blob/master/SECURITY_RELEASE_PROCESS.md#embargo-policy). +This information should be treated as confidential until public release by the +Envoy maintainers on the Envoy GitHub. + +We will address the following CVE(s): + +* CVE-YEAR-ABCDEF (CVSS score $CVSS, $SEVERITY): $CVESUMMARY +... 
+ +We intend to make candidate release patches available under embargo on the +$ORDINALDAY of $MONTH $YEAR, which you may use for testing and preparing your +distributions. + +Please direct further communication amongst private distributors to this list +or to envoy-security@googlegroups.com for direct communication with the Envoy +security team. + +Thanks, +$PERSON (on behalf of the Envoy security team) +``` + +## Security Fix Announcement + +``` +Subject: Security release of Envoy $VERSION is now available +To: envoy-announce@googlegroups.com +Cc: envoy-security@googlegroups.com, envoy-maintainers@googlegroups.com + +Hello Envoy Community, + +The Envoy maintainers would like to announce the availability of Envoy $VERSION. +This addresses the following CVE(s): + +* CVE-YEAR-ABCDEF (CVSS score $CVSS): $CVESUMMARY +... + +Upgrading to $VERSION is encouraged to fix these issues. + +GitHub tag: https://github.com/envoyproxy/envoy/releases/tag/v$VERSION +Docker images: https://hub.docker.com/r/envoyproxy/envoy/tags +Release notes: https://www.envoyproxy.io/docs/envoy/v$VERSION/intro/version_history +Docs: https://www.envoyproxy.io/docs/envoy/v$VERSION/ + +**Am I vulnerable?** + +Run `envoy --version` and if it indicates a base version of $OLDVERSION or +older you are running a vulnerable version. + + + +**How do I mitigate the vulnerability?** + + + +Avoid the use of feature XYZ in Envoy configuration. + +**How do I upgrade?** + +Update to $VERSION via your Envoy distribution or rebuild from the Envoy GitHub +source at the $VERSION tag or HEAD @ master. + +**Vulnerability Details** + + + +***CVE-YEAR-ABCDEF*** + +$CVESUMMARY + +This issue is filed as $CVE. We have rated it as [$CVSSSTRING]($CVSSURL) +($CVSS, $SEVERITY) [See the GitHub issue for more details]($GITHUBISSUEURL) + +**Thank you** + +Thank you to $REPORTER, $DEVELOPERS, and the $RELEASEMANAGERS for the +coordination in making this release. 
+ +Thanks, + +$PERSON (on behalf of the Envoy maintainers) +``` diff --git a/security/gh-cve-template.md b/security/gh-cve-template.md new file mode 100644 index 0000000000000..8cc9f78bd2391 --- /dev/null +++ b/security/gh-cve-template.md @@ -0,0 +1,52 @@ +>This template is for public disclosure of CVE details on Envoy's GitHub. It should be filed +with the public release of a security patch version, and will be linked to in the announcement sent +to envoy-announce@googlegroups.com. The title of this issue should be the CVE identifier and it +should have the `security` label applied. + +# CVE-YEAR-ABCDEF + +## Brief description + +>Brief description used when filing CVE. + +## CVSS + +>[$CVSSSTRING]($CVSSURL)($CVSSSCORE, $SEVERITY) + +## Affected version(s) + +>Envoy x.y.z and before. + +## Affected component(s) + +>List affected internal components and features. + +## Attack vector(s) + +>How would an attacker use this? + +## Discoverer(s)/Credits + +>Individual and optional organization. + +## Example exploit or proof-of-concept + +>If there is proof-of-concept or example, provide a concrete example. + +## Details + +>Deep dive into the defect. This should be detailed enough to maintain a record for posterity while +being clear and concise. + +## Mitigations + +>Are there configuration or CLI options that can be used to mitigate? + +## Detection + +>How can exploitation of this bug be detected in existing and future Envoy versions? E.g. access logs. + +## References + +* CVE: $CVEURL +>Any other public information. diff --git a/security/postmortem-template.md b/security/postmortem-template.md new file mode 100644 index 0000000000000..b442897c87272 --- /dev/null +++ b/security/postmortem-template.md @@ -0,0 +1,75 @@ +> Slimmed down template from: Betsy Beyer, Chris Jones, Jennifer Petoff, and Niall Richard +> Murphy. 
[“Site Reliability +> Engineering.”](https://landing.google.com/sre/book/chapters/postmortem.html), +> modified from +> https://raw.githubusercontent.com/dastergon/postmortem-templates/master/templates/postmortem-template-srebook.md. + +> Follow the SRE link for examples of how to populate. + +> A PR should be opened with postmortem placed in security/postmortems/cve-year-abcdef.md. If there +> are multiple CVEs in the postmortem, populate each alias with the string "See cve-year-abcdef.md". + +# Security postmortem for CVE-YEAR-ABCDEF, CVE-YEAR-ABCDEG + +## Incident date(s) + +> YYYY-MM-DD (as a date range if over a period of time) + +## Authors + +> @foo, @bar, ... + +## Status + +> Draft | Final + +## Summary + +> A few sentence summary. + +## CVE issue(s) + +> https://github.com/envoyproxy/envoy/issues/${CVE_ISSUED_ID} + +## Root Causes + +> What defect in Envoy led to the CVEs? How did this defect arise? + +## Resolution + +> How was the security release process followed? How were the fix patches +> structured and authored? + +## Detection + +> How was this discovered? Reported by XYZ, found by fuzzing? Private or public +> disclosure? + +## Action Items + +> Create action item issues and include in their body "Action item for +> CVE-YEAR-ABCDEF". 
Modify the search string below to include in the PR: + +https://github.com/envoyproxy/envoy/issues?utf8=%E2%9C%93&q=is%3Aissue+%22Action+item+for+CVE-YEAR-ABCDEF%22 + +## Lessons Learned + +### What went well + +### What went wrong + +### Where we got lucky + +## Timeline + +All times US/Pacific + +YYYY-MM-DD +* HH:MM Cake was made available +* HH:MM People ate the cake + +YYYY-MM-DD +* HH:MM More cake was available +* HH:MM People ate more cake + +## Supporting information diff --git a/security/postmortems/cve-2019-9900.md b/security/postmortems/cve-2019-9900.md new file mode 100644 index 0000000000000..d6b6d38af5792 --- /dev/null +++ b/security/postmortems/cve-2019-9900.md @@ -0,0 +1,323 @@ +# Security postmortem for CVE-2019-9900, CVE-2019-9901 + +## Incident date(s) + +2019-02-18 - 2019-04-05 + +## Authors + +@htuch + +## Status + +Final + +## Summary + +Two independent vulnerabilities related to a mismatch between the information used for request +matching and routing were discovered in February/March 2019, leading to the potential ability for an +attacker to bypass access control checks and route table intent. Since these issues had similar +attack vectors and were discovered within the same embargo window, the issues were grouped and +resolved (mostly) privately by Envoy and Istio fix teams, resulting in the Envoy 1.9.1 security +release issued on 2019-04-05. + +This was the first time in which the Envoy security release process was followed and provided a +learning opportunity to refine the process, originally borrowed from the Kubernetes project, to +Envoy's requirements. While the issues were upgraded from medium to high criticality during the fix +process, they were limited in impact to a subset of users and specific configuration patterns. This +postmortem captures the issues encountered during the fix process and provides actionable next +steps. 
+ +## CVE issue(s) + +* https://github.com/envoyproxy/envoy/issues/6434 +* https://github.com/envoyproxy/envoy/issues/6435 + +## Root Causes + +CVE-2019-9900 resulted from Envoy assuming that its codec libraries (http-parser, nghttp2) followed +RFC 7230 and would reject any header value with an embedded NUL character. Unfortunately, +http-parser did not do this due to an optimization in header value processing +(https://github.com/nodejs/http-parser/issues/468, https://github.com/nodejs/http-parser/pull/469). +In addition, Envoy viewed header strings with a mixture of `c_str()` and `string_view`, allowing the +possibility of inconsistent views between checks and resulting action. A combination of a buggy +external dependency and problematic use of C string views led to this vulnerability. + +CVE-2019-9901 resulted from two distinct views of the role of a proxy in path handling. On the one +hand, Envoy was considered a data forwarding engine for HTTP requests that did not need to perform +path normalization, with this concern left to client and backend. However, at the same time, Envoy +was being used in applications where it intermediated on requests for access control purposes (e.g. +RBAC, `ext_authz`) and performed path matching against policy. Especially in the presence of a +backend that itself normalizes, this access control role required that path normalization be applied +in the proxy. + +## Resolution + +CVE-2019-9900 was reported by Envoy maintainer @htuch on 2019-03-10 to +envoy-security@googlegroups.com. After some discussion, it was agreed that this warranted invoking +the security release process. The issue was mitigated in the Envoy private security repository by +@htuch and the Envoy security fix team. A single patch +(https://github.com/envoyproxy/envoy/commit/b155af75fad7861e941b5939dc001abf581c9203) was required +to workaround the http-parser behavior. 
In addition, both tests and fuzzers were +created to validate the behavior when NULs were introduced anywhere in an HTTP/1 or HTTP/2 request +(https://github.com/envoyproxy/envoy/commit/1e61a3f95f2c4d9ac1e54feae8693cee7906e2eb). Manual code +inspection was also performed in nghttp2 to verify the absence of vulnerability. While doing so, a +non-security related bug was discovered (https://github.com/nghttp2/nghttp2/issues/1331). + +Shortly after discovery of CVE-2019-9900, the http-parser issue was reported to the Node.js security +working group at security@nodejs.org, since http-parser lives under the umbrella of the Node.js +project. The full vulnerability was described and the Envoy security team proposed working with +Node.js PST. As there was no reply, we proceeded independently. Unfortunately, it appears that the +Node.js security WG never received the e-mail, due to the reliance of Node.js on HackerOne to gate +incoming issues and a problematic e-mail forwarding chain +(https://github.com/nodejs/security-wg/issues/454#issuecomment-481919759). We have since filed a +HackerOne issue with the original report e-mail. + +CVE-2019-9901 was privately disclosed to the Istio security team by an external researcher on +2019-02-18 and accidentally publicly disclosed in part in +https://github.com/envoyproxy/envoy/issues/6008 on 2019-03-13. Once the severity of this was +realized via offline discussion between the Envoy security team and the PR authors, we moved to a +private fix process in conjunction with CVE-2019-9900, targeting the 1.9.1 release. The Google Istio +security and networking teams led the efforts to fix this vulnerability in Envoy's private security +repository. The workaround implementation of path normalization borrowed from Chromium's URL +library, adapted and minified for the Envoy context. 
The following patches were +produced: +* https://github.com/envoyproxy/envoy/commit/c22cfd2c483fc26534382a0b6835f45264bb137a +* https://github.com/envoyproxy/envoy/commit/7ed6d2187df94c4cb96f7dccb8643bf764af2ccb + +In both cases, the Envoy security team considered the issues of medium criticality (CVSS 6.5) +initially, since it was thought that the attack complexity was high, requiring special circumstances +to apply. As we continued discussion with Istio and Google teams, it became apparent that the +exploits were trivial to automate and we upgraded to high criticality (CVSS 8.3), due to the lower +attack complexity. + +A 1.9.1 security release was initially targeted for 2019-04-02 and announced on 2019-03-22. An +e-mail was sent to the Envoy private distributor list sharing CVE details. + +After private discussions with a distributor on 2019-03-28, who expressed concern over the very +short (3 working day) distance between fix patch availability and release, the Envoy security team +decided to delay the 1.9.1 release until 2019-04-05. This provided 1 week for distributors to +prepare their software for the security release date. + +Fix patches were shared with the private distributor list late on 2019-03-28. + +During the fix process, two distributors reached out to us to request the ability to stage in +publicly accessible locations binary images with the fixes applied. While technically this would +violate embargo, we decided to allow this due to a lack of a clear alternative; Envoy's sidecar +use cases and reliance on Docker for distribution, where images are generally staged on public hubs, +did not lend itself to opaque rollout. + +## Detection + +The underlying issue behind CVE-2019-9900 was first noticed via fuzzers when an explicit `ASSERT` +check for embedded NULs was added in #6170. The following issue was tripped by +`h1_capture_fuzz_test`: +https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=13613. 
Some experiments with `netcat`, `tcpdump` +and an Envoy binary demonstrated that it was viable to at least bypass header suffix matches via +this mechanism. + +CVE-2019-9901 was reported by an external researcher (Erlend Oftedal) to the Istio security team. + +## Action Items + +* https://github.com/envoyproxy/envoy/issues?utf8=%E2%9C%93&q=is%3Aissue+%22Action+item+for+CVE-2019-9900%22+ +* https://github.com/envoyproxy/envoy/issues?utf8=%E2%9C%93&q=is%3Aissue+%22Action+item+for+CVE-2019-9901%22+ + +## Lessons Learned + +### What went well + +* Fix patches were available within 2 weeks of vulnerability disclosure. The + changes were localized and relatively clean. + +### What went wrong + +* The Envoy private distributor list was initially almost empty. We sent out an + e-mail to remind distributors to sign up on 2019-03-14 and the list is now O(10). + +* The security impact of https://github.com/envoyproxy/envoy/issues/6008 + was not caught by Envoy until this was brought to our attention ~20 days after + the issue was first pushed. Ideally such issues should be routed to + envoy-security@googlegroups.com first in the future and Envoy + reviewers/maintainers should keep an eye out for inadvertent security + disclosures through public channels. In addition, an earlier issue + https://github.com/envoyproxy/envoy/issues/2956 was opened a year previous, but was not tagged as + being security sensitive. + +* Applicants for the private distributor list were turned down based on + membership criteria that was adopted from k8s. This is now being revisited in + https://github.com/envoyproxy/envoy/issues/6586. + +* Distributors were only provided 3 days from candidate fix patch availability + until public release at first. While this was extended to 1 week, even this + might be too little. This is now being codified in + https://github.com/envoyproxy/envoy/issues/6587. + +* The Chromium URL library was forked, minified and adapted to Envoy. 
This was + expedient but not a maintainable long term solution, see + https://github.com/envoyproxy/envoy/issues/6588. + +* Only coarse grained control over path normalization was provided, since this + was expedient and mitigated the vulnerability. We should provide finer grained + controls, see https://github.com/envoyproxy/envoy/issues/6589. + +* Our report to the Node.js security working group was lost due to + https://github.com/nodejs/security-wg/issues/454#issuecomment-481919759. + We should avoid this happening Envoy-side, see https://github.com/envoyproxy/envoy/issues/6590. + More generally, we should err on the side of reaching out over more channels + in the future, since it's unclear how effective any given disclosure channel + is. + +* The security release day (2019-04-05) was Friday PDT. We should pick a + globally friendly day-of-week, e.g. Tue-Thu, for security releases. + +* Nginx already had a CVE for path normalization + (https://www.rapid7.com/db/vulnerabilities/nginx-cve-2009-3898) similar to + CVE-2019-9901, but we did not know this until after the fact. We should audit + CVEs for similar class software, see + https://github.com/envoyproxy/envoy/issues/6592. + +* A distributor reached out to the security team for permission to perform + silent binary rollouts as discussed above. While in principle our relaxation + of the embargo policy applied to all distributors, an e-mail was not sent to + the list. This resulted in confusion when a second distributor observed this + rollout. We should ensure going forward that any policy relaxation during CVE + handling is clearly communicated across the board. + +* Public, albeit silent, staging of Docker images before the public security + release date was a necessary pragmatic tradeoff. We need to refine the + security release process to deal with this explicitly, see + https://github.com/envoyproxy/envoy/issues/6593. + +* The security release forced `envoy-dev:latest` back to the 1.9.1 release + branch. 
This should be fixed, see + https://github.com/envoyproxy/envoy/issues/6595. + +* There was a window of ~50 minutes between the release tagging of the Envoy + 1.9.1 branch and availability of Docker images. Ideally we shrink this to + allow users to upgrade faster. See + https://github.com/envoyproxy/envoy/issues/6596. + +* The CVE-2019-9901 fix required either control plane or runtime changes. This + orchestration was not well suited to all deployment environments, so some + distributions, e.g. Istio, applied additional patches to enable at compile + time. Ideally we support control plane, runtime and CLI or compile-time fix + opt-in abilities. + +### Where we got lucky + +* The defects were not critical (by CVSS scoring and intuition) and (mostly) + privately disclosed. This provided an opportunity to exercise and refine the + Envoy security release process. + +* Huffman and HPACK in general frustrates HTTP/2 testing and fuzzing for security + properties. We had no effective fuzzing or testing for this previously as a + result, we were lucky that the scope of CVE-2019-9900 was limited to HTTP/1.1. + +* CVE-2019-9900 was only discovered as a result of additional `ASSERT`s added to + verify a property that Envoy developers were certain held. Fuzzing alone had + not previously caught this. + +* Distributors were able to execute their own security releases within the 1 + week provided from patch availability. Anecdotally, this involved effort + beyond that which we should expect normally to manage an Envoy fix. + +* No known instances reported of pre-release embargo breakage due to silent + public staging of Docker images. + +## Timeline + +All times US/Pacific + +2019-02-18: +* [CVE-2019-9901] Path normalization issue was reported to Istio security team at vulnerabilities@discuss.istio.io. + +2019-02-19: +* [CVE-2019-9901] https://github.com/envoyproxy/envoy/issues/6008 was opened. 
This was not the first Envoy report of + missing path normalization (see https://github.com/envoyproxy/envoy/issues/2956). Neither issue + mentioned the security basis and Envoy reviewers speculated on the potential for path traversal + attacks. + +2019-03-08: +* [CVE-2019-9900] oss-fuzz reports https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=13613 under embargo. + +2019-03-10: +* [CVE-2019-9900] E-mail thread on envoy-security@googlegroups.com regarding the + potential effects of this bug. While it was unclear whether there would be + impact beyond some narrow circumstances, agreement was reached to start the + security release process. Analysis began to determine the extent of impact on + HTTP/2 and Envoy's code base was audited. + +2019-03-11: +* [CVE-2019-9901] https://github.com/envoyproxy/envoy/pull/6258 was opened to address + https://github.com/envoyproxy/envoy/issues/6008. + +2019-03-13: +* [CVE-2019-9901] https://github.com/envoyproxy/envoy/pull/6258 was closed after offline discussions + between Envoy security team and the author, once the Envoy security team became aware of the + potential severity in the Istio setup (in particular with RBAC and Mixer in play). + +2019-03-14: +* [CVE-2019-9900] Findings were presented to envoy-security@. A fix plan was + agreed upon and a candidate fix PR was shared with the team by e-mail. At this + point, no private fix repository existed. +* [CVE-2019-9901] The Istio fix leads initiated private work on a fix patch. + Since it was likely that this would land within the 1.9.1 release + window for CVE-2019-9900, CVE-2019-9901 was also scheduled for the release. +* [Announcement](https://groups.google.com/forum/#!topic/envoy-announce/dEOLqAiaSUI) sent to remind + distributors to join cncf-envoy-distributors-announce@lists.cncf.io. + +2019-03-20: +* CVEs were requested from MITRE for both issues. +* Draft fix PRs for CVE-2019-9900 and CVE-2019-9901 were shared on private Envoy + security repository. 
Reviews and further development occurred over the + following week. + +2019-03-22: +* 11:20 1.9.1 security release for the two vulnerabilities was + [announced](https://groups.google.com/d/msg/envoy-announce/6fwGB2TxB74/dKeURAdfAgAJ). +* 11:24 CVE summary details shared with cncf-envoy-distributors-announce@lists.cncf.io. + +2019-03-28: +* Envoy security team met with a distributor to discuss their concerns over the lack of time between + patch availability and the release date. We agreed that three days was insufficient and agreed to + extend to a week. +* 13:53 A delay of the 1.9.1 release until 2019-04-05 was + [announced](https://groups.google.com/d/msg/envoy-announce/6fwGB2TxB74/Pe3PPFbPBAAJ). +* 20:07 Candidate fix patches for both CVE shared with + cncf-envoy-distributors-announce@lists.cncf.io. + +2019-03-29: +* Envoy security team was contacted by a distributor regarding the permissibility of silently + staging binary images in public locations in advance of the security release due to a lack of + viable alternatives. The Envoy security team agreed that there was no better alternative and + provided an exemption. + +2019-04-02: +* 08:15 The increase of severity from medium to high was + [announced](https://groups.google.com/d/msg/envoy-announce/6fwGB2TxB74/qiDEgclFBgAJ). + This followed several days of offline discussion between Istio and Envoy teams + on Istio's independent assessment of the issues as high severity, and a better + awareness of how to score. What was missing was the intuition that a + vulnerability can be high severity even if it only affects a rather limited + number of users. + +2019-04-04: +* 15:41 The Envoy master branch was frozen to prepare for the security release. PRs were rebased + against master and prepared for the release push. +* 18:33 Envoy security team was contacted by a distributor who had noticed public visibility of + binary images with the fix patch by other vendors. 
After discussion, we agreed on a general + exemption for these CVEs to the embargo policy for binary images with some constraints. +* 19:18 cncf-envoy-distributors-announce@lists.cncf.io was e-mailed to clarify position on staging + of binary images on public sites prior to the release date. A narrow set of circumstances under + which this was permissible was outlined. + +2019-04-05: +* 10:00 - 10:05 The [v1.9.1](https://github.com/envoyproxy/envoy/tree/v1.9.1) release branch was + pushed and the 1.9.1 release was tagged. This started the Docker build process for the release. + The same PRs were pushed to master. +* 10:05 The Envoy 1.9.1 security release was + [announced](https://groups.google.com/forum/?utm_medium=email&utm_source=footer#!topic/envoy-announce/VoHfnDqZiAM). +* 10:57 The v1.9.1 image was available at https://hub.docker.com/r/envoyproxy/envoy/tags. + +## Supporting information diff --git a/security/postmortems/cve-2019-9901.md b/security/postmortems/cve-2019-9901.md new file mode 100644 index 0000000000000..b4e51fa99fc7d --- /dev/null +++ b/security/postmortems/cve-2019-9901.md @@ -0,0 +1 @@ +See [cve-2019-9900.md](cve-2019-9900.md) diff --git a/source/common/access_log/BUILD b/source/common/access_log/BUILD index c7be03032909e..2de71c912d530 100644 --- a/source/common/access_log/BUILD +++ b/source/common/access_log/BUILD @@ -30,6 +30,8 @@ envoy_cc_library( deps = [ "//include/envoy/access_log:access_log_interface", "//include/envoy/api:api_interface", + "//source/common/buffer:buffer_lib", + "//source/common/common:thread_lib", ], ) @@ -37,6 +39,7 @@ envoy_cc_library( name = "access_log_lib", srcs = ["access_log_impl.cc"], hdrs = ["access_log_impl.h"], + external_deps = ["abseil_hash"], deps = [ "//include/envoy/access_log:access_log_interface", "//include/envoy/filesystem:filesystem_interface", diff --git a/source/common/access_log/access_log_formatter.cc b/source/common/access_log/access_log_formatter.cc index b498e85832ece..78f862e7bcee7 100644
--- a/source/common/access_log/access_log_formatter.cc +++ b/source/common/access_log/access_log_formatter.cc @@ -21,6 +21,27 @@ namespace AccessLog { static const std::string UnspecifiedValueString = "-"; +namespace { + +// Helper that handles the case when the ConnectionInfo is missing or if the desired value is +// empty. +StreamInfoFormatter::FieldExtractor sslConnectionInfoStringExtractor( + std::function string_extractor) { + return [string_extractor](const StreamInfo::StreamInfo& stream_info) { + if (stream_info.downstreamSslConnection() == nullptr) { + return UnspecifiedValueString; + } + + const auto value = string_extractor(*stream_info.downstreamSslConnection()); + if (value.empty()) { + return UnspecifiedValueString; + } else { + return value; + } + }; +} +} // namespace + const std::string AccessLogFormatUtils::DEFAULT_FORMAT = "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" " "%RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% " @@ -91,7 +112,7 @@ std::string JsonFormatterImpl::format(const Http::HeaderMap& request_headers, (*output_struct.mutable_fields())[pair.first] = string_value; } - ProtobufTypes::String log_line; + std::string log_line; const auto conversion_status = Protobuf::util::MessageToJsonString(output_struct, &log_line); if (!conversion_status.ok()) { log_line = @@ -135,6 +156,7 @@ void AccessLogFormatParser::parseCommand(const std::string& token, const size_t const std::string& separator, std::string& main, std::vector& sub_items, absl::optional& max_length) { + // TODO(dnoe): Convert this to use string_view throughout. 
size_t end_request = token.find(')', start); sub_items.clear(); if (end_request != token.length() - 1) { @@ -148,10 +170,10 @@ void AccessLogFormatParser::parseCommand(const std::string& token, const size_t throw EnvoyException(fmt::format("Incorrect position of ')' in token: {}", token)); } - std::string length_str = token.substr(end_request + 2); + const auto length_str = absl::string_view(token).substr(end_request + 2); uint64_t length_value; - if (!StringUtil::atoul(length_str.c_str(), length_value)) { + if (!absl::SimpleAtoi(length_str, &length_value)) { throw EnvoyException(fmt::format("Length must be an integer, given: {}", length_str)); } @@ -289,6 +311,11 @@ StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) { return stream_info.responseCode() ? fmt::format_int(stream_info.responseCode().value()).str() : "0"; }; + } else if (field_name == "RESPONSE_CODE_DETAILS") { + field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) { + return stream_info.responseCodeDetails() ? 
stream_info.responseCodeDetails().value() + : UnspecifiedValueString; + }; } else if (field_name == "BYTES_SENT") { field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) { return fmt::format_int(stream_info.bytesSent()).str(); @@ -350,6 +377,34 @@ StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) { return UnspecifiedValueString; } }; + } else if (field_name == "DOWNSTREAM_PEER_URI_SAN") { + field_extractor_ = + sslConnectionInfoStringExtractor([](const Ssl::ConnectionInfo& connection_info) { + return absl::StrJoin(connection_info.uriSanPeerCertificate(), ","); + }); + } else if (field_name == "DOWNSTREAM_LOCAL_URI_SAN") { + field_extractor_ = + sslConnectionInfoStringExtractor([](const Ssl::ConnectionInfo& connection_info) { + return absl::StrJoin(connection_info.uriSanLocalCertificate(), ","); + }); + } else if (field_name == "DOWNSTREAM_PEER_SUBJECT") { + field_extractor_ = + sslConnectionInfoStringExtractor([](const Ssl::ConnectionInfo& connection_info) { + return connection_info.subjectPeerCertificate(); + }); + } else if (field_name == "DOWNSTREAM_LOCAL_SUBJECT") { + field_extractor_ = + sslConnectionInfoStringExtractor([](const Ssl::ConnectionInfo& connection_info) { + return connection_info.subjectLocalCertificate(); + }); + } else if (field_name == "UPSTREAM_TRANSPORT_FAILURE_REASON") { + field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) { + if (!stream_info.upstreamTransportFailureReason().empty()) { + return stream_info.upstreamTransportFailureReason(); + } else { + return UnspecifiedValueString; + } + }; } else { throw EnvoyException(fmt::format("Not supported field in StreamInfo: {}", field_name)); } @@ -385,7 +440,7 @@ std::string HeaderFormatter::format(const Http::HeaderMap& headers) const { if (!header) { header_value_string = UnspecifiedValueString; } else { - header_value_string = header->value().c_str(); + header_value_string = std::string(header->value().getStringView()); } if (max_length_ && 
header_value_string.length() > max_length_.value()) { @@ -449,7 +504,7 @@ std::string MetadataFormatter::format(const envoy::api::v2::core::Metadata& meta } data = &val; } - ProtobufTypes::String json; + std::string json; const auto status = Protobuf::util::MessageToJsonString(*data, &json); RELEASE_ASSERT(status.ok(), ""); if (max_length_ && json.length() > max_length_.value()) { diff --git a/source/common/access_log/access_log_formatter.h b/source/common/access_log/access_log_formatter.h index 3c670e9bd9f8b..103657dcb40db 100644 --- a/source/common/access_log/access_log_formatter.h +++ b/source/common/access_log/access_log_formatter.h @@ -198,8 +198,10 @@ class StreamInfoFormatter : public FormatterProvider { std::string format(const Http::HeaderMap&, const Http::HeaderMap&, const Http::HeaderMap&, const StreamInfo::StreamInfo& stream_info) const override; + using FieldExtractor = std::function; + private: - std::function field_extractor_; + FieldExtractor field_extractor_; }; /** diff --git a/source/common/access_log/access_log_impl.cc b/source/common/access_log/access_log_impl.cc index aa335854eaa5e..852b6e4c47461 100644 --- a/source/common/access_log/access_log_impl.cc +++ b/source/common/access_log/access_log_impl.cc @@ -74,19 +74,24 @@ FilterFactory::fromProto(const envoy::config::filter::accesslog::v2::AccessLogFi case envoy::config::filter::accesslog::v2::AccessLogFilter::kResponseFlagFilter: MessageUtil::validate(config); return FilterPtr{new ResponseFlagFilter(config.response_flag_filter())}; + case envoy::config::filter::accesslog::v2::AccessLogFilter::kGrpcStatusFilter: + MessageUtil::validate(config); + return FilterPtr{new GrpcStatusFilter(config.grpc_status_filter())}; default: NOT_REACHED_GCOVR_EXCL_LINE; } } bool TraceableRequestFilter::evaluate(const StreamInfo::StreamInfo& info, - const Http::HeaderMap& request_headers) { + const Http::HeaderMap& request_headers, + const Http::HeaderMap&, const Http::HeaderMap&) { Tracing::Decision decision = 
Tracing::HttpTracerUtility::isTracing(info, request_headers); return decision.traced && decision.reason == Tracing::Reason::ServiceForced; } -bool StatusCodeFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap&) { +bool StatusCodeFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap&, + const Http::HeaderMap&, const Http::HeaderMap&) { if (!info.responseCode()) { return compareAgainstValue(0ULL); } @@ -94,7 +99,8 @@ bool StatusCodeFilter::evaluate(const StreamInfo::StreamInfo& info, const Http:: return compareAgainstValue(info.responseCode().value()); } -bool DurationFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap&) { +bool DurationFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap&, + const Http::HeaderMap&, const Http::HeaderMap&) { absl::optional final = info.requestComplete(); ASSERT(final); @@ -108,12 +114,14 @@ RuntimeFilter::RuntimeFilter(const envoy::config::filter::accesslog::v2::Runtime percent_(config.percent_sampled()), use_independent_randomness_(config.use_independent_randomness()) {} -bool RuntimeFilter::evaluate(const StreamInfo::StreamInfo&, const Http::HeaderMap& request_header) { +bool RuntimeFilter::evaluate(const StreamInfo::StreamInfo&, const Http::HeaderMap& request_header, + const Http::HeaderMap&, const Http::HeaderMap&) { const Http::HeaderEntry* uuid = request_header.RequestId(); uint64_t random_value; + // TODO(dnoe): Migrate uuidModBy to take string_view (#6580) if (use_independent_randomness_ || uuid == nullptr || !UuidUtils::uuidModBy( - uuid->value().c_str(), random_value, + std::string(uuid->value().getStringView()), random_value, ProtobufPercentHelper::fractionalPercentDenominatorToInt(percent_.denominator()))) { random_value = random_.random(); } @@ -139,11 +147,12 @@ AndFilter::AndFilter(const envoy::config::filter::accesslog::v2::AndFilter& conf Runtime::Loader& runtime, Runtime::RandomGenerator& random) : 
OperatorFilter(config.filters(), runtime, random) {} -bool OrFilter::evaluate(const StreamInfo::StreamInfo& info, - const Http::HeaderMap& request_headers) { +bool OrFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap& request_headers, + const Http::HeaderMap& response_headers, + const Http::HeaderMap& response_trailers) { bool result = false; for (auto& filter : filters_) { - result |= filter->evaluate(info, request_headers); + result |= filter->evaluate(info, request_headers, response_headers, response_trailers); if (result) { break; @@ -153,11 +162,12 @@ bool OrFilter::evaluate(const StreamInfo::StreamInfo& info, return result; } -bool AndFilter::evaluate(const StreamInfo::StreamInfo& info, - const Http::HeaderMap& request_headers) { +bool AndFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap& request_headers, + const Http::HeaderMap& response_headers, + const Http::HeaderMap& response_trailers) { bool result = true; for (auto& filter : filters_) { - result &= filter->evaluate(info, request_headers); + result &= filter->evaluate(info, request_headers, response_headers, response_trailers); if (!result) { break; @@ -167,7 +177,8 @@ bool AndFilter::evaluate(const StreamInfo::StreamInfo& info, return result; } -bool NotHealthCheckFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap&) { +bool NotHealthCheckFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap&, + const Http::HeaderMap&, const Http::HeaderMap&) { return !info.healthCheck(); } @@ -175,7 +186,8 @@ HeaderFilter::HeaderFilter(const envoy::config::filter::accesslog::v2::HeaderFil header_data_.push_back(Http::HeaderUtility::HeaderData(config.header())); } -bool HeaderFilter::evaluate(const StreamInfo::StreamInfo&, const Http::HeaderMap& request_headers) { +bool HeaderFilter::evaluate(const StreamInfo::StreamInfo&, const Http::HeaderMap& request_headers, + const Http::HeaderMap&, const Http::HeaderMap&) { return 
Http::HeaderUtility::matchHeaders(request_headers, header_data_); } @@ -190,13 +202,60 @@ ResponseFlagFilter::ResponseFlagFilter( } } -bool ResponseFlagFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap&) { +bool ResponseFlagFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap&, + const Http::HeaderMap&, const Http::HeaderMap&) { if (configured_flags_ != 0) { return info.intersectResponseFlags(configured_flags_); } return info.hasAnyResponseFlag(); } +GrpcStatusFilter::GrpcStatusFilter( + const envoy::config::filter::accesslog::v2::GrpcStatusFilter& config) { + for (int i = 0; i < config.statuses_size(); i++) { + statuses_.insert(protoToGrpcStatus(config.statuses(i))); + } + + exclude_ = config.exclude(); +} + +bool GrpcStatusFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap&, + const Http::HeaderMap& response_headers, + const Http::HeaderMap& response_trailers) { + // The gRPC specification does not guarantee a gRPC status code will be returned from a gRPC + // request. When it is returned, it will be in the response trailers. With that said, Envoy will + // treat a trailers-only response as a headers-only response, so we have to check the following + // in order: + // 1. response_trailers gRPC status, if it exists. + // 2. response_headers gRPC status, if it exists. + // 3. Inferred from info HTTP status, if it exists. + // + // If none of those options exist, it will default to Grpc::Status::GrpcStatus::Unknown. + const std::array, 3> optional_statuses = {{ + {Grpc::Common::getGrpcStatus(response_trailers)}, + {Grpc::Common::getGrpcStatus(response_headers)}, + {info.responseCode() ? 
absl::optional( + Grpc::Utility::httpToGrpcStatus(info.responseCode().value())) + : absl::nullopt}, + }}; + + Grpc::Status::GrpcStatus status = Grpc::Status::GrpcStatus::Unknown; + for (const auto& optional_status : optional_statuses) { + if (optional_status.has_value()) { + status = optional_status.value(); + break; + } + } + + const bool found = statuses_.find(status) != statuses_.end(); + return exclude_ ? !found : found; +} + +Grpc::Status::GrpcStatus GrpcStatusFilter::protoToGrpcStatus( + envoy::config::filter::accesslog::v2::GrpcStatusFilter_Status status) const { + return static_cast(status); +} + InstanceSharedPtr AccessLogFactory::fromProto(const envoy::config::filter::accesslog::v2::AccessLog& config, Server::Configuration::FactoryContext& context) { diff --git a/source/common/access_log/access_log_impl.h b/source/common/access_log/access_log_impl.h index 34c77a22164f3..3635957aa95fd 100644 --- a/source/common/access_log/access_log_impl.h +++ b/source/common/access_log/access_log_impl.h @@ -2,6 +2,7 @@ #include #include +#include #include #include "envoy/access_log/access_log.h" @@ -9,9 +10,12 @@ #include "envoy/runtime/runtime.h" #include "envoy/server/access_log_config.h" +#include "common/grpc/status.h" #include "common/http/header_utility.h" #include "common/protobuf/protobuf.h" +#include "absl/hash/hash.h" + namespace Envoy { namespace AccessLog { @@ -51,8 +55,9 @@ class StatusCodeFilter : public ComparisonFilter { : ComparisonFilter(config.comparison(), runtime) {} // AccessLog::Filter - bool evaluate(const StreamInfo::StreamInfo& info, - const Http::HeaderMap& request_headers) override; + bool evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap& request_headers, + const Http::HeaderMap& response_headers, + const Http::HeaderMap& response_trailers) override; }; /** @@ -65,8 +70,9 @@ class DurationFilter : public ComparisonFilter { : ComparisonFilter(config.comparison(), runtime) {} // AccessLog::Filter - bool evaluate(const 
StreamInfo::StreamInfo& info, - const Http::HeaderMap& request_headers) override; + bool evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap& request_headers, + const Http::HeaderMap& response_headers, + const Http::HeaderMap& response_trailers) override; }; /** @@ -91,8 +97,9 @@ class AndFilter : public OperatorFilter { Runtime::RandomGenerator& random); // AccessLog::Filter - bool evaluate(const StreamInfo::StreamInfo& info, - const Http::HeaderMap& request_headers) override; + bool evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap& request_headers, + const Http::HeaderMap& response_headers, + const Http::HeaderMap& response_trailers) override; }; /** @@ -104,8 +111,9 @@ class OrFilter : public OperatorFilter { Runtime::RandomGenerator& random); // AccessLog::Filter - bool evaluate(const StreamInfo::StreamInfo& info, - const Http::HeaderMap& request_headers) override; + bool evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap& request_headers, + const Http::HeaderMap& response_headers, + const Http::HeaderMap& response_trailers) override; }; /** @@ -116,8 +124,9 @@ class NotHealthCheckFilter : public Filter { NotHealthCheckFilter() {} // AccessLog::Filter - bool evaluate(const StreamInfo::StreamInfo& info, - const Http::HeaderMap& request_headers) override; + bool evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap& request_headers, + const Http::HeaderMap& response_headers, + const Http::HeaderMap& response_trailers) override; }; /** @@ -126,8 +135,9 @@ class NotHealthCheckFilter : public Filter { class TraceableRequestFilter : public Filter { public: // AccessLog::Filter - bool evaluate(const StreamInfo::StreamInfo& info, - const Http::HeaderMap& request_headers) override; + bool evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap& request_headers, + const Http::HeaderMap& response_headers, + const Http::HeaderMap& response_trailers) override; }; /** @@ -139,8 +149,9 @@ class 
RuntimeFilter : public Filter { Runtime::Loader& runtime, Runtime::RandomGenerator& random); // AccessLog::Filter - bool evaluate(const StreamInfo::StreamInfo& info, - const Http::HeaderMap& request_headers) override; + bool evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap& request_headers, + const Http::HeaderMap& response_headers, + const Http::HeaderMap& response_trailers) override; private: Runtime::Loader& runtime_; @@ -158,8 +169,9 @@ class HeaderFilter : public Filter { HeaderFilter(const envoy::config::filter::accesslog::v2::HeaderFilter& config); // AccessLog::Filter - bool evaluate(const StreamInfo::StreamInfo& info, - const Http::HeaderMap& request_headers) override; + bool evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap& request_headers, + const Http::HeaderMap& response_headers, + const Http::HeaderMap& response_trailers) override; private: std::vector header_data_; @@ -173,13 +185,43 @@ class ResponseFlagFilter : public Filter { ResponseFlagFilter(const envoy::config::filter::accesslog::v2::ResponseFlagFilter& config); // AccessLog::Filter - bool evaluate(const StreamInfo::StreamInfo& info, - const Http::HeaderMap& request_headers) override; + bool evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap& request_headers, + const Http::HeaderMap& response_headers, + const Http::HeaderMap& response_trailers) override; private: uint64_t configured_flags_{}; }; +/** + * Filters requests that have a response with a gRPC status. Because the gRPC protocol does not + * guarantee a gRPC status code, if a gRPC status code is not available, then the filter will infer + * the gRPC status code from an HTTP status code if available. 
+ */ +class GrpcStatusFilter : public Filter { +public: + using GrpcStatusHashSet = + std::unordered_set>; + + GrpcStatusFilter(const envoy::config::filter::accesslog::v2::GrpcStatusFilter& config); + + // AccessLog::Filter + bool evaluate(const StreamInfo::StreamInfo& info, const Http::HeaderMap& request_headers, + const Http::HeaderMap& response_headers, + const Http::HeaderMap& response_trailers) override; + +private: + GrpcStatusHashSet statuses_; + bool exclude_; + + /** + * Converts a Protobuf representation of a gRPC status into the equivalent code version of a gRPC + * status. + */ + Grpc::Status::GrpcStatus + protoToGrpcStatus(envoy::config::filter::accesslog::v2::GrpcStatusFilter_Status status) const; +}; + /** * Access log factory that reads the configuration from proto. */ diff --git a/source/common/access_log/access_log_manager_impl.cc b/source/common/access_log/access_log_manager_impl.cc index ecc62535b0b6f..cbf5f8caa4a6a 100644 --- a/source/common/access_log/access_log_manager_impl.cc +++ b/source/common/access_log/access_log_manager_impl.cc @@ -2,6 +2,11 @@ #include +#include "common/common/assert.h" +#include "common/common/fmt.h" +#include "common/common/lock_guard.h" +#include "common/common/stack_array.h" + namespace Envoy { namespace AccessLog { @@ -11,14 +16,178 @@ void AccessLogManagerImpl::reopen() { } } -Filesystem::FileSharedPtr AccessLogManagerImpl::createAccessLog(const std::string& file_name) { +AccessLogFileSharedPtr AccessLogManagerImpl::createAccessLog(const std::string& file_name) { if (access_logs_.count(file_name)) { return access_logs_[file_name]; } - access_logs_[file_name] = api_.fileSystem().createFile(file_name, dispatcher_, lock_); + access_logs_[file_name] = std::make_shared( + api_.fileSystem().createFile(file_name), dispatcher_, lock_, file_stats_, + file_flush_interval_msec_, api_.threadFactory()); return access_logs_[file_name]; } +AccessLogFileImpl::AccessLogFileImpl(Filesystem::FilePtr&& file, Event::Dispatcher& 
dispatcher, + Thread::BasicLockable& lock, AccessLogFileStats& stats, + std::chrono::milliseconds flush_interval_msec, + Thread::ThreadFactory& thread_factory) + : file_(std::move(file)), file_lock_(lock), + flush_timer_(dispatcher.createTimer([this]() -> void { + stats_.flushed_by_timer_.inc(); + flush_event_.notifyOne(); + flush_timer_->enableTimer(flush_interval_msec_); + })), + thread_factory_(thread_factory), flush_interval_msec_(flush_interval_msec), stats_(stats) { + open(); +} + +void AccessLogFileImpl::open() { + const Api::IoCallBoolResult result = file_->open(); + if (!result.rc_) { + throw EnvoyException( + fmt::format("unable to open file '{}': {}", file_->path(), result.err_->getErrorDetails())); + } +} + +void AccessLogFileImpl::reopen() { reopen_file_ = true; } + +AccessLogFileImpl::~AccessLogFileImpl() { + { + Thread::LockGuard lock(write_lock_); + flush_thread_exit_ = true; + flush_event_.notifyOne(); + } + + if (flush_thread_ != nullptr) { + flush_thread_->join(); + } + + // Flush any remaining data. If file was not opened for some reason, skip flushing part. + if (file_->isOpen()) { + if (flush_buffer_.length() > 0) { + doWrite(flush_buffer_); + } + + const Api::IoCallBoolResult result = file_->close(); + ASSERT(result.rc_, fmt::format("unable to close file '{}': {}", file_->path(), + result.err_->getErrorDetails())); + } +} + +void AccessLogFileImpl::doWrite(Buffer::Instance& buffer) { + uint64_t num_slices = buffer.getRawSlices(nullptr, 0); + STACK_ARRAY(slices, Buffer::RawSlice, num_slices); + buffer.getRawSlices(slices.begin(), num_slices); + + // We must do the actual writes to disk under lock, so that we don't intermix chunks from + // different AccessLogFileImpl pointing to the same underlying file. This can happen either via + // hot restart or if calling code opens the same underlying file into a different + // AccessLogFileImpl in the same process. 
+ // TODO PERF: Currently, we use a single cross process lock to serialize all disk writes. This + // will never block network workers, but does mean that only a single flush thread can + // actually flush to disk. In the future it would be nice if we did away with the cross + // process lock or had multiple locks. + { + Thread::LockGuard lock(file_lock_); + for (const Buffer::RawSlice& slice : slices) { + absl::string_view data(static_cast(slice.mem_), slice.len_); + const Api::IoCallSizeResult result = file_->write(data); + ASSERT(result.rc_ == static_cast(slice.len_)); + stats_.write_completed_.inc(); + } + } + + stats_.write_total_buffered_.sub(buffer.length()); + buffer.drain(buffer.length()); +} + +void AccessLogFileImpl::flushThreadFunc() { + + while (true) { + std::unique_lock flush_lock; + + { + Thread::LockGuard write_lock(write_lock_); + + // flush_event_ can be woken up either by large enough flush_buffer or by timer. + // In case it was timer, flush_buffer_ can be empty. + while (flush_buffer_.length() == 0 && !flush_thread_exit_) { + // CondVar::wait() does not throw, so it's safe to pass the mutex rather than the guard. 
+ flush_event_.wait(write_lock_); + } + + if (flush_thread_exit_) { + return; + } + + flush_lock = std::unique_lock(flush_lock_); + ASSERT(flush_buffer_.length() > 0); + about_to_write_buffer_.move(flush_buffer_); + ASSERT(flush_buffer_.length() == 0); + } + + // if we failed to open file before, then simply ignore + if (file_->isOpen()) { + try { + if (reopen_file_) { + reopen_file_ = false; + const Api::IoCallBoolResult result = file_->close(); + ASSERT(result.rc_, fmt::format("unable to close file '{}': {}", file_->path(), + result.err_->getErrorDetails())); + open(); + } + + doWrite(about_to_write_buffer_); + } catch (const EnvoyException&) { + stats_.reopen_failed_.inc(); + } + } + } +} + +void AccessLogFileImpl::flush() { + std::unique_lock flush_buffer_lock; + + { + Thread::LockGuard write_lock(write_lock_); + + // flush_lock_ must be held while checking this or else it is + // possible that flushThreadFunc() has already moved data from + // flush_buffer_ to about_to_write_buffer_, has unlocked write_lock_, + // but has not yet completed doWrite(). This would allow flush() to + // return before the pending data has actually been written to disk. 
+ flush_buffer_lock = std::unique_lock(flush_lock_); + + if (flush_buffer_.length() == 0) { + return; + } + + about_to_write_buffer_.move(flush_buffer_); + ASSERT(flush_buffer_.length() == 0); + } + + doWrite(about_to_write_buffer_); +} + +void AccessLogFileImpl::write(absl::string_view data) { + Thread::LockGuard lock(write_lock_); + + if (flush_thread_ == nullptr) { + createFlushStructures(); + } + + stats_.write_buffered_.inc(); + stats_.write_total_buffered_.add(data.length()); + flush_buffer_.add(data.data(), data.size()); + if (flush_buffer_.length() > MIN_FLUSH_SIZE) { + flush_event_.notifyOne(); + } +} + +void AccessLogFileImpl::createFlushStructures() { + flush_thread_ = thread_factory_.createThread([this]() -> void { flushThreadFunc(); }); + flush_timer_->enableTimer(flush_interval_msec_); +} + } // namespace AccessLog } // namespace Envoy diff --git a/source/common/access_log/access_log_manager_impl.h b/source/common/access_log/access_log_manager_impl.h index 502ca50c74e86..d009a933042d1 100644 --- a/source/common/access_log/access_log_manager_impl.h +++ b/source/common/access_log/access_log_manager_impl.h @@ -5,24 +5,130 @@ #include "envoy/access_log/access_log.h" #include "envoy/api/api.h" +#include "envoy/event/dispatcher.h" +#include "envoy/filesystem/filesystem.h" +#include "envoy/stats/stats_macros.h" +#include "envoy/stats/store.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/thread.h" namespace Envoy { + +// clang-format off +#define ACCESS_LOG_FILE_STATS(COUNTER, GAUGE) \ + COUNTER(write_buffered) \ + COUNTER(write_completed) \ + COUNTER(flushed_by_timer) \ + COUNTER(reopen_failed) \ + GAUGE (write_total_buffered) +// clang-format on + +struct AccessLogFileStats { + ACCESS_LOG_FILE_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) +}; + namespace AccessLog { class AccessLogManagerImpl : public AccessLogManager { public: - AccessLogManagerImpl(Api::Api& api, Event::Dispatcher& dispatcher, Thread::BasicLockable& lock) - : 
api_(api), dispatcher_(dispatcher), lock_(lock) {} + AccessLogManagerImpl(std::chrono::milliseconds file_flush_interval_msec, Api::Api& api, + Event::Dispatcher& dispatcher, Thread::BasicLockable& lock, + Stats::Store& stats_store) + : file_flush_interval_msec_(file_flush_interval_msec), api_(api), dispatcher_(dispatcher), + lock_(lock), file_stats_{ACCESS_LOG_FILE_STATS( + POOL_COUNTER_PREFIX(stats_store, "access_log_file."), + POOL_GAUGE_PREFIX(stats_store, "access_log_file."))} {} // AccessLog::AccessLogManager void reopen() override; - Filesystem::FileSharedPtr createAccessLog(const std::string& file_name) override; + AccessLogFileSharedPtr createAccessLog(const std::string& file_name) override; private: + const std::chrono::milliseconds file_flush_interval_msec_; Api::Api& api_; Event::Dispatcher& dispatcher_; Thread::BasicLockable& lock_; - std::unordered_map access_logs_; + AccessLogFileStats file_stats_; + std::unordered_map access_logs_; +}; + +/** + * This is a file implementation geared for writing out access logs. It turn out that in certain + * cases even if a standard file is opened with O_NONBLOCK, the kernel can still block when writing. + * This implementation uses a flush thread per file, with the idea there there aren't that many + * files. If this turns out to be a good implementation we can potentially have a single flush + * thread that flushes all files, but we will start with this. + */ +class AccessLogFileImpl : public AccessLogFile { +public: + AccessLogFileImpl(Filesystem::FilePtr&& file, Event::Dispatcher& dispatcher, + Thread::BasicLockable& lock, AccessLogFileStats& stats_, + std::chrono::milliseconds flush_interval_msec, + Thread::ThreadFactory& thread_factory); + ~AccessLogFileImpl(); + + // AccessLog::AccessLogFile + void write(absl::string_view data) override; + + /** + * Reopen file asynchronously. + * This only sets reopen flag, actual reopen operation is delayed. + * Reopen happens before the next write operation. 
+ */ + void reopen() override; + void flush() override; + +private: + void doWrite(Buffer::Instance& buffer); + void flushThreadFunc(); + void open(); + void createFlushStructures(); + + // Minimum size before the flush thread will be told to flush. + static const uint64_t MIN_FLUSH_SIZE = 1024 * 64; + + Filesystem::FilePtr file_; + + // These locks are always acquired in the following order if multiple locks are held: + // 1) write_lock_ + // 2) flush_lock_ + // 3) file_lock_ + Thread::BasicLockable& file_lock_; // This lock is used only by the flush thread when writing + // to disk. This is used to make sure that file blocks do + // not get interleaved by multiple processes writing to + // the same file during hot-restart. + Thread::MutexBasicLockable flush_lock_; // This lock is used to prevent simultaneous flushes from + // the flush thread and a synchronous flush. This protects + // concurrent access to the about_to_write_buffer_, fd_, + // and all other data used during flushing and file + // re-opening. + Thread::MutexBasicLockable + write_lock_; // The lock is used when filling the flush buffer. It allows + // multiple threads to write to the same file at relatively + // high performance. It is always local to the process. + Thread::ThreadPtr flush_thread_; + Thread::CondVar flush_event_; + std::atomic flush_thread_exit_{}; + std::atomic reopen_file_{}; + Buffer::OwnedImpl + flush_buffer_ GUARDED_BY(write_lock_); // This buffer is used by multiple threads. It gets + // filled and then flushed either when max size is + // reached or when a timer fires. + // TODO(jmarantz): this should be GUARDED_BY(flush_lock_) but the analysis cannot poke through + // the std::make_unique assignment. I do not believe it's possible to annotate this properly now + // due to limitations in the clang thread annotation analysis. + Buffer::OwnedImpl about_to_write_buffer_; // This buffer is used only by the flush thread. 
Data + // is moved from flush_buffer_ under lock, and then + // the lock is released so that flush_buffer_ can + // continue to fill. This buffer is then used for the + // final write to disk. + Event::TimerPtr flush_timer_; + Thread::ThreadFactory& thread_factory_; + const std::chrono::milliseconds flush_interval_msec_; // Time interval buffer gets flushed no + // matter if it reached the MIN_FLUSH_SIZE + // or not. + AccessLogFileStats& stats_; }; } // namespace AccessLog diff --git a/source/common/api/BUILD b/source/common/api/BUILD index c42676952895d..7ae9523647301 100644 --- a/source/common/api/BUILD +++ b/source/common/api/BUILD @@ -4,6 +4,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", + "envoy_select_hot_restart", ) envoy_package() @@ -14,17 +15,23 @@ envoy_cc_library( hdrs = ["api_impl.h"], deps = [ "//include/envoy/api:api_interface", - "//source/common/api:os_sys_calls_lib", "//source/common/common:thread_lib", "//source/common/event:dispatcher_lib", - "//source/common/filesystem:filesystem_lib", ], ) envoy_cc_library( name = "os_sys_calls_lib", - srcs = ["os_sys_calls_impl.cc"], - hdrs = ["os_sys_calls_impl.h"], + srcs = ["os_sys_calls_impl.cc"] + select({ + "//bazel:linux_x86_64": ["os_sys_calls_impl_linux.cc"], + "//bazel:linux_aarch64": ["os_sys_calls_impl_linux.cc"], + "//conditions:default": [], + }) + envoy_select_hot_restart(["os_sys_calls_impl_hot_restart.cc"]), + hdrs = ["os_sys_calls_impl.h"] + select({ + "//bazel:linux_x86_64": ["os_sys_calls_impl_linux.h"], + "//bazel:linux_aarch64": ["os_sys_calls_impl_linux.h"], + "//conditions:default": [], + }) + envoy_select_hot_restart(["os_sys_calls_impl_hot_restart.h"]), deps = [ "//include/envoy/api:os_sys_calls_interface", "//source/common/singleton:threadsafe_singleton", diff --git a/source/common/api/api_impl.cc b/source/common/api/api_impl.cc index ab6e1479b56c6..9f997f377a09b 100644 --- a/source/common/api/api_impl.cc +++ b/source/common/api/api_impl.cc @@ 
-9,15 +9,16 @@ namespace Envoy { namespace Api { -Impl::Impl(std::chrono::milliseconds file_flush_interval_msec, - Thread::ThreadFactory& thread_factory, Stats::Store& stats_store, - Event::TimeSystem& time_system) - : thread_factory_(thread_factory), - file_system_(file_flush_interval_msec, thread_factory, stats_store), - time_system_(time_system) {} +Impl::Impl(Thread::ThreadFactory& thread_factory, Stats::Store&, Event::TimeSystem& time_system, + Filesystem::Instance& file_system) + : thread_factory_(thread_factory), time_system_(time_system), file_system_(file_system) {} Event::DispatcherPtr Impl::allocateDispatcher() { - return std::make_unique(*this); + return std::make_unique(*this, time_system_); +} + +Event::DispatcherPtr Impl::allocateDispatcher(Buffer::WatermarkFactoryPtr&& factory) { + return std::make_unique(std::move(factory), *this, time_system_); } } // namespace Api diff --git a/source/common/api/api_impl.h b/source/common/api/api_impl.h index 050b18cfca12c..61733b4ce53fe 100644 --- a/source/common/api/api_impl.h +++ b/source/common/api/api_impl.h @@ -8,8 +8,6 @@ #include "envoy/filesystem/filesystem.h" #include "envoy/thread/thread.h" -#include "common/filesystem/filesystem_impl.h" - namespace Envoy { namespace Api { @@ -18,19 +16,20 @@ namespace Api { */ class Impl : public Api { public: - Impl(std::chrono::milliseconds file_flush_interval_msec, Thread::ThreadFactory& thread_factory, - Stats::Store& stats_store, Event::TimeSystem& time_system); + Impl(Thread::ThreadFactory& thread_factory, Stats::Store&, Event::TimeSystem& time_system, + Filesystem::Instance& file_system); // Api::Api Event::DispatcherPtr allocateDispatcher() override; + Event::DispatcherPtr allocateDispatcher(Buffer::WatermarkFactoryPtr&& watermark_factory) override; Thread::ThreadFactory& threadFactory() override { return thread_factory_; } Filesystem::Instance& fileSystem() override { return file_system_; } - Event::TimeSystem& timeSystem() override { return time_system_; } + 
TimeSource& timeSource() override { return time_system_; } private: Thread::ThreadFactory& thread_factory_; - Filesystem::InstanceImpl file_system_; Event::TimeSystem& time_system_; + Filesystem::Instance& file_system_; }; } // namespace Api diff --git a/source/common/api/os_sys_calls_impl.cc b/source/common/api/os_sys_calls_impl.cc index c6e6a43c17e15..e19bd50822a1a 100644 --- a/source/common/api/os_sys_calls_impl.cc +++ b/source/common/api/os_sys_calls_impl.cc @@ -18,21 +18,11 @@ SysCallIntResult OsSysCallsImpl::ioctl(int sockfd, unsigned long int request, vo return {rc, errno}; } -SysCallIntResult OsSysCallsImpl::open(const std::string& full_path, int flags, int mode) { - const int rc = ::open(full_path.c_str(), flags, mode); - return {rc, errno}; -} - SysCallIntResult OsSysCallsImpl::close(int fd) { const int rc = ::close(fd); return {rc, errno}; } -SysCallSizeResult OsSysCallsImpl::write(int fd, const void* buffer, size_t num_bytes) { - const ssize_t rc = ::write(fd, buffer, num_bytes); - return {rc, errno}; -} - SysCallSizeResult OsSysCallsImpl::writev(int fd, const iovec* iovec, int num_iovec) { const ssize_t rc = ::writev(fd, iovec, num_iovec); return {rc, errno}; @@ -48,13 +38,9 @@ SysCallSizeResult OsSysCallsImpl::recv(int socket, void* buffer, size_t length, return {rc, errno}; } -SysCallIntResult OsSysCallsImpl::shmOpen(const char* name, int oflag, mode_t mode) { - const int rc = ::shm_open(name, oflag, mode); - return {rc, errno}; -} - -SysCallIntResult OsSysCallsImpl::shmUnlink(const char* name) { - const int rc = ::shm_unlink(name); +SysCallSizeResult OsSysCallsImpl::recvfrom(int sockfd, void* buffer, size_t length, int flags, + struct sockaddr* addr, socklen_t* addrlen) { + const ssize_t rc = ::recvfrom(sockfd, buffer, length, flags, addr, addrlen); return {rc, errno}; } diff --git a/source/common/api/os_sys_calls_impl.h b/source/common/api/os_sys_calls_impl.h index eed6d1798645f..53a13fb19b912 100644 --- a/source/common/api/os_sys_calls_impl.h +++ 
b/source/common/api/os_sys_calls_impl.h @@ -12,14 +12,12 @@ class OsSysCallsImpl : public OsSysCalls { // Api::OsSysCalls SysCallIntResult bind(int sockfd, const sockaddr* addr, socklen_t addrlen) override; SysCallIntResult ioctl(int sockfd, unsigned long int request, void* argp) override; - SysCallIntResult open(const std::string& full_path, int flags, int mode) override; - SysCallSizeResult write(int fd, const void* buffer, size_t num_bytes) override; SysCallSizeResult writev(int fd, const iovec* iovec, int num_iovec) override; SysCallSizeResult readv(int fd, const iovec* iovec, int num_iovec) override; SysCallSizeResult recv(int socket, void* buffer, size_t length, int flags) override; + SysCallSizeResult recvfrom(int sockfd, void* buffer, size_t length, int flags, + struct sockaddr* addr, socklen_t* addrlen) override; SysCallIntResult close(int fd) override; - SysCallIntResult shmOpen(const char* name, int oflag, mode_t mode) override; - SysCallIntResult shmUnlink(const char* name) override; SysCallIntResult ftruncate(int fd, off_t length) override; SysCallPtrResult mmap(void* addr, size_t length, int prot, int flags, int fd, off_t offset) override; diff --git a/source/common/api/os_sys_calls_impl_hot_restart.cc b/source/common/api/os_sys_calls_impl_hot_restart.cc new file mode 100644 index 0000000000000..ab0496ccecad0 --- /dev/null +++ b/source/common/api/os_sys_calls_impl_hot_restart.cc @@ -0,0 +1,19 @@ +#include "common/api/os_sys_calls_impl_hot_restart.h" + +#include + +namespace Envoy { +namespace Api { + +SysCallIntResult HotRestartOsSysCallsImpl::shmOpen(const char* name, int oflag, mode_t mode) { + const int rc = ::shm_open(name, oflag, mode); + return {rc, errno}; +} + +SysCallIntResult HotRestartOsSysCallsImpl::shmUnlink(const char* name) { + const int rc = ::shm_unlink(name); + return {rc, errno}; +} + +} // namespace Api +} // namespace Envoy diff --git a/source/common/api/os_sys_calls_impl_hot_restart.h 
b/source/common/api/os_sys_calls_impl_hot_restart.h new file mode 100644 index 0000000000000..bfce5dd9bc182 --- /dev/null +++ b/source/common/api/os_sys_calls_impl_hot_restart.h @@ -0,0 +1,20 @@ +#pragma once + +#include "envoy/api/os_sys_calls_hot_restart.h" + +#include "common/singleton/threadsafe_singleton.h" + +namespace Envoy { +namespace Api { + +class HotRestartOsSysCallsImpl : public HotRestartOsSysCalls { +public: + // Api::HotRestartOsSysCalls + SysCallIntResult shmOpen(const char* name, int oflag, mode_t mode) override; + SysCallIntResult shmUnlink(const char* name) override; +}; + +typedef ThreadSafeSingleton HotRestartOsSysCallsSingleton; + +} // namespace Api +} // namespace Envoy \ No newline at end of file diff --git a/source/common/api/os_sys_calls_impl_linux.cc b/source/common/api/os_sys_calls_impl_linux.cc new file mode 100644 index 0000000000000..fcf2fafdc7d0d --- /dev/null +++ b/source/common/api/os_sys_calls_impl_linux.cc @@ -0,0 +1,20 @@ +#if !defined(__linux__) +#error "Linux platform file is part of non-Linux build." +#endif + +#include "common/api/os_sys_calls_impl_linux.h" + +#include +#include + +namespace Envoy { +namespace Api { + +SysCallIntResult LinuxOsSysCallsImpl::sched_getaffinity(pid_t pid, size_t cpusetsize, + cpu_set_t* mask) { + const int rc = ::sched_getaffinity(pid, cpusetsize, mask); + return {rc, errno}; +} + +} // namespace Api +} // namespace Envoy diff --git a/source/common/api/os_sys_calls_impl_linux.h b/source/common/api/os_sys_calls_impl_linux.h new file mode 100644 index 0000000000000..d3b08fe427d9f --- /dev/null +++ b/source/common/api/os_sys_calls_impl_linux.h @@ -0,0 +1,23 @@ +#pragma once + +#if !defined(__linux__) +#error "Linux platform file is part of non-Linux build." 
+#endif + +#include "envoy/api/os_sys_calls_linux.h" + +#include "common/singleton/threadsafe_singleton.h" + +namespace Envoy { +namespace Api { + +class LinuxOsSysCallsImpl : public LinuxOsSysCalls { +public: + // Api::LinuxOsSysCalls + SysCallIntResult sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t* mask) override; +}; + +typedef ThreadSafeSingleton LinuxOsSysCallsSingleton; + +} // namespace Api +} // namespace Envoy diff --git a/source/common/buffer/BUILD b/source/common/buffer/BUILD index 617cce095b2d0..f4e6d9602c190 100644 --- a/source/common/buffer/BUILD +++ b/source/common/buffer/BUILD @@ -24,7 +24,6 @@ envoy_cc_library( hdrs = ["buffer_impl.h"], deps = [ "//include/envoy/buffer:buffer_interface", - "//source/common/api:os_sys_calls_lib", "//source/common/common:non_copyable", "//source/common/common:stack_array", "//source/common/event:libevent_lib", diff --git a/source/common/buffer/buffer_impl.cc b/source/common/buffer/buffer_impl.cc index 192920f6b0575..5cfa59af62085 100644 --- a/source/common/buffer/buffer_impl.cc +++ b/source/common/buffer/buffer_impl.cc @@ -3,7 +3,6 @@ #include #include -#include "common/api/os_sys_calls_impl.h" #include "common/common/assert.h" #include "common/common/stack_array.h" @@ -12,28 +11,47 @@ namespace Envoy { namespace Buffer { -// RawSlice is the same structure as evbuffer_iovec. This was put into place to avoid leaking -// libevent into most code since we will likely replace evbuffer with our own implementation at -// some point. However, we can avoid a bunch of copies since the structure is the same. 
-static_assert(sizeof(RawSlice) == sizeof(evbuffer_iovec), "RawSlice != evbuffer_iovec"); -static_assert(offsetof(RawSlice, mem_) == offsetof(evbuffer_iovec, iov_base), - "RawSlice != evbuffer_iovec"); -static_assert(offsetof(RawSlice, len_) == offsetof(evbuffer_iovec, iov_len), - "RawSlice != evbuffer_iovec"); - -void OwnedImpl::add(const void* data, uint64_t size) { evbuffer_add(buffer_.get(), data, size); } +void OwnedImpl::add(const void* data, uint64_t size) { + if (old_impl_) { + evbuffer_add(buffer_.get(), data, size); + } else { + const char* src = static_cast(data); + bool new_slice_needed = slices_.empty(); + while (size != 0) { + if (new_slice_needed) { + slices_.emplace_back(OwnedSlice::create(size)); + } + uint64_t copy_size = slices_.back()->append(src, size); + src += copy_size; + size -= copy_size; + length_ += copy_size; + new_slice_needed = true; + } + } +} void OwnedImpl::addBufferFragment(BufferFragment& fragment) { - evbuffer_add_reference( - buffer_.get(), fragment.data(), fragment.size(), - [](const void*, size_t, void* arg) { static_cast(arg)->done(); }, &fragment); + if (old_impl_) { + evbuffer_add_reference( + buffer_.get(), fragment.data(), fragment.size(), + [](const void*, size_t, void* arg) { static_cast(arg)->done(); }, + &fragment); + } else { + length_ += fragment.size(); + slices_.emplace_back(std::make_unique(fragment)); + } } void OwnedImpl::add(absl::string_view data) { - evbuffer_add(buffer_.get(), data.data(), data.size()); + if (old_impl_) { + evbuffer_add(buffer_.get(), data.data(), data.size()); + } else { + add(data.data(), data.size()); + } } void OwnedImpl::add(const Instance& data) { + ASSERT(&data != this); uint64_t num_slices = data.getRawSlices(nullptr, 0); STACK_ARRAY(slices, RawSlice, num_slices); data.getRawSlices(slices.begin(), num_slices); @@ -43,157 +61,495 @@ void OwnedImpl::add(const Instance& data) { } void OwnedImpl::prepend(absl::string_view data) { - evbuffer_prepend(buffer_.get(), data.data(), 
data.size()); + if (old_impl_) { + // Prepending an empty string seems to mess up libevent internally. + // evbuffer_prepend doesn't have a check for empty (unlike + // evbuffer_prepend_buffer which does). This then results in an allocation of + // an empty chain, which causes problems with a following move/append. This + // only seems to happen the original buffer was created via + // addBufferFragment(), this forces the code execution path in + // evbuffer_prepend related to immutable buffers. + if (data.empty()) { + return; + } + evbuffer_prepend(buffer_.get(), data.data(), data.size()); + } else { + uint64_t size = data.size(); + bool new_slice_needed = slices_.empty(); + while (size != 0) { + if (new_slice_needed) { + slices_.emplace_front(OwnedSlice::create(size)); + } + uint64_t copy_size = slices_.front()->prepend(data.data(), size); + size -= copy_size; + length_ += copy_size; + new_slice_needed = true; + } + } } void OwnedImpl::prepend(Instance& data) { - int rc = - evbuffer_prepend_buffer(buffer_.get(), static_cast(data).buffer().get()); - ASSERT(rc == 0); - ASSERT(data.length() == 0); - static_cast(data).postProcess(); + ASSERT(&data != this); + ASSERT(isSameBufferImpl(data)); + // See the comments in move() for why we do the static_cast. 
+ if (old_impl_) { + ASSERT(dynamic_cast(&data) != nullptr); + int rc = + evbuffer_prepend_buffer(buffer_.get(), static_cast(data).buffer().get()); + ASSERT(rc == 0); + ASSERT(data.length() == 0); + static_cast(data).postProcess(); + } else { + OwnedImpl& other = static_cast(data); + while (!other.slices_.empty()) { + uint64_t slice_size = other.slices_.back()->dataSize(); + length_ += slice_size; + slices_.emplace_front(std::move(other.slices_.back())); + other.slices_.pop_back(); + other.length_ -= slice_size; + } + other.postProcess(); + } } void OwnedImpl::commit(RawSlice* iovecs, uint64_t num_iovecs) { - int rc = - evbuffer_commit_space(buffer_.get(), reinterpret_cast(iovecs), num_iovecs); - ASSERT(rc == 0); -} + if (old_impl_) { + int rc = + evbuffer_commit_space(buffer_.get(), reinterpret_cast(iovecs), num_iovecs); + ASSERT(rc == 0); + } else { + if (num_iovecs == 0) { + return; + } + // Find the slices in the buffer that correspond to the iovecs: + // First, scan backward from the end of the buffer to find the last slice containing + // any content. Reservations are made from the end of the buffer, and out-of-order commits + // aren't supported, so any slices before this point cannot match the iovecs being committed. + ssize_t slice_index = static_cast(slices_.size()) - 1; + while (slice_index >= 0 && slices_[slice_index]->dataSize() == 0) { + slice_index--; + } + if (slice_index < 0) { + slice_index = 0; + if (!slices_[0]) { + return; + } + } -void OwnedImpl::copyOut(size_t start, uint64_t size, void* data) const { - ASSERT(start + size <= length()); + // Next, scan forward and attempt to match the slices against iovecs. 
+ uint64_t num_slices_committed = 0; + while (num_slices_committed < num_iovecs) { + if (slices_[slice_index]->commit(iovecs[num_slices_committed])) { + length_ += iovecs[num_slices_committed].len_; + num_slices_committed++; + } + slice_index++; + if (slice_index == static_cast(slices_.size())) { + break; + } + } - evbuffer_ptr start_ptr; - int rc = evbuffer_ptr_set(buffer_.get(), &start_ptr, start, EVBUFFER_PTR_SET); - ASSERT(rc != -1); + ASSERT(num_slices_committed > 0); + } +} - ev_ssize_t copied = evbuffer_copyout_from(buffer_.get(), &start_ptr, data, size); - ASSERT(static_cast(copied) == size); +void OwnedImpl::copyOut(size_t start, uint64_t size, void* data) const { + if (old_impl_) { + ASSERT(start + size <= length()); + + evbuffer_ptr start_ptr; + int rc = evbuffer_ptr_set(buffer_.get(), &start_ptr, start, EVBUFFER_PTR_SET); + ASSERT(rc != -1); + + ev_ssize_t copied = evbuffer_copyout_from(buffer_.get(), &start_ptr, data, size); + ASSERT(static_cast(copied) == size); + } else { + uint64_t bytes_to_skip = start; + uint8_t* dest = static_cast(data); + for (const auto& slice : slices_) { + if (size == 0) { + break; + } + uint64_t data_size = slice->dataSize(); + if (data_size <= bytes_to_skip) { + // The offset where the caller wants to start copying is after the end of this slice, + // so just skip over this slice completely. + bytes_to_skip -= data_size; + continue; + } + uint64_t copy_size = std::min(size, data_size - bytes_to_skip); + memcpy(dest, slice->data() + bytes_to_skip, copy_size); + size -= copy_size; + dest += copy_size; + // Now that we've started copying, there are no bytes left to skip over. If there + // is any more data to be copied, the next iteration can start copying from the very + // beginning of the next slice. 
+ bytes_to_skip = 0; + } + ASSERT(size == 0); + } } void OwnedImpl::drain(uint64_t size) { - ASSERT(size <= length()); - int rc = evbuffer_drain(buffer_.get(), size); - ASSERT(rc == 0); + if (old_impl_) { + ASSERT(size <= length()); + int rc = evbuffer_drain(buffer_.get(), size); + ASSERT(rc == 0); + } else { + while (size != 0) { + if (slices_.empty()) { + break; + } + uint64_t slice_size = slices_.front()->dataSize(); + if (slice_size <= size) { + slices_.pop_front(); + length_ -= slice_size; + size -= slice_size; + } else { + slices_.front()->drain(size); + length_ -= size; + size = 0; + } + } + } } uint64_t OwnedImpl::getRawSlices(RawSlice* out, uint64_t out_size) const { - return evbuffer_peek(buffer_.get(), -1, nullptr, reinterpret_cast(out), - out_size); + if (old_impl_) { + return evbuffer_peek(buffer_.get(), -1, nullptr, reinterpret_cast(out), + out_size); + } else { + uint64_t num_slices = 0; + for (const auto& slice : slices_) { + if (slice->dataSize() == 0) { + continue; + } + if (num_slices < out_size) { + out[num_slices].mem_ = slice->data(); + out[num_slices].len_ = slice->dataSize(); + } + // Per the definition of getRawSlices in include/envoy/buffer/buffer.h, we need to return + // the total number of slices needed to access all the data in the buffer, which can be + // larger than out_size. So we keep iterating and counting non-empty slices here, even + // if all the caller-supplied slices have been filled. + num_slices++; + } + return num_slices; + } } -uint64_t OwnedImpl::length() const { return evbuffer_get_length(buffer_.get()); } +uint64_t OwnedImpl::length() const { + if (old_impl_) { + return evbuffer_get_length(buffer_.get()); + } else { +#ifndef NDEBUG + // When running in debug mode, verify that the precomputed length matches the sum + // of the lengths of the slices. 
+ uint64_t length = 0; + for (const auto& slice : slices_) { + length += slice->dataSize(); + } + ASSERT(length == length_); +#endif + + return length_; + } +} void* OwnedImpl::linearize(uint32_t size) { - ASSERT(size <= length()); - void* const ret = evbuffer_pullup(buffer_.get(), size); - RELEASE_ASSERT(ret != nullptr || size == 0, - "Failure to linearize may result in buffer overflow by the caller."); - return ret; + RELEASE_ASSERT(size <= length(), "Linearize size exceeds buffer size"); + if (old_impl_) { + void* const ret = evbuffer_pullup(buffer_.get(), size); + RELEASE_ASSERT(ret != nullptr || size == 0, + "Failure to linearize may result in buffer overflow by the caller."); + return ret; + } else { + if (slices_.empty()) { + return nullptr; + } + uint64_t linearized_size = 0; + uint64_t num_slices_to_linearize = 0; + for (const auto& slice : slices_) { + num_slices_to_linearize++; + linearized_size += slice->dataSize(); + if (linearized_size >= size) { + break; + } + } + if (num_slices_to_linearize > 1) { + auto new_slice = OwnedSlice::create(linearized_size); + uint64_t bytes_copied = 0; + Slice::Reservation reservation = new_slice->reserve(linearized_size); + ASSERT(reservation.mem_ != nullptr); + ASSERT(reservation.len_ == linearized_size); + auto dest = static_cast(reservation.mem_); + do { + uint64_t data_size = slices_.front()->dataSize(); + memcpy(dest, slices_.front()->data(), data_size); + bytes_copied += data_size; + dest += data_size; + slices_.pop_front(); + } while (bytes_copied < linearized_size); + ASSERT(dest == static_cast(reservation.mem_) + linearized_size); + new_slice->commit(reservation); + slices_.emplace_front(std::move(new_slice)); + } + return slices_.front()->data(); + } } void OwnedImpl::move(Instance& rhs) { - // We do the static cast here because in practice we only have one buffer implementation right - // now and this is safe. Using the evbuffer move routines require having access to both evbuffers. 
- // This is a reasonable compromise in a high performance path where we want to maintain an - // abstraction in case we get rid of evbuffer later. - int rc = evbuffer_add_buffer(buffer_.get(), static_cast(rhs).buffer().get()); - ASSERT(rc == 0); - static_cast(rhs).postProcess(); + ASSERT(&rhs != this); + ASSERT(isSameBufferImpl(rhs)); + if (old_impl_) { + // We do the static cast here because in practice we only have one buffer implementation right + // now and this is safe. Using the evbuffer move routines require having access to both + // evbuffers. This is a reasonable compromise in a high performance path where we want to + // maintain an abstraction in case we get rid of evbuffer later. + ASSERT(dynamic_cast(&rhs) != nullptr); + int rc = evbuffer_add_buffer(buffer_.get(), static_cast(rhs).buffer().get()); + ASSERT(rc == 0); + static_cast(rhs).postProcess(); + } else { + // We do the static cast here because in practice we only have one buffer implementation right + // now and this is safe. This is a reasonable compromise in a high performance path where we + // want to maintain an abstraction. + OwnedImpl& other = static_cast(rhs); + while (!other.slices_.empty()) { + const uint64_t slice_size = other.slices_.front()->dataSize(); + slices_.emplace_back(std::move(other.slices_.front())); + other.slices_.pop_front(); + length_ += slice_size; + other.length_ -= slice_size; + } + other.postProcess(); + } } void OwnedImpl::move(Instance& rhs, uint64_t length) { - // See move() above for why we do the static cast. - int rc = evbuffer_remove_buffer(static_cast(rhs).buffer().get(), buffer_.get(), - length); - ASSERT(static_cast(rc) == length); - static_cast(rhs).postProcess(); + ASSERT(&rhs != this); + ASSERT(isSameBufferImpl(rhs)); + if (old_impl_) { + // See move() above for why we do the static cast. 
+ int rc = evbuffer_remove_buffer(static_cast(rhs).buffer().get(), + buffer_.get(), length); + ASSERT(static_cast(rc) == length); + static_cast(rhs).postProcess(); + } else { + // See move() above for why we do the static cast. + OwnedImpl& other = static_cast(rhs); + while (length != 0 && !other.slices_.empty()) { + const uint64_t slice_size = other.slices_.front()->dataSize(); + const uint64_t copy_size = std::min(slice_size, length); + if (copy_size == 0) { + other.slices_.pop_front(); + } else if (copy_size < slice_size) { + // TODO(brian-pane) add reference-counting to allow slices to share their storage + // and eliminate the copy for this partial-slice case? + add(other.slices_.front()->data(), copy_size); + other.slices_.front()->drain(copy_size); + other.length_ -= copy_size; + } else { + slices_.emplace_back(std::move(other.slices_.front())); + other.slices_.pop_front(); + length_ += slice_size; + other.length_ -= slice_size; + } + length -= copy_size; + } + other.postProcess(); + } } -Api::SysCallIntResult OwnedImpl::read(int fd, uint64_t max_length) { +Api::IoCallUint64Result OwnedImpl::read(Network::IoHandle& io_handle, uint64_t max_length) { if (max_length == 0) { - return {0, 0}; + return Api::ioCallUint64ResultNoError(); } constexpr uint64_t MaxSlices = 2; RawSlice slices[MaxSlices]; const uint64_t num_slices = reserve(max_length, slices, MaxSlices); - STACK_ARRAY(iov, iovec, num_slices); - uint64_t num_slices_to_read = 0; - uint64_t num_bytes_to_read = 0; - for (; num_slices_to_read < num_slices && num_bytes_to_read < max_length; num_slices_to_read++) { - iov[num_slices_to_read].iov_base = slices[num_slices_to_read].mem_; - const size_t slice_length = std::min(slices[num_slices_to_read].len_, - static_cast(max_length - num_bytes_to_read)); - iov[num_slices_to_read].iov_len = slice_length; - num_bytes_to_read += slice_length; - } - ASSERT(num_slices_to_read <= MaxSlices); - ASSERT(num_bytes_to_read <= max_length); - auto& os_syscalls = 
Api::OsSysCallsSingleton::get(); - const Api::SysCallSizeResult result = - os_syscalls.readv(fd, iov.begin(), static_cast(num_slices_to_read)); - if (result.rc_ < 0) { - return {static_cast(result.rc_), result.errno_}; - } - uint64_t num_slices_to_commit = 0; - uint64_t bytes_to_commit = result.rc_; - ASSERT(bytes_to_commit <= max_length); - while (bytes_to_commit != 0) { - slices[num_slices_to_commit].len_ = - std::min(slices[num_slices_to_commit].len_, static_cast(bytes_to_commit)); - ASSERT(bytes_to_commit >= slices[num_slices_to_commit].len_); - bytes_to_commit -= slices[num_slices_to_commit].len_; - num_slices_to_commit++; - } - ASSERT(num_slices_to_commit <= num_slices); - commit(slices, num_slices_to_commit); - return {static_cast(result.rc_), result.errno_}; + Api::IoCallUint64Result result = io_handle.readv(max_length, slices, num_slices); + if (old_impl_) { + if (!result.ok()) { + return result; + } + uint64_t num_slices_to_commit = 0; + uint64_t bytes_to_commit = result.rc_; + ASSERT(bytes_to_commit <= max_length); + while (bytes_to_commit != 0) { + slices[num_slices_to_commit].len_ = + std::min(slices[num_slices_to_commit].len_, static_cast(bytes_to_commit)); + ASSERT(bytes_to_commit >= slices[num_slices_to_commit].len_); + bytes_to_commit -= slices[num_slices_to_commit].len_; + num_slices_to_commit++; + } + ASSERT(num_slices_to_commit <= num_slices); + commit(slices, num_slices_to_commit); + } else { + uint64_t bytes_to_commit = result.ok() ? 
result.rc_ : 0; + ASSERT(bytes_to_commit <= max_length); + for (uint64_t i = 0; i < num_slices; i++) { + slices[i].len_ = std::min(slices[i].len_, static_cast(bytes_to_commit)); + bytes_to_commit -= slices[i].len_; + } + commit(slices, num_slices); + } + return result; } uint64_t OwnedImpl::reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) { - int ret = evbuffer_reserve_space(buffer_.get(), length, reinterpret_cast(iovecs), - num_iovecs); - RELEASE_ASSERT(ret >= 1, "Failure to allocate may result in callers writing to uninitialized " - "memory, buffer overflows, etc"); - return static_cast(ret); + if (num_iovecs == 0 || length == 0) { + return 0; + } + if (old_impl_) { + int ret = evbuffer_reserve_space(buffer_.get(), length, + reinterpret_cast(iovecs), num_iovecs); + RELEASE_ASSERT(ret >= 1, "Failure to allocate may result in callers writing to uninitialized " + "memory, buffer overflows, etc"); + return static_cast(ret); + } else { + // Check whether there are any empty slices with reservable space at the end of the buffer. + size_t first_reservable_slice = slices_.size(); + while (first_reservable_slice > 0) { + if (slices_[first_reservable_slice - 1]->reservableSize() == 0) { + break; + } + first_reservable_slice--; + if (slices_[first_reservable_slice]->dataSize() != 0) { + // There is some content in this slice, so anything in front of it is nonreservable. + break; + } + } + + // Having found the sequence of reservable slices at the back of the buffer, reserve + // as much space as possible from each one. 
+ uint64_t num_slices_used = 0; + uint64_t bytes_remaining = length; + size_t slice_index = first_reservable_slice; + while (slice_index < slices_.size() && bytes_remaining != 0 && num_slices_used < num_iovecs) { + auto& slice = slices_[slice_index]; + const uint64_t reservation_size = std::min(slice->reservableSize(), bytes_remaining); + if (num_slices_used + 1 == num_iovecs && reservation_size < bytes_remaining) { + // There is only one iovec left, and this next slice does not have enough space to + // complete the reservation. Stop iterating, with last one iovec still unpopulated, + // so the code following this loop can allocate a new slice to hold the rest of the + // reservation. + break; + } + iovecs[num_slices_used] = slice->reserve(reservation_size); + bytes_remaining -= iovecs[num_slices_used].len_; + num_slices_used++; + slice_index++; + } + + // If needed, allocate one more slice at the end to provide the remainder of the reservation. + if (bytes_remaining != 0) { + slices_.emplace_back(OwnedSlice::create(bytes_remaining)); + iovecs[num_slices_used] = slices_.back()->reserve(bytes_remaining); + bytes_remaining -= iovecs[num_slices_used].len_; + num_slices_used++; + } + + ASSERT(num_slices_used <= num_iovecs); + ASSERT(bytes_remaining == 0); + return num_slices_used; + } } ssize_t OwnedImpl::search(const void* data, uint64_t size, size_t start) const { - evbuffer_ptr start_ptr; - if (-1 == evbuffer_ptr_set(buffer_.get(), &start_ptr, start, EVBUFFER_PTR_SET)) { + if (old_impl_) { + evbuffer_ptr start_ptr; + if (-1 == evbuffer_ptr_set(buffer_.get(), &start_ptr, start, EVBUFFER_PTR_SET)) { + return -1; + } + + evbuffer_ptr result_ptr = + evbuffer_search(buffer_.get(), static_cast(data), size, &start_ptr); + return result_ptr.pos; + } else { + // This implementation uses the same search algorithm as evbuffer_search(), a naive + // scan that requires O(M*N) comparisons in the worst case. 
+ // TODO(brian-pane): replace this with a more efficient search if it shows up + // prominently in CPU profiling. + if (size == 0) { + return (start <= length_) ? start : -1; + } + ssize_t offset = 0; + const uint8_t* needle = static_cast(data); + for (size_t slice_index = 0; slice_index < slices_.size(); slice_index++) { + const auto& slice = slices_[slice_index]; + uint64_t slice_size = slice->dataSize(); + if (slice_size <= start) { + start -= slice_size; + offset += slice_size; + continue; + } + const uint8_t* slice_start = slice->data(); + const uint8_t* haystack = slice_start; + const uint8_t* haystack_end = haystack + slice_size; + haystack += start; + while (haystack < haystack_end) { + // Search within this slice for the first byte of the needle. + const uint8_t* first_byte_match = + static_cast(memchr(haystack, needle[0], haystack_end - haystack)); + if (first_byte_match == nullptr) { + break; + } + // After finding a match for the first byte of the needle, check whether the following + // bytes in the buffer match the remainder of the needle. Note that the match can span + // two or more slices. + size_t i = 1; + size_t match_index = slice_index; + const uint8_t* match_next = first_byte_match + 1; + const uint8_t* match_end = haystack_end; + while (i < size) { + if (match_next >= match_end) { + // We've hit the end of this slice, so continue checking against the next slice. + match_index++; + if (match_index == slices_.size()) { + // We've hit the end of the entire buffer. + break; + } + const auto& match_slice = slices_[match_index]; + match_next = match_slice->data(); + match_end = match_next + match_slice->dataSize(); + continue; + } + if (*match_next++ != needle[i]) { + break; + } + i++; + } + if (i == size) { + // Successful match of the entire needle. + return offset + (first_byte_match - slice_start); + } + // If this wasn't a successful match, start scanning again at the next byte. 
+ haystack = first_byte_match + 1; + } + start = 0; + offset += slice_size; + } return -1; } - - evbuffer_ptr result_ptr = - evbuffer_search(buffer_.get(), static_cast(data), size, &start_ptr); - return result_ptr.pos; } -Api::SysCallIntResult OwnedImpl::write(int fd) { +Api::IoCallUint64Result OwnedImpl::write(Network::IoHandle& io_handle) { constexpr uint64_t MaxSlices = 16; RawSlice slices[MaxSlices]; const uint64_t num_slices = std::min(getRawSlices(slices, MaxSlices), MaxSlices); - STACK_ARRAY(iov, iovec, num_slices); - uint64_t num_slices_to_write = 0; - for (uint64_t i = 0; i < num_slices; i++) { - if (slices[i].mem_ != nullptr && slices[i].len_ != 0) { - iov[num_slices_to_write].iov_base = slices[i].mem_; - iov[num_slices_to_write].iov_len = slices[i].len_; - num_slices_to_write++; - } - } - if (num_slices_to_write == 0) { - return {0, 0}; - } - auto& os_syscalls = Api::OsSysCallsSingleton::get(); - const Api::SysCallSizeResult result = os_syscalls.writev(fd, iov.begin(), num_slices_to_write); - if (result.rc_ > 0) { + Api::IoCallUint64Result result = io_handle.writev(slices, num_slices); + if (result.ok() && result.rc_ > 0) { drain(static_cast(result.rc_)); } - return {static_cast(result.rc_), result.errno_}; + return result; } -OwnedImpl::OwnedImpl() : buffer_(evbuffer_new()) {} +OwnedImpl::OwnedImpl() : old_impl_(use_old_impl_) { + if (old_impl_) { + buffer_ = evbuffer_new(); + } +} OwnedImpl::OwnedImpl(absl::string_view data) : OwnedImpl() { add(data); } @@ -218,5 +574,33 @@ std::string OwnedImpl::toString() const { return output; } +void OwnedImpl::postProcess() {} + +void OwnedImpl::appendSliceForTest(const void* data, uint64_t size) { + if (old_impl_) { + OwnedImpl rhs(data, size); + move(rhs); + } else { + slices_.emplace_back(OwnedSlice::create(data, size)); + length_ += size; + } +} + +void OwnedImpl::appendSliceForTest(absl::string_view data) { + appendSliceForTest(data.data(), data.size()); +} + +void OwnedImpl::useOldImpl(bool use_old_impl) { 
use_old_impl_ = use_old_impl; } + +bool OwnedImpl::isSameBufferImpl(const Instance& rhs) const { + const OwnedImpl* other = dynamic_cast(&rhs); + if (other == nullptr) { + return false; + } + return usesOldImpl() == other->usesOldImpl(); +} + +bool OwnedImpl::use_old_impl_ = false; + } // namespace Buffer } // namespace Envoy diff --git a/source/common/buffer/buffer_impl.h b/source/common/buffer/buffer_impl.h index e215d53ea81c8..b09c0773ac8bc 100644 --- a/source/common/buffer/buffer_impl.h +++ b/source/common/buffer/buffer_impl.h @@ -1,16 +1,428 @@ #pragma once +#include #include +#include #include #include "envoy/buffer/buffer.h" +#include "envoy/network/io_handle.h" +#include "common/common/assert.h" #include "common/common/non_copyable.h" #include "common/event/libevent.h" namespace Envoy { namespace Buffer { +/** + * A Slice manages a contiguous block of bytes. + * The block is arranged like this: + * |<- data_size() -->|<- reservable_size() ->| + * +-----------------+------------------+-----------------------+ + * | Drained | Data | Reservable | + * | Unused space | Usable content | New content can be | + * | that formerly | | added here with | + * | was in the Data | | reserve()/commit() | + * | section | | | + * +-----------------+------------------+-----------------------+ + * ^ + * | + * data() + */ +class Slice { +public: + using Reservation = RawSlice; + + virtual ~Slice() = default; + + /** + * @return a pointer to the start of the usable content. + */ + const uint8_t* data() const { return base_ + data_; } + + /** + * @return a pointer to the start of the usable content. + */ + uint8_t* data() { return base_ + data_; } + + /** + * @return the size in bytes of the usable content. + */ + uint64_t dataSize() const { return reservable_ - data_; } + + /** + * Remove the first `size` bytes of usable content. Runs in O(1) time. + * @param size number of bytes to remove. If greater than data_size(), the result is undefined. 
+ */ + void drain(uint64_t size) { + ASSERT(data_ + size <= reservable_); + data_ += size; + if (data_ == reservable_ && !reservation_outstanding_) { + // There is no more content in the slice, and there is no outstanding reservation, + // so reset the Data section to the start of the slice to facilitate reuse. + data_ = reservable_ = 0; + } + } + + /** + * @return the number of bytes available to be reserve()d. + * @note If reserve() has been called without a corresponding commit(), this method + * should return 0. + * @note Read-only implementations of Slice should return zero from this method. + */ + uint64_t reservableSize() const { + if (reservation_outstanding_) { + return 0; + } + return capacity_ - reservable_; + } + + /** + * Reserve `size` bytes that the caller can populate with content. The caller SHOULD then + * call commit() to add the newly populated content from the Reserved section to the Data + * section. + * @note If there is already an outstanding reservation (i.e., a reservation obtained + * from reserve() that has not been released by calling commit()), this method will + * return {nullptr, 0}. + * @param size the number of bytes to reserve. The Slice implementation MAY reserve + * fewer bytes than requested (for example, if it doesn't have enough room in the + * Reservable section to fulfill the whole request). + * @return a tuple containing the address of the start of resulting reservation and the + * reservation size in bytes. If the address is null, the reservation failed. + * @note Read-only implementations of Slice should return {nullptr, 0} from this method. 
+ */ + Reservation reserve(uint64_t size) { + if (reservation_outstanding_ || size == 0) { + return {nullptr, 0}; + } + uint64_t available_size = capacity_ - reservable_; + if (available_size == 0) { + return {nullptr, 0}; + } + uint64_t reservation_size = std::min(size, available_size); + void* reservation = &(base_[reservable_]); + reservation_outstanding_ = true; + return {reservation, reservation_size}; + } + + /** + * Commit a Reservation that was previously obtained from a call to reserve(). + * The Reservation's size is added to the Data section. + * @param reservation a reservation obtained from a previous call to reserve(). + * If the reservation is not from this Slice, commit() will return false. + * If the caller is committing fewer bytes than provided by reserve(), it + * should change the mem_ field of the reservation before calling commit(). + * For example, if a caller reserve()s 4KB to do a nonblocking socket read, + * and the read only returns two bytes, the caller should set + * reservation.mem_ = 2 and then call `commit(reservation)`. + * @return whether the Reservation was successfully committed to the Slice. + */ + bool commit(const Reservation& reservation) { + if (static_cast(reservation.mem_) != base_ + reservable_ || + reservable_ + reservation.len_ > capacity_ || reservable_ >= capacity_) { + // The reservation is not from this OwnedSlice. + return false; + } + ASSERT(reservation_outstanding_); + reservable_ += reservation.len_; + reservation_outstanding_ = false; + return true; + } + + /** + * Copy as much of the supplied data as possible to the end of the slice. + * @param data start of the data to copy. + * @param size number of bytes to copy. + * @return number of bytes copied (may be a smaller than size, may even be zero). 
+ */ + uint64_t append(const void* data, uint64_t size) { + if (reservation_outstanding_) { + return 0; + } + uint64_t copy_size = std::min(size, reservableSize()); + uint8_t* dest = base_ + reservable_; + reservable_ += copy_size; + // NOLINTNEXTLINE(clang-analyzer-core.NullDereference) + memcpy(dest, data, copy_size); + return copy_size; + } + + /** + * Copy as much of the supplied data as possible to the front of the slice. + * If only part of the data will fit in the slice, the bytes from the _end_ are + * copied. + * @param data start of the data to copy. + * @param size number of bytes to copy. + * @return number of bytes copied (may be a smaller than size, may even be zero). + */ + uint64_t prepend(const void* data, uint64_t size) { + if (reservation_outstanding_) { + return 0; + } + const uint8_t* src = static_cast(data); + uint64_t copy_size; + if (dataSize() == 0) { + // There is nothing in the slice, so put the data at the very end in case the caller + // later tries to prepend anything else in front of it. + copy_size = std::min(size, reservableSize()); + reservable_ = capacity_; + data_ = capacity_ - copy_size; + } else { + if (data_ == 0) { + // There is content in the slice, and no space in front of it to write anything. + return 0; + } + // Write into the space in front of the slice's current content. 
+ copy_size = std::min(size, data_); + data_ -= copy_size; + } + memcpy(base_ + data_, src + size - copy_size, copy_size); + return copy_size; + } + +protected: + Slice(uint64_t data, uint64_t reservable, uint64_t capacity) + : data_(data), reservable_(reservable), capacity_(capacity) {} + + /** Start of the slice - subclasses must set this */ + uint8_t* base_{nullptr}; + + /** Offset in bytes from the start of the slice to the start of the Data section */ + uint64_t data_; + + /** Offset in bytes from the start of the slice to the start of the Reservable section */ + uint64_t reservable_; + + /** Total number of bytes in the slice */ + uint64_t capacity_; + + /** Whether reserve() has been called without a corresponding commit(). */ + bool reservation_outstanding_{false}; +}; + +using SlicePtr = std::unique_ptr; + +class OwnedSlice : public Slice { +public: + /** + * Create an empty OwnedSlice. + * @param capacity number of bytes of space the slice should have. + * @return an OwnedSlice with at least the specified capacity. + */ + static SlicePtr create(uint64_t capacity) { + uint64_t slice_capacity = sliceSize(capacity); + return SlicePtr(new (slice_capacity) OwnedSlice(slice_capacity)); + } + + /** + * Create an OwnedSlice and initialize it with a copy of the supplied copy. + * @param data the content to copy into the slice. + * @param size length of the content. + * @return an OwnedSlice containing a copy of the content, which may (dependent on + * the internal implementation) have a nonzero amount of reservable space at the end. 
+ */ + static SlicePtr create(const void* data, uint64_t size) { + uint64_t slice_capacity = sliceSize(size); + std::unique_ptr slice(new (slice_capacity) OwnedSlice(slice_capacity)); + memcpy(slice->base_, data, size); + slice->reservable_ = size; + return slice; + } + + // Custom delete operator to keep C++14 from using the global operator delete(void*, size_t), + // which would result in the compiler error: + // "exception cleanup for this placement new selects non-placement operator delete" + static void operator delete(void* address) { ::operator delete(address); } + +private: + static void* operator new(size_t object_size, size_t data_size) { + return ::operator new(object_size + data_size); + } + + OwnedSlice(uint64_t size) : Slice(0, 0, size) { base_ = storage_; } + + /** + * Compute a slice size big enough to hold a specified amount of data. + * @param data_size the minimum amount of data the slice must be able to store, in bytes. + * @return a recommended slice size, in bytes. + */ + static uint64_t sliceSize(uint64_t data_size) { + static constexpr uint64_t PageSize = 4096; + const uint64_t num_pages = (sizeof(OwnedSlice) + data_size + PageSize - 1) / PageSize; + return num_pages * PageSize - sizeof(OwnedSlice); + } + + uint8_t storage_[]; +}; + +/** + * Queue of SlicePtr that supports efficient read and write access to both + * the front and the back of the queue. + * @note This class has similar properties to std::deque. The reason for using + * a custom deque implementation is that benchmark testing during development + * revealed that std::deque was too slow to reach performance parity with the + * prior evbuffer-based buffer implementation. + */ +class SliceDeque { +public: + SliceDeque() : ring_(inline_ring_), capacity_(InlineRingCapacity) {} + + SliceDeque(SliceDeque&& rhs) noexcept { + // This custom move constructor is needed so that ring_ will be updated properly. 
+ std::move(rhs.inline_ring_, rhs.inline_ring_ + InlineRingCapacity, inline_ring_); + external_ring_ = std::move(rhs.external_ring_); + ring_ = (external_ring_ != nullptr) ? external_ring_.get() : inline_ring_; + start_ = rhs.start_; + size_ = rhs.size_; + capacity_ = rhs.capacity_; + } + + SliceDeque& operator=(SliceDeque&& rhs) noexcept { + // This custom assignment move operator is needed so that ring_ will be updated properly. + std::move(rhs.inline_ring_, rhs.inline_ring_ + InlineRingCapacity, inline_ring_); + external_ring_ = std::move(rhs.external_ring_); + ring_ = (external_ring_ != nullptr) ? external_ring_.get() : inline_ring_; + start_ = rhs.start_; + size_ = rhs.size_; + capacity_ = rhs.capacity_; + return *this; + } + + void emplace_back(SlicePtr&& slice) { + growRing(); + size_t index = internalIndex(size_); + ring_[index] = std::move(slice); + size_++; + } + + void emplace_front(SlicePtr&& slice) { + growRing(); + start_ = (start_ == 0) ? capacity_ - 1 : start_ - 1; + ring_[start_] = std::move(slice); + size_++; + } + + bool empty() const { return size() == 0; } + size_t size() const { return size_; } + + SlicePtr& front() { return ring_[start_]; } + const SlicePtr& front() const { return ring_[start_]; } + SlicePtr& back() { return ring_[internalIndex(size_ - 1)]; } + const SlicePtr& back() const { return ring_[internalIndex(size_ - 1)]; } + + SlicePtr& operator[](size_t i) { return ring_[internalIndex(i)]; } + const SlicePtr& operator[](size_t i) const { return ring_[internalIndex(i)]; } + + void pop_front() { + if (size() == 0) { + return; + } + front() = SlicePtr(); + size_--; + start_++; + if (start_ == capacity_) { + start_ = 0; + } + } + + void pop_back() { + if (size() == 0) { + return; + } + back() = SlicePtr(); + size_--; + } + + /** + * Forward const iterator for SliceDeque. + * @note this implementation currently supports the minimum functionality needed to support + * the `for (const auto& slice : slice_deque)` idiom. 
+ */ + class ConstIterator { + public: + const SlicePtr& operator*() { return deque_[index_]; } + + ConstIterator operator++() { + index_++; + return *this; + } + + bool operator!=(const ConstIterator& rhs) const { + return &deque_ != &rhs.deque_ || index_ != rhs.index_; + } + + friend class SliceDeque; + + private: + ConstIterator(const SliceDeque& deque, size_t index) : deque_(deque), index_(index) {} + const SliceDeque& deque_; + size_t index_; + }; + + ConstIterator begin() const noexcept { return ConstIterator(*this, 0); } + + ConstIterator end() const noexcept { return ConstIterator(*this, size_); } + +private: + constexpr static size_t InlineRingCapacity = 8; + + size_t internalIndex(size_t index) const { + size_t internal_index = start_ + index; + if (internal_index >= capacity_) { + internal_index -= capacity_; + ASSERT(internal_index < capacity_); + } + return internal_index; + } + + void growRing() { + if (size_ < capacity_) { + return; + } + const size_t new_capacity = capacity_ * 2; + auto new_ring = std::make_unique(new_capacity); + for (size_t i = 0; i < new_capacity; i++) { + ASSERT(new_ring[i] == nullptr); + } + size_t src = start_; + size_t dst = 0; + for (size_t i = 0; i < size_; i++) { + new_ring[dst++] = std::move(ring_[src++]); + if (src == capacity_) { + src = 0; + } + } + for (size_t i = 0; i < capacity_; i++) { + ASSERT(ring_[i].get() == nullptr); + } + external_ring_.swap(new_ring); + ring_ = external_ring_.get(); + start_ = 0; + capacity_ = new_capacity; + } + + SlicePtr inline_ring_[InlineRingCapacity]; + std::unique_ptr external_ring_; + SlicePtr* ring_; // points to start of either inline or external ring. 
+ size_t start_{0}; + size_t size_{0}; + size_t capacity_; +}; + +class UnownedSlice : public Slice { +public: + UnownedSlice(BufferFragment& fragment) + : Slice(0, fragment.size(), fragment.size()), fragment_(fragment) { + base_ = static_cast(const_cast(fragment.data())); + } + + ~UnownedSlice() override { fragment_.done(); } + +private: + BufferFragment& fragment_; +}; + /** * An implementation of BufferFragment where a releasor callback is called when the data is * no longer needed. @@ -55,10 +467,34 @@ class LibEventInstance : public Instance { }; /** - * Wraps an allocated and owned evbuffer. + * Wrapper for uint64_t that asserts upon integer overflow and underflow. + */ +class OverflowDetectingUInt64 { +public: + operator uint64_t() const { return value_; } + + OverflowDetectingUInt64& operator+=(uint64_t size) { + uint64_t new_value = value_ + size; + RELEASE_ASSERT(new_value >= value_, "64-bit unsigned integer overflowed"); + value_ = new_value; + return *this; + } + + OverflowDetectingUInt64& operator-=(uint64_t size) { + RELEASE_ASSERT(value_ >= size, "unsigned integer underflowed"); + value_ -= size; + return *this; + } + +private: + uint64_t value_{0}; +}; + +/** + * Wraps an allocated and owned buffer. * - * Note that due to the internals of move() accessing buffer(), OwnedImpl is not - * compatible with non-LibEventInstance buffers. + * Note that due to the internals of move(), OwnedImpl is not + * compatible with non-OwnedImpl buffers. 
*/ class OwnedImpl : public LibEventInstance { public: @@ -67,7 +503,7 @@ class OwnedImpl : public LibEventInstance { OwnedImpl(const Instance& data); OwnedImpl(const void* data, uint64_t size); - // LibEventInstance + // Buffer::Instance void add(const void* data, uint64_t size) override; void addBufferFragment(BufferFragment& fragment) override; void add(absl::string_view data) override; @@ -82,16 +518,67 @@ class OwnedImpl : public LibEventInstance { void* linearize(uint32_t size) override; void move(Instance& rhs) override; void move(Instance& rhs, uint64_t length) override; - Api::SysCallIntResult read(int fd, uint64_t max_length) override; + Api::IoCallUint64Result read(Network::IoHandle& io_handle, uint64_t max_length) override; uint64_t reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) override; ssize_t search(const void* data, uint64_t size, size_t start) const override; - Api::SysCallIntResult write(int fd) override; - void postProcess() override {} + Api::IoCallUint64Result write(Network::IoHandle& io_handle) override; std::string toString() const override; + // LibEventInstance Event::Libevent::BufferPtr& buffer() override { return buffer_; } + virtual void postProcess() override; + + /** + * Create a new slice at the end of the buffer, and copy the supplied content into it. + * @param data start of the content to copy. + * + */ + void appendSliceForTest(const void* data, uint64_t size); + + /** + * Create a new slice at the end of the buffer, and copy the supplied string into it. + * @param data the string to append to the buffer. + */ + void appendSliceForTest(absl::string_view data); + + // Support for choosing the buffer implementation at runtime. + // TODO(brian-pane) remove this once the new implementation has been + // running in production for a while. + + /** @return whether this buffer uses the old evbuffer-based implementation. 
*/ + bool usesOldImpl() const { return old_impl_; } + + /** + * @param use_old_impl whether to use the evbuffer-based implementation for new buffers + * @warning Except for testing code, this method should be called at most once per process, + * before any OwnedImpl objects are created. The reason is that it is unsafe to + * mix and match buffers with different implementations. The move() method, + * in particular, only works if the source and destination objects are using + * the same destination. + */ + static void useOldImpl(bool use_old_impl); private: + /** + * @param rhs another buffer + * @return whether the rhs buffer is also an instance of OwnedImpl (or a subclass) that + * uses the same internal implementation as this buffer. + */ + bool isSameBufferImpl(const Instance& rhs) const; + + /** Whether to use the old evbuffer implementation when constructing new OwnedImpl objects. */ + static bool use_old_impl_; + + /** Whether this buffer uses the old evbuffer implementation. */ + bool old_impl_; + + /** Ring buffer of slices. */ + SliceDeque slices_; + + /** Sum of the dataSize of all slices. 
*/ + OverflowDetectingUInt64 length_; + + /** Used when old_impl_==true */ Event::Libevent::BufferPtr buffer_; }; diff --git a/source/common/buffer/watermark_buffer.cc b/source/common/buffer/watermark_buffer.cc index 2e7cb5d9bd2e0..f8ef04bfefce8 100644 --- a/source/common/buffer/watermark_buffer.cc +++ b/source/common/buffer/watermark_buffer.cc @@ -50,8 +50,8 @@ void WatermarkBuffer::move(Instance& rhs, uint64_t length) { checkHighWatermark(); } -Api::SysCallIntResult WatermarkBuffer::read(int fd, uint64_t max_length) { - Api::SysCallIntResult result = OwnedImpl::read(fd, max_length); +Api::IoCallUint64Result WatermarkBuffer::read(Network::IoHandle& io_handle, uint64_t max_length) { + Api::IoCallUint64Result result = OwnedImpl::read(io_handle, max_length); checkHighWatermark(); return result; } @@ -62,8 +62,8 @@ uint64_t WatermarkBuffer::reserve(uint64_t length, RawSlice* iovecs, uint64_t nu return bytes_reserved; } -Api::SysCallIntResult WatermarkBuffer::write(int fd) { - Api::SysCallIntResult result = OwnedImpl::write(fd); +Api::IoCallUint64Result WatermarkBuffer::write(Network::IoHandle& io_handle) { + Api::IoCallUint64Result result = OwnedImpl::write(io_handle); checkLowWatermark(); return result; } diff --git a/source/common/buffer/watermark_buffer.h b/source/common/buffer/watermark_buffer.h index 31459570ddb81..9dd12d3d8aff9 100644 --- a/source/common/buffer/watermark_buffer.h +++ b/source/common/buffer/watermark_buffer.h @@ -30,9 +30,9 @@ class WatermarkBuffer : public OwnedImpl { void drain(uint64_t size) override; void move(Instance& rhs) override; void move(Instance& rhs, uint64_t length) override; - Api::SysCallIntResult read(int fd, uint64_t max_length) override; + Api::IoCallUint64Result read(Network::IoHandle& io_handle, uint64_t max_length) override; uint64_t reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) override; - Api::SysCallIntResult write(int fd) override; + Api::IoCallUint64Result write(Network::IoHandle& io_handle) override; 
void postProcess() override { checkLowWatermark(); } void setWatermarks(uint32_t watermark) { setWatermarks(watermark / 2, watermark); } diff --git a/source/common/chromium_url/BUILD b/source/common/chromium_url/BUILD new file mode 100644 index 0000000000000..9b07e76b00130 --- /dev/null +++ b/source/common/chromium_url/BUILD @@ -0,0 +1,28 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "chromium_url", + srcs = [ + "url_canon.cc", + "url_canon_internal.cc", + "url_canon_path.cc", + "url_canon_stdstring.cc", + ], + hdrs = [ + "envoy_shim.h", + "url_canon.h", + "url_canon_internal.h", + "url_canon_stdstring.h", + "url_parse.h", + "url_parse_internal.h", + ], + deps = ["//source/common/common:assert_lib"], +) diff --git a/source/common/chromium_url/LICENSE b/source/common/chromium_url/LICENSE new file mode 100644 index 0000000000000..a32e00ce6be36 --- /dev/null +++ b/source/common/chromium_url/LICENSE @@ -0,0 +1,27 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/source/common/chromium_url/README.md b/source/common/chromium_url/README.md new file mode 100644 index 0000000000000..32e251c82d4d2 --- /dev/null +++ b/source/common/chromium_url/README.md @@ -0,0 +1,16 @@ +This is a manually minified variant of +https://chromium.googlesource.com/chromium/src.git/+archive/74.0.3729.15/url.tar.gz, +providing just the parts needed for `url::CanonicalizePath()`. This is intended +to support a security release fix for CVE-2019-9901. Long term we need this to +be moved to absl or QUICHE for upgrades and long-term support. + +Some specific transforms of interest: +* The namespace `url` was changed to `chromium_url`. +* `url_parse.h` is minified to just `Component` and flattened back into the URL + directory. It does not contain any non-Chromium authored code any longer and + so does not have a separate LICENSE. +* `envoy_shim.h` adapts various macros to the Envoy context. +* Anything not reachable from `url::CanonicalizePath()` has been dropped. +* Header include paths have changed as needed. +* BUILD was manually written. +* Various clang-tidy and format fixes. 
diff --git a/source/common/chromium_url/envoy_shim.h b/source/common/chromium_url/envoy_shim.h new file mode 100644 index 0000000000000..2b7443926c1f5 --- /dev/null +++ b/source/common/chromium_url/envoy_shim.h @@ -0,0 +1,17 @@ +#pragma once + +#include "common/common/assert.h" + +// This is a minimal Envoy adaptation layer for the Chromium URL library. +// NOLINT(namespace-envoy) + +#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&) = delete; \ + TypeName& operator=(const TypeName&) = delete + +#define EXPORT_TEMPLATE_DECLARE(x) +#define EXPORT_TEMPLATE_DEFINE(x) +#define COMPONENT_EXPORT(x) + +#define DCHECK(x) ASSERT(x) +#define NOTREACHED() NOT_REACHED_GCOVR_EXCL_LINE diff --git a/source/common/chromium_url/url_canon.cc b/source/common/chromium_url/url_canon.cc new file mode 100644 index 0000000000000..b9ad1b829726c --- /dev/null +++ b/source/common/chromium_url/url_canon.cc @@ -0,0 +1,16 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "common/chromium_url/url_canon.h" + +#include "common/chromium_url/envoy_shim.h" + +namespace chromium_url { + +template class EXPORT_TEMPLATE_DEFINE(COMPONENT_EXPORT(URL)) CanonOutputT; + +} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon.h b/source/common/chromium_url/url_canon.h new file mode 100644 index 0000000000000..89a11bb0418b7 --- /dev/null +++ b/source/common/chromium_url/url_canon.h @@ -0,0 +1,186 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef URL_URL_CANON_H_ +#define URL_URL_CANON_H_ + +#include +#include + +#include "common/chromium_url/envoy_shim.h" +#include "common/chromium_url/url_parse.h" + +namespace chromium_url { + +// Canonicalizer output ------------------------------------------------------- + +// Base class for the canonicalizer output, this maintains a buffer and +// supports simple resizing and append operations on it. +// +// It is VERY IMPORTANT that no virtual function calls be made on the common +// code path. We only have two virtual function calls, the destructor and a +// resize function that is called when the existing buffer is not big enough. +// The derived class is then in charge of setting up our buffer which we will +// manage. +template class CanonOutputT { +public: + CanonOutputT() : buffer_(NULL), buffer_len_(0), cur_len_(0) {} + virtual ~CanonOutputT() {} + + // Implemented to resize the buffer. This function should update the buffer + // pointer to point to the new buffer, and any old data up to |cur_len_| in + // the buffer must be copied over. + // + // The new size |sz| must be larger than buffer_len_. + virtual void Resize(int sz) = 0; + + // Accessor for returning a character at a given position. The input offset + // must be in the valid range. + inline T at(int offset) const { return buffer_[offset]; } + + // Sets the character at the given position. The given position MUST be less + // than the length(). + inline void set(int offset, T ch) { buffer_[offset] = ch; } + + // Returns the number of characters currently in the buffer. + inline int length() const { return cur_len_; } + + // Returns the current capacity of the buffer. The length() is the number of + // characters that have been declared to be written, but the capacity() is + // the number that can be written without reallocation. 
If the caller must + // write many characters at once, it can make sure there is enough capacity, + // write the data, then use set_size() to declare the new length(). + int capacity() const { return buffer_len_; } + + // Called by the user of this class to get the output. The output will NOT + // be NULL-terminated. Call length() to get the + // length. + const T* data() const { return buffer_; } + T* data() { return buffer_; } + + // Shortens the URL to the new length. Used for "backing up" when processing + // relative paths. This can also be used if an external function writes a lot + // of data to the buffer (when using the "Raw" version below) beyond the end, + // to declare the new length. + // + // This MUST NOT be used to expand the size of the buffer beyond capacity(). + void set_length(int new_len) { cur_len_ = new_len; } + + // This is the most performance critical function, since it is called for + // every character. + void push_back(T ch) { + // In VC2005, putting this common case first speeds up execution + // dramatically because this branch is predicted as taken. + if (cur_len_ < buffer_len_) { + buffer_[cur_len_] = ch; + cur_len_++; + return; + } + + // Grow the buffer to hold at least one more item. Hopefully we won't have + // to do this very often. + if (!Grow(1)) + return; + + // Actually do the insertion. + buffer_[cur_len_] = ch; + cur_len_++; + } + + // Appends the given string to the output. + void Append(const T* str, int str_len) { + if (cur_len_ + str_len > buffer_len_) { + if (!Grow(cur_len_ + str_len - buffer_len_)) + return; + } + for (int i = 0; i < str_len; i++) + buffer_[cur_len_ + i] = str[i]; + cur_len_ += str_len; + } + + void ReserveSizeIfNeeded(int estimated_size) { + // Reserve a bit extra to account for escaped chars. + if (estimated_size > buffer_len_) + Resize(estimated_size + 8); + } + +protected: + // Grows the given buffer so that it can fit at least |min_additional| + // characters. 
Returns true if the buffer could be resized, false on OOM.
+  bool Grow(int min_additional) {
+    static const int kMinBufferLen = 16;
+    int new_len = (buffer_len_ == 0) ? kMinBufferLen : buffer_len_;
+    do {
+      if (new_len >= (1 << 30)) // Prevent overflow below.
+        return false;
+      new_len *= 2;
+    } while (new_len < buffer_len_ + min_additional);
+    Resize(new_len);
+    return true;
+  }
+
+  T* buffer_;
+  int buffer_len_;
+
+  // Used characters in the buffer.
+  int cur_len_;
+};
+
+// Simple implementation of the CanonOutput using new[]. This class
+// also supports a static buffer so if it is allocated on the stack, most
+// URLs can be canonicalized with no heap allocations.
+template <typename T, int fixed_capacity = 1024> class RawCanonOutputT : public CanonOutputT<T> {
+public:
+  RawCanonOutputT() : CanonOutputT<T>() {
+    this->buffer_ = fixed_buffer_;
+    this->buffer_len_ = fixed_capacity;
+  }
+  ~RawCanonOutputT() override {
+    if (this->buffer_ != fixed_buffer_)
+      delete[] this->buffer_;
+  }
+
+  void Resize(int sz) override {
+    T* new_buf = new T[sz];
+    memcpy(new_buf, this->buffer_, sizeof(T) * (this->cur_len_ < sz ? this->cur_len_ : sz));
+    if (this->buffer_ != fixed_buffer_)
+      delete[] this->buffer_;
+    this->buffer_ = new_buf;
+    this->buffer_len_ = sz;
+  }
+
+protected:
+  T fixed_buffer_[fixed_capacity];
+};
+
+// Explicitly instantiate commonly used instantiations.
+extern template class EXPORT_TEMPLATE_DECLARE(COMPONENT_EXPORT(URL)) CanonOutputT<char>;
+
+// Normally, all canonicalization output is in narrow characters. We support
+// the templates so it can also be used internally if a wide buffer is
+// required.
+typedef CanonOutputT<char> CanonOutput;
+
+template <int fixed_capacity>
+class RawCanonOutput : public RawCanonOutputT<char, fixed_capacity> {};
+
+// Path. If the input does not begin in a slash (including if the input is
+// empty), we'll prepend a slash to the path to make it canonical.
+// +// The 8-bit version assumes UTF-8 encoding, but does not verify the validity +// of the UTF-8 (i.e., you can have invalid UTF-8 sequences, invalid +// characters, etc.). Normally, URLs will come in as UTF-16, so this isn't +// an issue. Somebody giving us an 8-bit path is responsible for generating +// the path that the server expects (we'll escape high-bit characters), so +// if something is invalid, it's their problem. +COMPONENT_EXPORT(URL) +bool CanonicalizePath(const char* spec, const Component& path, CanonOutput* output, + Component* out_path); + +} // namespace chromium_url + +#endif // URL_URL_CANON_H_ diff --git a/source/common/chromium_url/url_canon_internal.cc b/source/common/chromium_url/url_canon_internal.cc new file mode 100644 index 0000000000000..38c932cad5b47 --- /dev/null +++ b/source/common/chromium_url/url_canon_internal.cc @@ -0,0 +1,295 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "common/chromium_url/url_canon_internal.h" + +namespace chromium_url { + +// See the header file for this array's declaration. +const unsigned char kSharedCharTypeTable[0x100] = { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0x00 - 0x0f + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0x10 - 0x1f + 0, // 0x20 ' ' (escape spaces in queries) + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x21 ! + 0, // 0x22 " + 0, // 0x23 # (invalid in query since it marks the ref) + CHAR_QUERY | CHAR_USERINFO, // 0x24 $ + CHAR_QUERY | CHAR_USERINFO, // 0x25 % + CHAR_QUERY | CHAR_USERINFO, // 0x26 & + 0, // 0x27 ' (Try to prevent XSS.) 
+ CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x28 ( + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x29 ) + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x2a * + CHAR_QUERY | CHAR_USERINFO, // 0x2b + + CHAR_QUERY | CHAR_USERINFO, // 0x2c , + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x2d - + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT, // 0x2e . + CHAR_QUERY, // 0x2f / + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x30 0 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x31 1 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x32 2 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x33 3 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x34 4 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x35 5 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x36 6 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x37 7 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_COMPONENT, // 0x38 8 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_COMPONENT, // 0x39 9 + CHAR_QUERY, // 0x3a : + CHAR_QUERY, // 0x3b ; + 0, // 0x3c < (Try to prevent certain types of XSS.) + CHAR_QUERY, // 0x3d = + 0, // 0x3e > (Try to prevent certain types of XSS.) + CHAR_QUERY, // 0x3f ? 
+ CHAR_QUERY, // 0x40 @ + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x41 A + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x42 B + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x43 C + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x44 D + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x45 E + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x46 F + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x47 G + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x48 H + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x49 I + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4a J + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4b K + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4c L + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4d M + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4e N + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4f O + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x50 P + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x51 Q + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x52 R + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x53 S + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x54 T + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x55 U + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x56 V + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x57 W + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT, // 0x58 X + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x59 Y + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x5a Z + CHAR_QUERY, // 0x5b [ + CHAR_QUERY, // 0x5c '\' + CHAR_QUERY, // 0x5d ] + CHAR_QUERY, // 0x5e ^ + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x5f _ + CHAR_QUERY, // 0x60 ` + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x61 a + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x62 b + 
CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x63 c + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x64 d + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x65 e + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x66 f + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x67 g + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x68 h + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x69 i + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6a j + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6b k + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6c l + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6d m + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6e n + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6f o + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x70 p + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x71 q + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x72 r + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x73 s + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x74 t + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x75 u + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x76 v + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x77 w + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT, // 0x78 x + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x79 y + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x7a z + CHAR_QUERY, // 0x7b { + CHAR_QUERY, // 0x7c | + CHAR_QUERY, // 0x7d } + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x7e ~ + 0, // 0x7f + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0x80 - 0x8f + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0x90 - 0x9f + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0xa0 - 0xaf + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0xb0 - 0xbf + 0, + 
0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0, // 0xc0 - 0xcf
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0, // 0xd0 - 0xdf
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0, // 0xe0 - 0xef
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0,
+    0, // 0xf0 - 0xff
+};
+
+const char kHexCharLookup[0x10] = {
+    '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F',
+};
+
+const char kCharToHexLookup[8] = {
+    0,        // 0x00 - 0x1f
+    '0',      // 0x20 - 0x3f: digits 0 - 9 are 0x30 - 0x39
+    'A' - 10, // 0x40 - 0x5f: letters A - F are 0x41 - 0x46
+    'a' - 10, // 0x60 - 0x7f: letters a - f are 0x61 - 0x66
+    0,        // 0x80 - 0x9F
+    0,        // 0xA0 - 0xBF
+    0,        // 0xC0 - 0xDF
+    0,        // 0xE0 - 0xFF
+};
+
+} // namespace chromium_url
diff --git a/source/common/chromium_url/url_canon_internal.h b/source/common/chromium_url/url_canon_internal.h
new file mode 100644
index 0000000000000..bffff5c12f4a0
--- /dev/null
+++ b/source/common/chromium_url/url_canon_internal.h
@@ -0,0 +1,246 @@
+// Envoy snapshot of Chromium URL path normalization, see README.md.
+// NOLINT(namespace-envoy)
+
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef URL_URL_CANON_INTERNAL_H_
+#define URL_URL_CANON_INTERNAL_H_
+
+// This file is intended to be included in another C++ file where the character
+// types are defined. This allows us to write mostly generic code, but not have
+// template bloat because everything is inlined when anybody calls any of our
+// functions.
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "common/chromium_url/envoy_shim.h"
+#include "common/chromium_url/url_canon.h"
+
+namespace chromium_url {
+
+// Character type handling -----------------------------------------------------
+
+// Bits that identify different character types.
These types identify different +// bits that are set for each 8-bit character in the kSharedCharTypeTable. +enum SharedCharTypes { + // Characters that do not require escaping in queries. Characters that do + // not have this flag will be escaped; see url_canon_query.cc + CHAR_QUERY = 1, + + // Valid in the username/password field. + CHAR_USERINFO = 2, + + // Valid in a IPv4 address (digits plus dot and 'x' for hex). + CHAR_IPV4 = 4, + + // Valid in an ASCII-representation of a hex digit (as in %-escaped). + CHAR_HEX = 8, + + // Valid in an ASCII-representation of a decimal digit. + CHAR_DEC = 16, + + // Valid in an ASCII-representation of an octal digit. + CHAR_OCT = 32, + + // Characters that do not require escaping in encodeURIComponent. Characters + // that do not have this flag will be escaped; see url_util.cc. + CHAR_COMPONENT = 64, +}; + +// This table contains the flags in SharedCharTypes for each 8-bit character. +// Some canonicalization functions have their own specialized lookup table. +// For those with simple requirements, we have collected the flags in one +// place so there are fewer lookup tables to load into the CPU cache. +// +// Using an unsigned char type has a small but measurable performance benefit +// over using a 32-bit number. +extern const unsigned char kSharedCharTypeTable[0x100]; + +// More readable wrappers around the character type lookup table. +inline bool IsCharOfType(unsigned char c, SharedCharTypes type) { + return !!(kSharedCharTypeTable[c] & type); +} +inline bool IsQueryChar(unsigned char c) { return IsCharOfType(c, CHAR_QUERY); } +inline bool IsIPv4Char(unsigned char c) { return IsCharOfType(c, CHAR_IPV4); } +inline bool IsHexChar(unsigned char c) { return IsCharOfType(c, CHAR_HEX); } +inline bool IsComponentChar(unsigned char c) { return IsCharOfType(c, CHAR_COMPONENT); } + +// Maps the hex numerical values 0x0 to 0xf to the corresponding ASCII digit +// that will be used to represent it. 
+COMPONENT_EXPORT(URL) extern const char kHexCharLookup[0x10];
+
+// This lookup table allows fast conversion between ASCII hex letters and their
+// corresponding numerical value. The 8-bit range is divided up into 8
+// regions of 0x20 characters each. Each of the three character types (numbers,
+// uppercase, lowercase) falls into different regions of this range. The table
+// contains the amount to subtract from characters in that range to get at
+// the corresponding numerical value.
+//
+// See HexDigitToValue for the lookup.
+extern const char kCharToHexLookup[8];
+
+// Assumes the input is a valid hex digit! Call IsHexChar before using this.
+inline unsigned char HexCharToValue(unsigned char c) { return c - kCharToHexLookup[c / 0x20]; }
+
+// Indicates if the given character is a dot or dot equivalent, returning the
+// number of characters taken by it. This will be one for a literal dot, 3 for
+// an escaped dot. If the character is not a dot, this will return 0.
+template <typename CHAR> inline int IsDot(const CHAR* spec, int offset, int end) {
+  if (spec[offset] == '.') {
+    return 1;
+  } else if (spec[offset] == '%' && offset + 3 <= end && spec[offset + 1] == '2' &&
+             (spec[offset + 2] == 'e' || spec[offset + 2] == 'E')) {
+    // Found "%2e"
+    return 3;
+  }
+  return 0;
+}
+
+// Write a single character, escaped, to the output. This always escapes: it
+// does no checking that the character requires escaping.
+// Escaping makes sense only 8 bit chars, so code works in all cases of
+// input parameters (8/16bit).
+template <typename UINCHAR, typename OUTCHAR>
+inline void AppendEscapedChar(UINCHAR ch, CanonOutputT<OUTCHAR>* output) {
+  output->push_back('%');
+  output->push_back(kHexCharLookup[(ch >> 4) & 0xf]);
+  output->push_back(kHexCharLookup[ch & 0xf]);
+}
+
+// UTF-8 functions ------------------------------------------------------------
+
+// Reads one character in UTF-8 starting at |*begin| in |str| and places
+// the decoded value into |*code_point|. If the character is valid, we will
+// return true.
If invalid, we'll return false and put the
+// kUnicodeReplacementCharacter into |*code_point|.
+//
+// |*begin| will be updated to point to the last character consumed so it
+// can be incremented in a loop and will be ready for the next character.
+// (for a single-byte ASCII character, it will not be changed).
+COMPONENT_EXPORT(URL)
+bool ReadUTFChar(const char* str, int* begin, int length, unsigned* code_point_out);
+
+// Generic To-UTF-8 converter. This will call the given append method for each
+// character that should be appended, with the given output method. Wrappers
+// are provided below for escaped and non-escaped versions of this.
+//
+// The char_value must have already been checked that it's a valid Unicode
+// character.
+template <class Output, void Appender(unsigned char, Output*)>
+inline void DoAppendUTF8(unsigned char_value, Output* output) {
+  if (char_value <= 0x7f) {
+    Appender(static_cast<unsigned char>(char_value), output);
+  } else if (char_value <= 0x7ff) {
+    // 110xxxxx 10xxxxxx
+    Appender(static_cast<unsigned char>(0xC0 | (char_value >> 6)), output);
+    Appender(static_cast<unsigned char>(0x80 | (char_value & 0x3f)), output);
+  } else if (char_value <= 0xffff) {
+    // 1110xxxx 10xxxxxx 10xxxxxx
+    Appender(static_cast<unsigned char>(0xe0 | (char_value >> 12)), output);
+    Appender(static_cast<unsigned char>(0x80 | ((char_value >> 6) & 0x3f)), output);
+    Appender(static_cast<unsigned char>(0x80 | (char_value & 0x3f)), output);
+  } else if (char_value <= 0x10FFFF) { // Max Unicode code point.
+    // 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+    Appender(static_cast<unsigned char>(0xf0 | (char_value >> 18)), output);
+    Appender(static_cast<unsigned char>(0x80 | ((char_value >> 12) & 0x3f)), output);
+    Appender(static_cast<unsigned char>(0x80 | ((char_value >> 6) & 0x3f)), output);
+    Appender(static_cast<unsigned char>(0x80 | (char_value & 0x3f)), output);
+  } else {
+    // Invalid UTF-8 character (>20 bits).
+    NOTREACHED();
+  }
+}
+
+// Helper used by AppendUTF8Value below. We use an unsigned parameter so there
+// are no funny sign problems with the input, but then have to convert it to
+// a regular char for appending.
+inline void AppendCharToOutput(unsigned char ch, CanonOutput* output) {
+  output->push_back(static_cast<char>(ch));
+}
+
+// Writes the given character to the output as UTF-8. This does NO checking
+// of the validity of the Unicode characters; the caller should ensure that
+// the value it is appending is valid to append.
+inline void AppendUTF8Value(unsigned char_value, CanonOutput* output) {
+  DoAppendUTF8<CanonOutput, AppendCharToOutput>(char_value, output);
+}
+
+// Writes the given character to the output as UTF-8, escaping ALL
+// characters (even when they are ASCII). This does NO checking of the
+// validity of the Unicode characters; the caller should ensure that the value
+// it is appending is valid to append.
+inline void AppendUTF8EscapedValue(unsigned char_value, CanonOutput* output) {
+  DoAppendUTF8<CanonOutput, AppendEscapedChar>(char_value, output);
+}
+
+// Escaping functions ---------------------------------------------------------
+
+// Writes the given character to the output as UTF-8, escaped. Call this
+// function only when the input is wide. Returns true on success. Failure
+// means there was some problem with the encoding, we'll still try to
+// update the |*begin| pointer and add a placeholder character to the
+// output so processing can continue.
+//
+// We will append the character starting at ch[begin] with the buffer ch
+// being |length|. |*begin| will be updated to point to the last character
+// consumed (we may consume more than one for UTF-16) so that if called in
+// a loop, incrementing the pointer will move to the next character.
+//
+// Every single output character will be escaped. This means that if you
+// give it an ASCII character as input, it will be escaped. Some code uses
+// this when it knows that a character is invalid according to its rules
+// for validity. If you don't want escaping for ASCII characters, you will
+// have to filter them out prior to calling this function.
+//
+// Assumes that ch[begin] is within range in the array, but does not assume
+// that any following characters are.
+inline bool AppendUTF8EscapedChar(const char* str, int* begin, int length, CanonOutput* output) {
+  // ReadUTF8Char will handle invalid characters for us and give us the
+  // kUnicodeReplacementCharacter, so we don't have to do special checking
+  // after failure, just pass through the failure to the caller.
+  unsigned ch;
+  bool success = ReadUTFChar(str, begin, length, &ch);
+  AppendUTF8EscapedValue(ch, output);
+  return success;
+}
+
+// Given a '%' character at |*begin| in the string |spec|, this will decode
+// the escaped value and put it into |*unescaped_value| on success (returns
+// true). On failure, this will return false, and will not write into
+// |*unescaped_value|.
+//
+// |*begin| will be updated to point to the last character of the escape
+// sequence so that when called with the index of a for loop, the next time
+// through it will point to the next character to be considered. On failure,
+// |*begin| will be unchanged.
+inline bool Is8BitChar(char /*c*/) {
+  return true; // this case is specialized to avoid a warning
+}
+
+template <typename CHAR>
+inline bool DecodeEscaped(const CHAR* spec, int* begin, int end, unsigned char* unescaped_value) {
+  if (*begin + 3 > end || !Is8BitChar(spec[*begin + 1]) || !Is8BitChar(spec[*begin + 2])) {
+    // Invalid escape sequence because there's not enough room, or the
+    // digits are not ASCII.
+    return false;
+  }
+
+  unsigned char first = static_cast<unsigned char>(spec[*begin + 1]);
+  unsigned char second = static_cast<unsigned char>(spec[*begin + 2]);
+  if (!IsHexChar(first) || !IsHexChar(second)) {
+    // Invalid hex digits, fail.
+    return false;
+  }
+
+  // Valid escape sequence.
+  *unescaped_value = (HexCharToValue(first) << 4) + HexCharToValue(second);
+  *begin += 2;
+  return true;
+}
+
+} // namespace chromium_url
+
+#endif // URL_URL_CANON_INTERNAL_H_
diff --git a/source/common/chromium_url/url_canon_path.cc b/source/common/chromium_url/url_canon_path.cc
new file mode 100644
index 0000000000000..f8c803a9c5f5a
--- /dev/null
+++ b/source/common/chromium_url/url_canon_path.cc
@@ -0,0 +1,417 @@
+// Envoy snapshot of Chromium URL path normalization, see README.md.
+// NOLINT(namespace-envoy)
+
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits.h>
+
+#include "common/chromium_url/url_canon.h"
+#include "common/chromium_url/url_canon_internal.h"
+#include "common/chromium_url/url_parse_internal.h"
+
+namespace chromium_url {
+
+namespace {
+
+enum CharacterFlags {
+  // Pass through unchanged, whether escaped or unescaped. This doesn't
+  // actually set anything so you can't OR it to check, it's just to make the
+  // table below more clear when neither ESCAPE or UNESCAPE is set.
+  PASS = 0,
+
+  // This character requires special handling in DoPartialPath. Doing this test
+  // first allows us to filter out the common cases of regular characters that
+  // can be directly copied.
+  SPECIAL = 1,
+
+  // This character must be escaped in the canonical output. Note that all
+  // escaped chars also have the "special" bit set so that the code that looks
+  // for this is triggered. Not valid with PASS or ESCAPE
+  ESCAPE_BIT = 2,
+  ESCAPE = ESCAPE_BIT | SPECIAL,
+
+  // This character must be unescaped in canonical output. Not valid with
+  // ESCAPE or PASS. We DON'T set the SPECIAL flag since if we encounter these
+  // characters unescaped, they should just be copied.
+  UNESCAPE = 4,
+
+  // This character is disallowed in URLs. Note that the "special" bit is also
+  // set to trigger handling.
+ INVALID_BIT = 8, + INVALID = INVALID_BIT | SPECIAL, +}; + +// This table contains one of the above flag values. Note some flags are more +// than one bits because they also turn on the "special" flag. Special is the +// only flag that may be combined with others. +// +// This table is designed to match exactly what IE does with the characters. +// +// Dot is even more special, and the escaped version is handled specially by +// IsDot. Therefore, we don't need the "escape" flag, and even the "unescape" +// bit is never handled (we just need the "special") bit. +const unsigned char kPathCharLookup[0x100] = { + // NULL control chars... + INVALID, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, + // control chars... + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, + // ' ' ! " # $ % & ' ( ) * + // + , - . / + ESCAPE, PASS, ESCAPE, ESCAPE, PASS, ESCAPE, PASS, PASS, PASS, PASS, PASS, PASS, PASS, UNESCAPE, + SPECIAL, PASS, + // 0 1 2 3 4 5 6 7 8 9 : + // ; < = > ? 
+ UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + UNESCAPE, PASS, PASS, ESCAPE, PASS, ESCAPE, ESCAPE, + // @ A B C D E F G H I J + // K L M N O + PASS, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + // P Q R S T U V W X Y Z + // [ \ ] ^ _ + UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + UNESCAPE, UNESCAPE, PASS, ESCAPE, PASS, ESCAPE, UNESCAPE, + // ` a b c d e f g h i j + // k l m n o + ESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + // p q r s t u v w x y z + // { | } ~ + UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + UNESCAPE, UNESCAPE, ESCAPE, ESCAPE, ESCAPE, UNESCAPE, ESCAPE, + // ...all the high-bit characters are escaped + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, 
ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE};
+
+enum DotDisposition {
+  // The given dot is just part of a filename and is not special.
+  NOT_A_DIRECTORY,
+
+  // The given dot is the current directory.
+  DIRECTORY_CUR,
+
+  // The given dot is the first of a double dot that should take us up one.
+  DIRECTORY_UP
+};
+
+// When the path resolver finds a dot, this function is called with the
+// character following that dot to see what it is. The return value
+// indicates what type this dot is (see above). This code handles the case
+// where the dot is at the end of the input.
+//
+// |*consumed_len| will contain the number of characters in the input that
+// express what we found.
+//
+// If the input is "../foo", |after_dot| = 1, |end| = 6, and
+// at the end, |*consumed_len| = 2 for the "./" this function consumed. The
+// original dot length should be handled by the caller.
+template <typename CHAR>
+DotDisposition ClassifyAfterDot(const CHAR* spec, int after_dot, int end, int* consumed_len) {
+  if (after_dot == end) {
+    // Single dot at the end.
+    *consumed_len = 0;
+    return DIRECTORY_CUR;
+  }
+  if (IsURLSlash(spec[after_dot])) {
+    // Single dot followed by a slash.
+    *consumed_len = 1; // Consume the slash
+    return DIRECTORY_CUR;
+  }
+
+  int second_dot_len = IsDot(spec, after_dot, end);
+  if (second_dot_len) {
+    int after_second_dot = after_dot + second_dot_len;
+    if (after_second_dot == end) {
+      // Double dot at the end.
+      *consumed_len = second_dot_len;
+      return DIRECTORY_UP;
+    }
+    if (IsURLSlash(spec[after_second_dot])) {
+      // Double dot followed by a slash.
+      *consumed_len = second_dot_len + 1;
+      return DIRECTORY_UP;
+    }
+  }
+
+  // The dots are followed by something else, not a directory.
+  *consumed_len = 0;
+  return NOT_A_DIRECTORY;
+}
+
+// Rewinds the output to the previous slash. It is assumed that the output
+// ends with a slash and this doesn't count (we call this when we are
+// appending directory paths, so the previous path component has an ending
+// slash).
+// +// This will stop at the first slash (assumed to be at position +// |path_begin_in_output| and not go any higher than that. Some web pages +// do ".." too many times, so we need to handle that brokenness. +// +// It searches for a literal slash rather than including a backslash as well +// because it is run only on the canonical output. +// +// The output is guaranteed to end in a slash when this function completes. +void BackUpToPreviousSlash(int path_begin_in_output, CanonOutput* output) { + DCHECK(output->length() > 0); + + int i = output->length() - 1; + DCHECK(output->at(i) == '/'); + if (i == path_begin_in_output) + return; // We're at the first slash, nothing to do. + + // Now back up (skipping the trailing slash) until we find another slash. + i--; + while (output->at(i) != '/' && i > path_begin_in_output) + i--; + + // Now shrink the output to just include that last slash we found. + output->set_length(i + 1); +} + +// Looks for problematic nested escape sequences and escapes the output as +// needed to ensure they can't be misinterpreted. +// +// Our concern is that in input escape sequence that's invalid because it +// contains nested escape sequences might look valid once those are unescaped. +// For example, "%%300" is not a valid escape sequence, but after unescaping the +// inner "%30" this becomes "%00" which is valid. Leaving this in the output +// string can result in callers re-canonicalizing the string and unescaping this +// sequence, thus resulting in something fundamentally different than the +// original input here. This can cause a variety of problems. +// +// This function is called after we've just unescaped a sequence that's within +// two output characters of a previous '%' that we know didn't begin a valid +// escape sequence in the input string. We look for whether the output is going +// to turn into a valid escape sequence, and if so, convert the initial '%' into +// an escaped "%25" so the output can't be misinterpreted. 
+//
+// |spec| is the input string we're canonicalizing.
+// |next_input_index| is the index of the next unprocessed character in |spec|.
+// |input_len| is the length of |spec|.
+// |last_invalid_percent_index| is the index in |output| of a previously-seen
+// '%' character. The caller knows this '%' character isn't followed by a valid
+// escape sequence in the input string.
+// |output| is the canonicalized output thus far. The caller guarantees this
+// ends with a '%' followed by one or two characters, and the '%' is the one
+// pointed to by |last_invalid_percent_index|. The last character in the string
+// was just unescaped.
+template <typename CHAR>
+void CheckForNestedEscapes(const CHAR* spec, int next_input_index, int input_len,
+                           int last_invalid_percent_index, CanonOutput* output) {
+  const int length = output->length();
+  const char last_unescaped_char = output->at(length - 1);
+
+  // If |output| currently looks like "%c", we need to try appending the next
+  // input character to see if this will result in a problematic escape
+  // sequence. Note that this won't trigger on the first nested escape of a
+  // two-escape sequence like "%%30%30" -- we'll allow the conversion to
+  // "%0%30" -- but the second nested escape will be caught by this function
+  // when it's called again in that case.
+  const bool append_next_char = last_invalid_percent_index == length - 2;
+  if (append_next_char) {
+    // If the input doesn't contain a 7-bit character next, this case won't be a
+    // problem.
+    if ((next_input_index == input_len) || (spec[next_input_index] >= 0x80))
+      return;
+    output->push_back(static_cast<char>(spec[next_input_index]));
+  }
+
+  // Now output ends like "%cc". Try to unescape this.
+  int begin = last_invalid_percent_index;
+  unsigned char temp;
+  if (DecodeEscaped(output->data(), &begin, output->length(), &temp)) {
+    // New escape sequence found.
Overwrite the characters following the '%'
+    // with "25", and push_back() the one or two characters that were following
+    // the '%' when we were called.
+    if (!append_next_char)
+      output->push_back(output->at(last_invalid_percent_index + 1));
+    output->set(last_invalid_percent_index + 1, '2');
+    output->set(last_invalid_percent_index + 2, '5');
+    output->push_back(last_unescaped_char);
+  } else if (append_next_char) {
+    // Not a valid escape sequence, but we still need to undo appending the next
+    // source character so the caller can process it normally.
+    output->set_length(length);
+  }
+}
+
+// Appends the given path to the output. It assumes that if the input path
+// starts with a slash, it should be copied to the output. If no path has
+// already been appended to the output (the case when not resolving
+// relative URLs), the path should begin with a slash.
+//
+// If there are already path components (this mode is used when appending
+// relative paths for resolving), it assumes that the output already has
+// a trailing slash and that if the input begins with a slash, it should be
+// copied to the output.
+//
+// We do not collapse multiple slashes in a row to a single slash. It seems
+// no web browsers do this, and we don't want incompatibilities, even though
+// it would be correct for most systems.
+template <typename CHAR, typename UCHAR>
+bool DoPartialPath(const CHAR* spec, const Component& path, int path_begin_in_output,
+                   CanonOutput* output) {
+  int end = path.end();
+
+  // We use this variable to minimize the amount of work done when unescaping --
+  // we'll only call CheckForNestedEscapes() when this points at one of the last
+  // couple of characters in |output|.
+  int last_invalid_percent_index = INT_MIN;
+
+  bool success = true;
+  for (int i = path.begin; i < end; i++) {
+    UCHAR uch = static_cast<UCHAR>(spec[i]);
+    if (sizeof(CHAR) > 1 && uch >= 0x80) {
+      // We only need to test wide input for having non-ASCII characters.
For + // narrow input, we'll always just use the lookup table. We don't try to + // do anything tricky with decoding/validating UTF-8. This function will + // read one or two UTF-16 characters and append the output as UTF-8. This + // call will be removed in 8-bit mode. + success &= AppendUTF8EscapedChar(spec, &i, end, output); + } else { + // Normal ASCII character or 8-bit input, use the lookup table. + unsigned char out_ch = static_cast(uch); + unsigned char flags = kPathCharLookup[out_ch]; + if (flags & SPECIAL) { + // Needs special handling of some sort. + int dotlen; + if ((dotlen = IsDot(spec, i, end)) > 0) { + // See if this dot was preceded by a slash in the output. We + // assume that when canonicalizing paths, they will always + // start with a slash and not a dot, so we don't have to + // bounds check the output. + // + // Note that we check this in the case of dots so we don't have to + // special case slashes. Since slashes are much more common than + // dots, this actually increases performance measurably (though + // slightly). + DCHECK(output->length() > path_begin_in_output); + if (output->length() > path_begin_in_output && output->at(output->length() - 1) == '/') { + // Slash followed by a dot, check to see if this is means relative + int consumed_len; + switch (ClassifyAfterDot(spec, i + dotlen, end, &consumed_len)) { + case NOT_A_DIRECTORY: + // Copy the dot to the output, it means nothing special. + output->push_back('.'); + i += dotlen - 1; + break; + case DIRECTORY_CUR: // Current directory, just skip the input. + i += dotlen + consumed_len - 1; + break; + case DIRECTORY_UP: + BackUpToPreviousSlash(path_begin_in_output, output); + i += dotlen + consumed_len - 1; + break; + } + } else { + // This dot is not preceded by a slash, it is just part of some + // file name. 
+ output->push_back('.'); + i += dotlen - 1; + } + + } else if (out_ch == '\\') { + // Convert backslashes to forward slashes + output->push_back('/'); + + } else if (out_ch == '%') { + // Handle escape sequences. + unsigned char unescaped_value; + if (DecodeEscaped(spec, &i, end, &unescaped_value)) { + // Valid escape sequence, see if we keep, reject, or unescape it. + // Note that at this point DecodeEscape() will have advanced |i| to + // the last character of the escape sequence. + char unescaped_flags = kPathCharLookup[unescaped_value]; + + if (unescaped_flags & UNESCAPE) { + // This escaped value shouldn't be escaped. Try to copy it. + output->push_back(unescaped_value); + // If we just unescaped a value within 2 output characters of the + // '%' from a previously-detected invalid escape sequence, we + // might have an input string with problematic nested escape + // sequences; detect and fix them. + if (last_invalid_percent_index >= (output->length() - 3)) { + CheckForNestedEscapes(spec, i + 1, end, last_invalid_percent_index, output); + } + } else { + // Either this is an invalid escaped character, or it's a valid + // escaped character we should keep escaped. In the first case we + // should just copy it exactly and remember the error. In the + // second we also copy exactly in case the server is sensitive to + // changing the case of any hex letters. + output->push_back('%'); + output->push_back(static_cast(spec[i - 1])); + output->push_back(static_cast(spec[i])); + if (unescaped_flags & INVALID_BIT) + success = false; + } + } else { + // Invalid escape sequence. IE7+ rejects any URLs with such + // sequences, while other browsers pass them through unchanged. We + // use the permissive behavior. + // TODO(brettw): Consider testing IE's strict behavior, which would + // allow removing the code to handle nested escapes above. 
+ last_invalid_percent_index = output->length(); + output->push_back('%'); + } + + } else if (flags & INVALID_BIT) { + // For NULLs, etc. fail. + AppendEscapedChar(out_ch, output); + success = false; + + } else if (flags & ESCAPE_BIT) { + // This character should be escaped. + AppendEscapedChar(out_ch, output); + } + } else { + // Nothing special about this character, just append it. + output->push_back(out_ch); + } + } + } + return success; +} + +template +bool DoPath(const CHAR* spec, const Component& path, CanonOutput* output, Component* out_path) { + bool success = true; + out_path->begin = output->length(); + if (path.len > 0) { + // Write out an initial slash if the input has none. If we just parse a URL + // and then canonicalize it, it will of course have a slash already. This + // check is for the replacement and relative URL resolving cases of file + // URLs. + if (!IsURLSlash(spec[path.begin])) + output->push_back('/'); + + success = DoPartialPath(spec, path, out_path->begin, output); + } else { + // No input, canonical path is a slash. + output->push_back('/'); + } + out_path->len = output->length() - out_path->begin; + return success; +} + +} // namespace + +bool CanonicalizePath(const char* spec, const Component& path, CanonOutput* output, + Component* out_path) { + return DoPath(spec, path, output, out_path); +} + +} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon_stdstring.cc b/source/common/chromium_url/url_canon_stdstring.cc new file mode 100644 index 0000000000000..0c61831e5f1ac --- /dev/null +++ b/source/common/chromium_url/url_canon_stdstring.cc @@ -0,0 +1,33 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "common/chromium_url/url_canon_stdstring.h" + +namespace chromium_url { + +StdStringCanonOutput::StdStringCanonOutput(std::string* str) : CanonOutput(), str_(str) { + cur_len_ = static_cast(str_->size()); // Append to existing data. + buffer_ = str_->empty() ? NULL : &(*str_)[0]; + buffer_len_ = static_cast(str_->size()); +} + +StdStringCanonOutput::~StdStringCanonOutput() { + // Nothing to do, we don't own the string. +} + +void StdStringCanonOutput::Complete() { + str_->resize(cur_len_); + buffer_len_ = cur_len_; +} + +void StdStringCanonOutput::Resize(int sz) { + str_->resize(sz); + buffer_ = str_->empty() ? NULL : &(*str_)[0]; + buffer_len_ = sz; +} + +} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon_stdstring.h b/source/common/chromium_url/url_canon_stdstring.h new file mode 100644 index 0000000000000..e14d6c22e74e8 --- /dev/null +++ b/source/common/chromium_url/url_canon_stdstring.h @@ -0,0 +1,58 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef URL_URL_CANON_STDSTRING_H_ +#define URL_URL_CANON_STDSTRING_H_ + +// This header file defines a canonicalizer output method class for STL +// strings. Because the canonicalizer tries not to be dependent on the STL, +// we have segregated it here. + +#include + +#include "common/chromium_url/envoy_shim.h" +#include "common/chromium_url/url_canon.h" + +#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&) = delete; \ + TypeName& operator=(const TypeName&) = delete + +namespace chromium_url { + +// Write into a std::string given in the constructor. This object does not own +// the string itself, and the user must ensure that the string stays alive +// throughout the lifetime of this object. 
+// +// The given string will be appended to; any existing data in the string will +// be preserved. +// +// Note that when canonicalization is complete, the string will likely have +// unused space at the end because we make the string very big to start out +// with (by |initial_size|). This ends up being important because resize +// operations are slow, and because the base class needs to write directly +// into the buffer. +// +// Therefore, the user should call Complete() before using the string that +// this class wrote into. +class COMPONENT_EXPORT(URL) StdStringCanonOutput : public CanonOutput { +public: + StdStringCanonOutput(std::string* str); + ~StdStringCanonOutput() override; + + // Must be called after writing has completed but before the string is used. + void Complete(); + + void Resize(int sz) override; + +protected: + std::string* str_; + DISALLOW_COPY_AND_ASSIGN(StdStringCanonOutput); +}; + +} // namespace chromium_url + +#endif // URL_URL_CANON_STDSTRING_H_ diff --git a/source/common/chromium_url/url_parse.h b/source/common/chromium_url/url_parse.h new file mode 100644 index 0000000000000..b840af60438d1 --- /dev/null +++ b/source/common/chromium_url/url_parse.h @@ -0,0 +1,49 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef URL_PARSE_H_ +#define URL_PARSE_H_ + +namespace chromium_url { + +// Component ------------------------------------------------------------------ + +// Represents a substring for URL parsing. +struct Component { + Component() : begin(0), len(-1) {} + + // Normal constructor: takes an offset and a length. + Component(int b, int l) : begin(b), len(l) {} + + int end() const { return begin + len; } + + // Returns true if this component is valid, meaning the length is given. 
Even + // valid components may be empty to record the fact that they exist. + bool is_valid() const { return (len != -1); } + + // Returns true if the given component is specified on false, the component + // is either empty or invalid. + bool is_nonempty() const { return (len > 0); } + + void reset() { + begin = 0; + len = -1; + } + + bool operator==(const Component& other) const { return begin == other.begin && len == other.len; } + + int begin; // Byte offset in the string of this component. + int len; // Will be -1 if the component is unspecified. +}; + +// Helper that returns a component created with the given begin and ending +// points. The ending point is non-inclusive. +inline Component MakeRange(int begin, int end) { return Component(begin, end - begin); } + +} // namespace chromium_url + +#endif // URL_PARSE_H_ diff --git a/source/common/chromium_url/url_parse_internal.h b/source/common/chromium_url/url_parse_internal.h new file mode 100644 index 0000000000000..0ca47bc488461 --- /dev/null +++ b/source/common/chromium_url/url_parse_internal.h @@ -0,0 +1,18 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef URL_URL_PARSE_INTERNAL_H_ +#define URL_URL_PARSE_INTERNAL_H_ + +namespace chromium_url { + +// We treat slashes and backslashes the same for IE compatibility. 
+inline bool IsURLSlash(char ch) { return ch == '/' || ch == '\\'; } + +} // namespace chromium_url + +#endif // URL_URL_PARSE_INTERNAL_H_ diff --git a/source/common/common/BUILD b/source/common/common/BUILD index 6b1d9c0209868..a758943e1c1a4 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -140,7 +140,6 @@ envoy_cc_library( ":macros", ":minimal_logger_lib", "//include/envoy/access_log:access_log_interface", - "//include/envoy/filesystem:filesystem_interface", ], ) @@ -252,6 +251,23 @@ genrule( cmd = """echo "#define BUILD_VERSION_NUMBER \\"$$(cat $<)\\"" >$@""", ) +genrule( + name = "generate_version_linkstamp", + outs = ["lib/version_linkstamp.h"], + cmd = "$(location :generate_version_linkstamp.sh) >> $@", + # Undocumented attr to depend on workspace status files. + # https://github.com/bazelbuild/bazel/issues/4942 + # Used here because generate_version_linkstamp.sh depends on the workspace status files. + stamp = 1, + tools = [":generate_version_linkstamp.sh"], +) + +genrule( + name = "generate_version_linkstamp_empty", + outs = ["empty/version_linkstamp.h"], + cmd = """>$@""", +) + envoy_cc_library( name = "version_includes", hdrs = [ @@ -263,11 +279,21 @@ envoy_cc_library( envoy_cc_library( name = "version_lib", srcs = ["version.cc"], + hdrs = select({ + "//bazel:manual_stamp": [":generate_version_linkstamp"], + # By default the header file is empty. + # This is done so that the definitions linked via the linkstamp rule don't cause collisions. 
+ "//conditions:default": [":generate_version_linkstamp_empty"], + }), copts = envoy_select_boringssl( ["-DENVOY_SSL_VERSION=\\\"BoringSSL-FIPS\\\""], ["-DENVOY_SSL_VERSION=\\\"BoringSSL\\\""], ), linkstamp = "version_linkstamp.cc", + strip_include_prefix = select({ + "//bazel:manual_stamp": "lib", + "//conditions:default": "empty", + }), deps = [ ":version_includes", "//source/common/common:macros", @@ -289,16 +315,6 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "block_memory_hash_set_lib", - hdrs = ["block_memory_hash_set.h"], - deps = [ - ":assert_lib", - ":logger_lib", - "//source/common/stats:stats_options_lib", - ], -) - envoy_cc_library( name = "perf_annotation_lib", srcs = ["perf_annotation.cc"], diff --git a/source/common/common/assert.h b/source/common/common/assert.h index 8ae1104b0d6ae..d99ce8f754f80 100644 --- a/source/common/common/assert.h +++ b/source/common/common/assert.h @@ -80,11 +80,17 @@ void invokeDebugAssertionFailureRecordAction_ForAssertMacroUseOnly(); #define _ASSERT_VERBOSE(X, Y) _ASSERT_IMPL(X, #X, ASSERT_ACTION, Y) #define _ASSERT_SELECTOR(_1, _2, ASSERT_MACRO, ...) ASSERT_MACRO +// This is a workaround for fact that MSVC expands __VA_ARGS__ after passing them into a macro, +// rather than before passing them into a macro. Without this, _ASSERT_SELECTOR does not work +// correctly when compiled with MSVC +#define EXPAND(X) X + // If ASSERT is called with one argument, the ASSERT_SELECTOR will return // _ASSERT_ORIGINAL and this will call _ASSERT_ORIGINAL(__VA_ARGS__). // If ASSERT is called with two arguments, ASSERT_SELECTOR will return // _ASSERT_VERBOSE, and this will call _ASSERT_VERBOSE,(__VA_ARGS__) -#define ASSERT(...) _ASSERT_SELECTOR(__VA_ARGS__, _ASSERT_VERBOSE, _ASSERT_ORIGINAL)(__VA_ARGS__) +#define ASSERT(...) 
\ + EXPAND(_ASSERT_SELECTOR(__VA_ARGS__, _ASSERT_VERBOSE, _ASSERT_ORIGINAL)(__VA_ARGS__)) #else // This non-implementation ensures that its argument is a valid expression that can be statically // casted to a bool, but the expression is never evaluated and will be compiled away. diff --git a/source/common/common/base64.cc b/source/common/common/base64.cc index df5c84dd222d9..9a56871464b94 100644 --- a/source/common/common/base64.cc +++ b/source/common/common/base64.cc @@ -202,6 +202,10 @@ std::string Base64::encode(const Buffer::Instance& buffer, uint64_t length) { } std::string Base64::encode(const char* input, uint64_t length) { + return encode(input, length, true); +} + +std::string Base64::encode(const char* input, uint64_t length, bool add_padding) { uint64_t output_length = (length + 2) / 3 * 4; std::string ret; ret.reserve(output_length); @@ -213,7 +217,7 @@ std::string Base64::encode(const char* input, uint64_t length) { encodeBase(input[i], pos++, next_c, ret, CHAR_TABLE); } - encodeLast(pos, next_c, ret, CHAR_TABLE, true); + encodeLast(pos, next_c, ret, CHAR_TABLE, add_padding); return ret; } diff --git a/source/common/common/base64.h b/source/common/common/base64.h index 4e4245909c180..ea614eadedd56 100644 --- a/source/common/common/base64.h +++ b/source/common/common/base64.h @@ -27,6 +27,14 @@ class Base64 { */ static std::string encode(const char* input, uint64_t length); + /** + * Base64 encode an input char buffer with a given length. + * @param input char array to encode. + * @param length of the input array. + * @param whether add padding at the end of the output. + */ + static std::string encode(const char* input, uint64_t length, bool add_padding); + /** * Base64 decode an input string. Padding is required. * @param input supplies the input to decode. 
diff --git a/source/common/common/block_memory_hash_set.h b/source/common/common/block_memory_hash_set.h deleted file mode 100644 index ef1e2b34c6955..0000000000000 --- a/source/common/common/block_memory_hash_set.h +++ /dev/null @@ -1,365 +0,0 @@ -#pragma once - -#include -#include -#include - -#include "envoy/common/exception.h" -#include "envoy/stats/stats_options.h" - -#include "common/common/assert.h" -#include "common/common/fmt.h" -#include "common/common/logger.h" - -#include "absl/strings/string_view.h" - -namespace Envoy { - -/** - * Initialization parameters for BlockMemoryHashSet. The options are duplicated - * to the control-block after init, to aid with sanity checking when attaching - * an existing memory segment. - */ -struct BlockMemoryHashSetOptions { - std::string toString() const { - return fmt::format("capacity={}, num_slots={}", capacity, num_slots); - } - bool operator==(const BlockMemoryHashSetOptions& that) const { - return capacity == that.capacity && num_slots == that.num_slots; - } - bool operator!=(const BlockMemoryHashSetOptions& that) const { return !(*this == that); } - - uint32_t capacity; // how many values can be stored. - uint32_t num_slots; // determines speed of hash vs size efficiency. -}; - -/** - * Implements hash_set without using pointers, suitable for use - * in shared memory. Users must commit to capacity and num_slots at - * construction time. Value must provide these methods: - * absl::string_view Value::key() - * void Value::initialize(absl::string_view key) - * static uint64_t Value::size() - * static uint64_t Value::hash() - * - * This set may also be suitable for persisting a hash-table to long - * term storage, but not across machine architectures, as it doesn't - * use network byte order for storing ints. - */ -template class BlockMemoryHashSet : public Logger::Loggable { -public: - /** - * Sentinel used for next_cell links to indicate end-of-list. 
- */ - static const uint32_t Sentinel = 0xffffffff; - - /** Type used by put() to indicate the value at a key, and whether it was created */ - typedef std::pair ValueCreatedPair; - - /** - * Constructs a map control structure given a set of options, which cannot be changed. - * @param hash_set_options describes the parameters controlling set layout. - * @param init true if the memory should be initialized on construction. If false, - * the data in the table will be sanity checked, and an exception thrown if - * it is incoherent or mismatches the passed-in options. - * @param memory the memory buffer for the set data. - * @param stats_options a reference to the top-level StatsOptions struct containing - * information about max allowable stat name lengths. - * - * Note that no locking of any kind is done by this class; this must be done at the - * call-site to support concurrent access. - */ - BlockMemoryHashSet(const BlockMemoryHashSetOptions& hash_set_options, bool init, uint8_t* memory, - const Stats::StatsOptions& stats_options) - : cells_(nullptr), control_(nullptr), slots_(nullptr), stats_options_(stats_options) { - mapMemorySegments(hash_set_options, memory); - if (init) { - initialize(hash_set_options); - } else if (!attach(hash_set_options)) { - throw EnvoyException("BlockMemoryHashSet: Incompatible memory block"); - } - } - - /** - * Returns the numbers of byte required for the hash-table, based on - * the control structure. This must be used to allocate the - * backing-store (eg) in memory, which we do after - * constructing the object with the desired sizing. 
- */ - static uint64_t numBytes(const BlockMemoryHashSetOptions& hash_set_options, - const Stats::StatsOptions& stats_options) { - uint64_t size = cellOffset(hash_set_options.capacity, stats_options) + sizeof(Control) + - hash_set_options.num_slots * sizeof(uint32_t); - return align(size); - } - - uint64_t numBytes(const Stats::StatsOptions& stats_options) const { - return numBytes(control_->hash_set_options, stats_options); - } - - /** Examines the data structures to see if they are sane, assert-failing on any trouble. */ - void sanityCheck() { - RELEASE_ASSERT(control_->size <= control_->hash_set_options.capacity, ""); - - // As a sanity check, make sure there are control_->size values - // reachable from the slots, each of which has a valid - // char_offset. - // - // Avoid infinite loops if there is a next_cell_index cycle within a - // slot. Note that the num_values message will be emitted outside - // the loop. - uint32_t num_values = 0; - for (uint32_t slot = 0; slot < control_->hash_set_options.num_slots; ++slot) { - uint32_t next = 0; // initialized to silence compilers. - for (uint32_t cell_index = slots_[slot]; - (cell_index != Sentinel) && (num_values <= control_->size); cell_index = next) { - RELEASE_ASSERT(cell_index < control_->hash_set_options.capacity, ""); - Cell& cell = getCell(cell_index); - absl::string_view key = cell.value.key(); - RELEASE_ASSERT(computeSlot(key) == slot, ""); - next = cell.next_cell_index; - ++num_values; - } - } - RELEASE_ASSERT(num_values == control_->size, ""); - - uint32_t num_free_entries = 0; - uint32_t expected_free_entries = control_->hash_set_options.capacity - control_->size; - - // Don't infinite-loop with a corruption; break when we see there's a problem. 
- for (uint32_t cell_index = control_->free_cell_index; - (cell_index != Sentinel) && (num_free_entries <= expected_free_entries); - cell_index = getCell(cell_index).next_cell_index) { - ++num_free_entries; - } - RELEASE_ASSERT(num_free_entries == expected_free_entries, ""); - } - - /** - * Inserts a value into the set. If successful (e.g. map has - * capacity) then put returns a pointer to the value object, which - * the caller can then write, Returns {nullptr, false} if the key - * was too large, or the capacity of the map has been exceeded. - * - * If the value was already present in the map, then {value, false} is returned. - * The caller may need to clean up an old value. - * - * If the value is newly allocated, then {value, true} is returned. - * - * @return a pair with the value-pointer (or nullptr), and a bool indicating - * whether the value is newly allocated. - */ - ValueCreatedPair insert(absl::string_view key) { - Value* value = get(key); - if (value != nullptr) { - return ValueCreatedPair(value, false); - } - if (control_->size >= control_->hash_set_options.capacity) { - return ValueCreatedPair(nullptr, false); - } - const uint32_t slot = computeSlot(key); - const uint32_t cell_index = control_->free_cell_index; - Cell& cell = getCell(cell_index); - control_->free_cell_index = cell.next_cell_index; - cell.next_cell_index = slots_[slot]; - slots_[slot] = cell_index; - value = &cell.value; - value->initialize(key, stats_options_); - ++control_->size; - return ValueCreatedPair(value, true); - } - - /** - * Removes the specified key from the map, returning true if the key - * was found. - * @param key the key to remove - */ - bool remove(absl::string_view key) { - const uint32_t slot = computeSlot(key); - uint32_t* next = nullptr; - for (uint32_t* cptr = &slots_[slot]; *cptr != Sentinel; cptr = next) { - const uint32_t cell_index = *cptr; - Cell& cell = getCell(cell_index); - if (cell.value.key() == key) { - // Splice current cell out of slot-chain. 
- *cptr = cell.next_cell_index; - - // Splice current cell into free-list. - cell.next_cell_index = control_->free_cell_index; - control_->free_cell_index = cell_index; - - --control_->size; - return true; - } - next = &cell.next_cell_index; - } - return false; - } - - /** Returns the number of key/values stored in the map. */ - uint32_t size() const { return control_->size; } - - /** - * Gets the value associated with a key, returning nullptr if the value was not found. - * @param key - */ - Value* get(absl::string_view key) { - const uint32_t slot = computeSlot(key); - for (uint32_t c = slots_[slot]; c != Sentinel; c = getCell(c).next_cell_index) { - Cell& cell = getCell(c); - if (cell.value.key() == key) { - return &cell.value; - } - } - return nullptr; - } - - /** - * Computes a version signature based on the options and the hash function. - */ - std::string version(const Stats::StatsOptions& stats_options) { - return fmt::format("options={} hash={} size={}", control_->hash_set_options.toString(), - control_->hash_signature, numBytes(stats_options)); - } - -private: - friend class BlockMemoryHashSetTest; - - /** - * Initializes a hash-map on raw memory. No expectations are made about the state of the memory - * coming in. - * @param memory - */ - void initialize(const BlockMemoryHashSetOptions& hash_set_options) { - control_->hash_signature = Value::hash(signatureStringToHash()); - control_->num_bytes = numBytes(hash_set_options, stats_options_); - control_->hash_set_options = hash_set_options; - control_->size = 0; - control_->free_cell_index = 0; - - // Initialize all the slots; - for (uint32_t slot = 0; slot < hash_set_options.num_slots; ++slot) { - slots_[slot] = Sentinel; - } - - // Initialize the free-cell list. 
- const uint32_t last_cell = hash_set_options.capacity - 1; - for (uint32_t cell_index = 0; cell_index < last_cell; ++cell_index) { - Cell& cell = getCell(cell_index); - cell.next_cell_index = cell_index + 1; - } - getCell(last_cell).next_cell_index = Sentinel; - } - - /** - * Attempts to attach to an existing memory segment. Does a (relatively) quick - * sanity check to make sure the options copied to the provided memory match, and also - * that the slot, cell, and key-string structures look sane. - */ - bool attach(const BlockMemoryHashSetOptions& hash_set_options) { - if (numBytes(hash_set_options, stats_options_) != control_->num_bytes) { - ENVOY_LOG(error, "BlockMemoryHashSet unexpected memory size {} != {}", - numBytes(hash_set_options, stats_options_), control_->num_bytes); - return false; - } - if (Value::hash(signatureStringToHash()) != control_->hash_signature) { - ENVOY_LOG(error, "BlockMemoryHashSet hash signature mismatch."); - return false; - } - sanityCheck(); - return true; - } - - uint32_t computeSlot(absl::string_view key) { - return Value::hash(key) % control_->hash_set_options.num_slots; - } - - /** - * Computes a signature string, composed of all the non-zero 8-bit characters. - * This is used for detecting if the hash algorithm changes, which invalidates - * any saved stats-set. - */ - static std::string signatureStringToHash() { - std::string signature_string; - signature_string.resize(255); - for (int i = 1; i <= 255; ++i) { - signature_string[i - 1] = i; - } - return signature_string; - } - - /** - * Represents control-values for the hash-table. - */ - struct Control { - BlockMemoryHashSetOptions hash_set_options; // Options established at map construction time. - uint64_t hash_signature; // Hash of a constant signature string. - uint64_t num_bytes; // Bytes allocated on behalf of the map. - uint32_t size; // Number of values currently stored. - uint32_t free_cell_index; // Offset of first free cell. 
- }; - - /** - * Represents a value-cell, which is stored in a linked-list from each slot. - */ - struct Cell { - uint32_t next_cell_index; // Index of next cell in map->cells_, terminated with Sentinel. - Value value; // Templated value field. - }; - - // It seems like this is an obvious constexpr, but it won't compile as one. - static uint64_t calculateAlignment() { - return std::max(alignof(Cell), std::max(alignof(uint32_t), alignof(Control))); - } - - static uint64_t align(uint64_t size) { - const uint64_t alignment = calculateAlignment(); - // Check that alignment is a power of 2: - // http://www.graphics.stanford.edu/~seander/bithacks.html#DetermineIfPowerOf2 - RELEASE_ASSERT((alignment > 0) && ((alignment & (alignment - 1)) == 0), ""); - return (size + alignment - 1) & ~(alignment - 1); - } - - /** - * Computes the byte offset of a cell into cells_. This is not - * simply an array index because we don't know the size of a key at - * compile-time. - */ - static uint64_t cellOffset(uint32_t cell_index, const Stats::StatsOptions& stats_options) { - // sizeof(Cell) includes 'sizeof Value' which may not be accurate. So we need to - // subtract that off, and add the template method's view of the actual value-size. - uint64_t cell_size = - align(sizeof(Cell) + Value::structSizeWithOptions(stats_options) - sizeof(Value)); - return cell_index * cell_size; - } - - /** - * Returns a reference to a Cell at the specified index. - */ - Cell& getCell(uint32_t cell_index) { - // Because the key-size is parameterizable, an array-lookup on sizeof(Cell) does not work. - char* ptr = reinterpret_cast(cells_) + cellOffset(cell_index, stats_options_); - RELEASE_ASSERT((reinterpret_cast(ptr) & (calculateAlignment() - 1)) == 0, ""); - return *reinterpret_cast(ptr); - } - - /** Maps out the segments of memory for us to work with. 
*/ - void mapMemorySegments(const BlockMemoryHashSetOptions& hash_set_options, uint8_t* memory) { - // Note that we are not examining or mutating memory here, just looking at the pointer, - // so we don't need to hold any locks. - cells_ = reinterpret_cast(memory); // First because Value may need to be aligned. - memory += cellOffset(hash_set_options.capacity, stats_options_); - control_ = reinterpret_cast(memory); - memory += sizeof(Control); - slots_ = reinterpret_cast(memory); - } - - // Pointers into memory. Cells go first, because Value may need a more aggressive - // alignment. - Cell* cells_; - Control* control_; - uint32_t* slots_; - const Stats::StatsOptions& stats_options_; -}; - -} // namespace Envoy diff --git a/source/common/common/enum_to_int.h b/source/common/common/enum_to_int.h index a9c77b59419ee..ec613ef67df9b 100644 --- a/source/common/common/enum_to_int.h +++ b/source/common/common/enum_to_int.h @@ -6,5 +6,10 @@ namespace Envoy { /** * Convert an int based enum to an int. */ -template uint32_t enumToInt(T val) { return static_cast(val); } +template constexpr uint32_t enumToInt(T val) { return static_cast(val); } + +/** + * Convert an int based enum to a signed int. + */ +template constexpr int32_t enumToSignedInt(T val) { return static_cast(val); } } // namespace Envoy diff --git a/source/common/common/generate_version_linkstamp.sh b/source/common/common/generate_version_linkstamp.sh new file mode 100755 index 0000000000000..4ad2da073589d --- /dev/null +++ b/source/common/common/generate_version_linkstamp.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +# This script generates a header file that is used by version_lib whenever linkstamp is not allowed. +# linkstamp is used to link in version_linkstamp.cc into the version_lib. +# However, linkstamp is not available to non-binary bazel targets. +# This means that if the topmost target being used to compile version_lib is a envoy_cc_library or related, linkstamp will not be in effect. 
+# In turn this means that version_linkstamp.cc is not linked, and the build_scm_revision and build_scm_status are unknown symbols to the linker. + +# Unfortunately linkstamp is not well documented (https://github.com/bazelbuild/bazel/issues/2893). +# But following the implicit trail one can deduce that linkstamp is in effect when "stamping" (https://github.com/bazelbuild/bazel/issues/2893) is on. +# envoy_cc_library -- and the underlying cc_library rule -- does not support "stamping". +# This makes sense as stamping mainly makes sense in the context of binaries for production releases, not static libraries. +build_scm_revision=$(grep BUILD_SCM_REVISION bazel-out/volatile-status.txt | sed 's/^BUILD_SCM_REVISION //' | tr -d '\\n') + +echo "extern const char build_scm_revision[];" +echo "extern const char build_scm_status[];" +echo "const char build_scm_revision[] = \"$build_scm_revision\";" +echo "const char build_scm_status[] = \"Library\";" \ No newline at end of file diff --git a/source/common/common/hash.h b/source/common/common/hash.h index a56500c566434..b6c50e63ac74f 100644 --- a/source/common/common/hash.h +++ b/source/common/common/hash.h @@ -60,7 +60,7 @@ class MurmurHash { private: static inline uint64_t unaligned_load(const char* p) { uint64_t result; - __builtin_memcpy(&result, p, sizeof(result)); + memcpy(&result, p, sizeof(result)); return result; } @@ -77,16 +77,49 @@ class MurmurHash { static inline uint64_t shift_mix(uint64_t v) { return v ^ (v >> 47); } }; -struct CharStarHash { +struct ConstCharStarHash { size_t operator()(const char* a) const { return HashUtil::xxHash64(a); } }; -struct CharStarEqual { +struct ConstCharStarEqual { size_t operator()(const char* a, const char* b) const { return strcmp(a, b) == 0; } }; template -using CharStarHashMap = absl::flat_hash_map; -using CharStarHashSet = absl::flat_hash_set; +using ConstCharStarHashMap = + absl::flat_hash_map; +using ConstCharStarHashSet = + absl::flat_hash_set; + +using SharedString = 
std::shared_ptr; + +struct HeterogeneousStringHash { + // Specifying is_transparent indicates to the library infrastructure that + // type-conversions should not be applied when calling find(), but instead + // pass the actual types of the contained and searched-for objects directly to + // these functors. See + // https://en.cppreference.com/w/cpp/utility/functional/less_void for an + // official reference, and https://abseil.io/tips/144 for a description of + // using it in the context of absl. + using is_transparent = void; + + size_t operator()(absl::string_view a) const { return HashUtil::xxHash64(a); } + size_t operator()(const SharedString& a) const { return HashUtil::xxHash64(*a); } +}; + +struct HeterogeneousStringEqual { + // See description for HeterogeneousStringHash::is_transparent. + using is_transparent = void; + + size_t operator()(absl::string_view a, absl::string_view b) const { return a == b; } + size_t operator()(const SharedString& a, const SharedString& b) const { return *a == *b; } + size_t operator()(absl::string_view a, const SharedString& b) const { return a == *b; } + size_t operator()(const SharedString& a, absl::string_view b) const { return *a == b; } +}; + +// We use heterogeneous hash/equal functors to do a find() without constructing +// a shared_string, which would entail making a full copy of the stat name. 
+using SharedStringSet = + absl::flat_hash_set; } // namespace Envoy diff --git a/source/common/common/hex.cc b/source/common/common/hex.cc index 8f57315ac061c..b987282d58094 100644 --- a/source/common/common/hex.cc +++ b/source/common/common/hex.cc @@ -27,7 +27,7 @@ std::string Hex::encode(const uint8_t* data, size_t length) { } std::vector Hex::decode(const std::string& hex_string) { - if (hex_string.size() == 0 || hex_string.size() % 2 != 0) { + if (hex_string.empty() || hex_string.size() % 2 != 0) { return {}; } @@ -35,7 +35,7 @@ std::vector Hex::decode(const std::string& hex_string) { for (size_t i = 0; i < hex_string.size(); i += 2) { std::string hex_byte = hex_string.substr(i, 2); uint64_t out; - if (!StringUtil::atoul(hex_byte.c_str(), out, 16)) { + if (!StringUtil::atoull(hex_byte.c_str(), out, 16)) { return {}; } diff --git a/source/common/common/logger.h b/source/common/common/logger.h index 3165fcf4f04a4..d349d9f6310f9 100644 --- a/source/common/common/logger.h +++ b/source/common/common/logger.h @@ -36,6 +36,8 @@ namespace Logger { FUNCTION(http) \ FUNCTION(http2) \ FUNCTION(hystrix) \ + FUNCTION(init) \ + FUNCTION(kafka) \ FUNCTION(lua) \ FUNCTION(main) \ FUNCTION(misc) \ diff --git a/source/common/common/logger_delegates.h b/source/common/common/logger_delegates.h index 1dcd4d6926943..67264d29c3a33 100644 --- a/source/common/common/logger_delegates.h +++ b/source/common/common/logger_delegates.h @@ -5,7 +5,6 @@ #include #include "envoy/access_log/access_log.h" -#include "envoy/filesystem/filesystem.h" #include "common/common/logger.h" #include "common/common/macros.h" @@ -31,7 +30,7 @@ class FileSinkDelegate : public SinkDelegate { void flush() override; private: - Filesystem::FileSharedPtr log_file_; + AccessLog::AccessLogFileSharedPtr log_file_; }; } // namespace Logger diff --git a/source/common/common/posix/thread_impl.cc b/source/common/common/posix/thread_impl.cc index 6421e32f2212f..897a55d62040b 100644 --- 
a/source/common/common/posix/thread_impl.cc +++ b/source/common/common/posix/thread_impl.cc @@ -33,12 +33,13 @@ bool ThreadIdImplPosix::isCurrentThreadId() const { return id_ == getCurrentThre ThreadImplPosix::ThreadImplPosix(std::function thread_routine) : thread_routine_(thread_routine) { RELEASE_ASSERT(Logger::Registry::initialized(), ""); - const int rc = pthread_create(&thread_handle_, nullptr, - [](void* arg) -> void* { - static_cast(arg)->thread_routine_(); - return nullptr; - }, - this); + const int rc = pthread_create( + &thread_handle_, nullptr, + [](void* arg) -> void* { + static_cast(arg)->thread_routine_(); + return nullptr; + }, + this); RELEASE_ASSERT(rc == 0, ""); } diff --git a/source/common/common/token_bucket_impl.cc b/source/common/common/token_bucket_impl.cc index 539044d8a1905..5e7de9e6bb1a7 100644 --- a/source/common/common/token_bucket_impl.cc +++ b/source/common/common/token_bucket_impl.cc @@ -8,7 +8,7 @@ TokenBucketImpl::TokenBucketImpl(uint64_t max_tokens, TimeSource& time_source, d : max_tokens_(max_tokens), fill_rate_(std::abs(fill_rate)), tokens_(max_tokens), last_fill_(time_source.monotonicTime()), time_source_(time_source) {} -bool TokenBucketImpl::consume(uint64_t tokens) { +uint64_t TokenBucketImpl::consume(uint64_t tokens, bool allow_partial) { if (tokens_ < max_tokens_) { const auto time_now = time_source_.monotonicTime(); tokens_ = std::min((std::chrono::duration(time_now - last_fill_).count() * fill_rate_) + @@ -17,21 +17,31 @@ bool TokenBucketImpl::consume(uint64_t tokens) { last_fill_ = time_now; } + if (allow_partial) { + tokens = std::min(tokens, static_cast(std::floor(tokens_))); + } + if (tokens_ < tokens) { - return false; + return 0; } tokens_ -= tokens; - return true; + return tokens; } -uint64_t TokenBucketImpl::nextTokenAvailableMs() { +std::chrono::milliseconds TokenBucketImpl::nextTokenAvailable() { // If there are tokens available, return immediately. 
if (tokens_ >= 1) { - return 0; + return std::chrono::milliseconds(0); } // TODO(ramaraochavali): implement a more precise way that works for very low rate limits. - return (1 / fill_rate_) * 1000; + return std::chrono::milliseconds(static_cast(std::ceil((1 / fill_rate_) * 1000))); +} + +void TokenBucketImpl::reset(uint64_t num_tokens) { + ASSERT(num_tokens <= max_tokens_); + tokens_ = num_tokens; + last_fill_ = time_source_.monotonicTime(); } } // namespace Envoy diff --git a/source/common/common/token_bucket_impl.h b/source/common/common/token_bucket_impl.h index 4176370a60680..644a4185dd5ab 100644 --- a/source/common/common/token_bucket_impl.h +++ b/source/common/common/token_bucket_impl.h @@ -20,9 +20,10 @@ class TokenBucketImpl : public TokenBucket { */ explicit TokenBucketImpl(uint64_t max_tokens, TimeSource& time_source, double fill_rate = 1); - bool consume(uint64_t tokens = 1) override; - - uint64_t nextTokenAvailableMs() override; + // TokenBucket + uint64_t consume(uint64_t tokens, bool allow_partial) override; + std::chrono::milliseconds nextTokenAvailable() override; + void reset(uint64_t num_tokens) override; private: const double max_tokens_; diff --git a/source/common/common/utility.cc b/source/common/common/utility.cc index 6de294c8f41ac..83ed53c3b9526 100644 --- a/source/common/common/utility.cc +++ b/source/common/common/utility.cc @@ -18,6 +18,7 @@ #include "absl/strings/match.h" #include "absl/strings/str_join.h" #include "absl/strings/str_split.h" +#include "absl/time/time.h" #include "spdlog/spdlog.h" namespace Envoy { @@ -41,10 +42,11 @@ std::string DateFormatter::fromTime(const SystemTime& time) const { // is 10. size_t seconds_length; - // A container object to hold a strftime'd string, its timestamp (in seconds) and a list of - // position offsets for each specifier found in a format string. 
+ // A container object to hold a absl::FormatTime string, its timestamp (in seconds) and a list + // of position offsets for each specifier found in a format string. struct Formatted { - // The resulted string after format string is passed to strftime at a given point in time. + // The resulted string after format string is passed to absl::FormatTime at a given point in + // time. std::string str; // A timestamp (in seconds) when this object is created. @@ -52,7 +54,7 @@ std::string DateFormatter::fromTime(const SystemTime& time) const { // List of offsets for each specifier found in a format string. This is needed to compensate // the position of each recorded specifier due to the possible size change of the previous - // segment (after strftime'd). + // segment (after being formatted). SpecifierOffsets specifier_offsets; }; // A map is used to keep different formatted format strings at a given second. @@ -163,31 +165,28 @@ void DateFormatter::parse(const std::string& format_string) { std::string DateFormatter::fromTimeAndPrepareSpecifierOffsets(time_t time, SpecifierOffsets& specifier_offsets, const std::string& seconds_str) const { - tm current_tm; - gmtime_r(&time, ¤t_tm); - - std::array buf; - std::string formatted; + std::string formatted_time; size_t previous = 0; specifier_offsets.reserve(specifiers_.size()); for (const auto& specifier : specifiers_) { - const size_t formatted_length = - strftime(&buf[0], buf.size(), specifier.segment_.c_str(), ¤t_tm); - absl::StrAppend(&formatted, absl::string_view(&buf[0], formatted_length), + std::string current_format = + absl::FormatTime(specifier.segment_, absl::FromTimeT(time), absl::UTCTimeZone()); + absl::StrAppend(&formatted_time, current_format, specifier.second_ ? seconds_str : std::string(specifier.width_, '?')); // This computes and saves offset of each specifier's pattern to correct its position after the // previous string segment is formatted. An offset can be a negative value. 
// // If the current specifier is a second specifier (%s), it needs to be corrected by 2. - const int32_t offset = (formatted_length + (specifier.second_ ? (seconds_str.size() - 2) : 0)) - - specifier.segment_.size(); + const int32_t offset = + (current_format.length() + (specifier.second_ ? (seconds_str.size() - 2) : 0)) - + specifier.segment_.size(); specifier_offsets.emplace_back(previous + offset); previous += offset; } - return formatted; + return formatted_time; } std::string DateFormatter::now(TimeSource& time_source) { @@ -217,23 +216,23 @@ bool DateUtil::timePointValid(MonotonicTime time_point) { const char StringUtil::WhitespaceChars[] = " \t\f\v\n\r"; -const char* StringUtil::strtoul(const char* str, uint64_t& out, int base) { +const char* StringUtil::strtoull(const char* str, uint64_t& out, int base) { if (strlen(str) == 0) { return nullptr; } char* end_ptr; errno = 0; - out = ::strtoul(str, &end_ptr, base); - if (end_ptr == str || (out == ULONG_MAX && errno == ERANGE)) { + out = std::strtoull(str, &end_ptr, base); + if (end_ptr == str || (out == ULLONG_MAX && errno == ERANGE)) { return nullptr; } else { return end_ptr; } } -bool StringUtil::atoul(const char* str, uint64_t& out, int base) { - const char* end_ptr = StringUtil::strtoul(str, out, base); +bool StringUtil::atoull(const char* str, uint64_t& out, int base) { + const char* end_ptr = StringUtil::strtoull(str, out, base); if (end_ptr == nullptr || *end_ptr != '\0') { return false; } else { @@ -241,21 +240,6 @@ bool StringUtil::atoul(const char* str, uint64_t& out, int base) { } } -bool StringUtil::atol(const char* str, int64_t& out, int base) { - if (strlen(str) == 0) { - return false; - } - - char* end_ptr; - errno = 0; - out = strtol(str, &end_ptr, base); - if (*end_ptr != '\0' || ((out == LONG_MAX || out == LONG_MIN) && errno == ERANGE)) { - return false; - } else { - return true; - } -} - absl::string_view StringUtil::ltrim(absl::string_view source) { const absl::string_view::size_type pos = 
source.find_first_not_of(WhitespaceChars); if (pos != absl::string_view::npos) { @@ -408,46 +392,38 @@ std::string StringUtil::escape(const std::string& source) { return ret; } -std::string AccessLogDateTimeFormatter::fromTime(const SystemTime& time) { - static const char DefaultDateFormat[] = "%Y-%m-%dT%H:%M:%S.000Z"; +std::string AccessLogDateTimeFormatter::fromTime(const SystemTime& system_time) { + static const std::string DefaultDateFormat = "%Y-%m-%dT%H:%M:%E3SZ"; struct CachedTime { std::chrono::seconds epoch_time_seconds; - size_t formatted_time_length{0}; - char formatted_time[32]; + std::string formatted_time; }; static thread_local CachedTime cached_time; const std::chrono::milliseconds epoch_time_ms = - std::chrono::duration_cast(time.time_since_epoch()); + std::chrono::duration_cast(system_time.time_since_epoch()); const std::chrono::seconds epoch_time_seconds = std::chrono::duration_cast(epoch_time_ms); - if (cached_time.formatted_time_length == 0 || - cached_time.epoch_time_seconds != epoch_time_seconds) { - time_t time = static_cast(epoch_time_seconds.count()); - tm date_time; - gmtime_r(&time, &date_time); - cached_time.formatted_time_length = - strftime(cached_time.formatted_time, sizeof(cached_time.formatted_time), DefaultDateFormat, - &date_time); + if (cached_time.formatted_time.empty() || cached_time.epoch_time_seconds != epoch_time_seconds) { + cached_time.formatted_time = + absl::FormatTime(DefaultDateFormat, absl::FromChrono(system_time), absl::UTCTimeZone()); cached_time.epoch_time_seconds = epoch_time_seconds; + } else { + // Overwrite the digits in the ".000Z" at the end of the string with the + // millisecond count from the input time. 
+ ASSERT(cached_time.formatted_time.length() == 24); + size_t offset = cached_time.formatted_time.length() - 4; + uint32_t msec = epoch_time_ms.count() % 1000; + cached_time.formatted_time[offset++] = ('0' + (msec / 100)); + msec %= 100; + cached_time.formatted_time[offset++] = ('0' + (msec / 10)); + msec %= 10; + cached_time.formatted_time[offset++] = ('0' + msec); } - ASSERT(cached_time.formatted_time_length == 24 && - cached_time.formatted_time_length < sizeof(cached_time.formatted_time)); - - // Overwrite the digits in the ".000Z" at the end of the string with the - // millisecond count from the input time. - size_t offset = cached_time.formatted_time_length - 4; - uint32_t msec = epoch_time_ms.count() % 1000; - cached_time.formatted_time[offset++] = ('0' + (msec / 100)); - msec %= 100; - cached_time.formatted_time[offset++] = ('0' + (msec / 10)); - msec %= 10; - cached_time.formatted_time[offset++] = ('0' + msec); - return cached_time.formatted_time; } diff --git a/source/common/common/utility.h b/source/common/common/utility.h index 9be158ad431fb..0de2bb32709d8 100644 --- a/source/common/common/utility.h +++ b/source/common/common/utility.h @@ -19,7 +19,7 @@ namespace Envoy { /** - * Utility class for formatting dates given a strftime style format string. + * Utility class for formatting dates given an absl::FormatTime style format string. */ class DateFormatter { public: @@ -68,8 +68,9 @@ class DateFormatter { const size_t width_; // The string before the current specifier's position and after the previous found specifier. A - // segment may include strftime accepted specifiers. E.g. given "%3f-this-i%s-a-segment-%4f", - // the current specifier is "%4f" and the segment is "-this-i%s-a-segment-". + // segment may include absl::FormatTime accepted specifiers. E.g. given + // "%3f-this-i%s-a-segment-%4f", the current specifier is "%4f" and the segment is + // "-this-i%s-a-segment-". 
const std::string segment_; // As an indication that this specifier is a %s (expect to be replaced by seconds since the @@ -149,19 +150,16 @@ class StringUtil { * Convert a string to an unsigned long, checking for error. * @return pointer to the remainder of 'str' if successful, nullptr otherwise. */ - static const char* strtoul(const char* str, uint64_t& out, int base = 10); + static const char* strtoull(const char* str, uint64_t& out, int base = 10); /** * Convert a string to an unsigned long, checking for error. + * + * Consider absl::SimpleAtoi instead if using base 10. + * * @param return true if successful, false otherwise. */ - static bool atoul(const char* str, uint64_t& out, int base = 10); - - /** - * Convert a string to a long, checking for error. - * @param return true if successful, false otherwise. - */ - static bool atol(const char* str, int64_t& out, int base = 10); + static bool atoull(const char* str, uint64_t& out, int base = 10); /** * Convert an unsigned integer to a base 10 string as fast as possible. @@ -341,6 +339,8 @@ class StringUtil { * @return true if strings are semantically the same and false otherwise. */ struct CaseInsensitiveCompare { + // Enable heterogeneous lookup (https://abseil.io/tips/144) + using is_transparent = void; bool operator()(absl::string_view lhs, absl::string_view rhs) const; }; @@ -350,13 +350,15 @@ class StringUtil { * @return uint64_t hash representation of the supplied string view. */ struct CaseInsensitiveHash { + // Enable heterogeneous lookup (https://abseil.io/tips/144) + using is_transparent = void; uint64_t operator()(absl::string_view key) const; }; /** * Definition of unordered set of case-insensitive std::string. */ - typedef std::unordered_set + typedef absl::flat_hash_set CaseUnorderedSet; /** @@ -567,17 +569,23 @@ template struct TrieLookupTable { * Adds an entry to the Trie at the given Key. * @param key the key used to add the entry. * @param value the value to be associated with the key. 
+ * @param overwrite_existing will overwrite the value when the value for a given key already + * exists. + * @return false when a value already exists for the given key. */ - void add(const char* key, Value value) { + bool add(absl::string_view key, Value value, bool overwrite_existing = true) { TrieEntry* current = &root_; - while (uint8_t c = *key) { + for (uint8_t c : key) { if (!current->entries_[c]) { current->entries_[c] = std::make_unique>(); } current = current->entries_[c].get(); - key++; + } + if (current->value_ && !overwrite_existing) { + return false; } current->value_ = value; + return true; } /** @@ -585,19 +593,42 @@ template struct TrieLookupTable { * @param key the key used to find. * @return the value associated with the key. */ - Value find(const char* key) const { + Value find(absl::string_view key) const { const TrieEntry* current = &root_; - while (uint8_t c = *key) { + for (uint8_t c : key) { current = current->entries_[c].get(); - if (current) { - key++; - } else { + if (current == nullptr) { return nullptr; } } return current->value_; } + /** + * Finds the entry associated with the longest prefix. Complexity is O(min(longest key prefix, key + * length)) + * @param key the key used to find. + * @return the value matching the longest prefix based on the key. + */ + Value findLongestPrefix(const char* key) const { + const TrieEntry* current = &root_; + const TrieEntry* result = nullptr; + while (uint8_t c = *key) { + if (current->value_) { + result = current; + } + + // https://github.com/facebook/mcrouter/blob/master/mcrouter/lib/fbi/cpp/Trie-inl.h#L126-L143 + current = current->entries_[c].get(); + if (current == nullptr) { + return result ? result->value_ : nullptr; + } + + key++; + } + return current ? 
current->value_ : result->value_; + } + TrieEntry root_; }; diff --git a/source/common/common/version.cc b/source/common/common/version.cc index e57ce16c75c64..ec11ffc489a3b 100644 --- a/source/common/common/version.cc +++ b/source/common/common/version.cc @@ -4,6 +4,7 @@ #include "common/common/fmt.h" #include "common/common/macros.h" +#include "common/common/version_linkstamp.h" extern const char build_scm_revision[]; extern const char build_scm_status[]; diff --git a/source/common/common/win32/thread_impl.cc b/source/common/common/win32/thread_impl.cc index 28bd8b189d211..bee7b9f2f9799 100644 --- a/source/common/common/win32/thread_impl.cc +++ b/source/common/common/win32/thread_impl.cc @@ -15,13 +15,13 @@ bool ThreadIdImplWin32::isCurrentThreadId() const { return id_ == ::GetCurrentTh ThreadImplWin32::ThreadImplWin32(std::function thread_routine) : thread_routine_(thread_routine) { RELEASE_ASSERT(Logger::Registry::initialized(), ""); - thread_handle_ = reinterpret_cast( - ::_beginthreadex(nullptr, 0, - [](void* arg) -> unsigned int { - static_cast(arg)->thread_routine_(); - return 0; - }, - this, 0, nullptr)); + thread_handle_ = reinterpret_cast(::_beginthreadex( + nullptr, 0, + [](void* arg) -> unsigned int { + static_cast(arg)->thread_routine_(); + return 0; + }, + this, 0, nullptr)); RELEASE_ASSERT(thread_handle_ != 0, ""); } diff --git a/source/common/config/BUILD b/source/common/config/BUILD index 428c014b194db..7bb35c79a7dbd 100644 --- a/source/common/config/BUILD +++ b/source/common/config/BUILD @@ -35,7 +35,6 @@ envoy_cc_library( "//source/common/common:assert_lib", "//source/common/json:config_schemas_lib", "//source/common/protobuf:utility_lib", - "//source/common/stats:stats_lib", "//source/extensions/stat_sinks:well_known_names", "@envoy_api//envoy/config/bootstrap/v2:bootstrap_cc", ], @@ -67,7 +66,6 @@ envoy_cc_library( "//source/common/common:assert_lib", "//source/common/json:config_schemas_lib", "//source/common/network:utility_lib", - 
"//source/common/stats:stats_lib", "@envoy_api//envoy/api/v2:cds_cc", "@envoy_api//envoy/api/v2/cluster:circuit_breaker_cc", ], @@ -75,6 +73,7 @@ envoy_cc_library( envoy_cc_library( name = "filesystem_subscription_lib", + srcs = ["filesystem_subscription_impl.cc"], hdrs = ["filesystem_subscription_impl.h"], deps = [ "//include/envoy/config:subscription_interface", @@ -97,6 +96,42 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "delta_subscription_lib", + srcs = ["delta_subscription_impl.cc"], + hdrs = ["delta_subscription_impl.h"], + deps = [ + ":delta_subscription_state_lib", + ":grpc_stream_lib", + ":utility_lib", + "//include/envoy/config:subscription_interface", + "//include/envoy/grpc:async_client_interface", + "//include/envoy/local_info:local_info_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//source/common/common:backoff_lib", + "//source/common/common:minimal_logger_lib", + "//source/common/common:token_bucket_impl_lib", + "//source/common/protobuf", + ], +) + +envoy_cc_library( + name = "delta_subscription_state_lib", + srcs = ["delta_subscription_state.cc"], + hdrs = ["delta_subscription_state.h"], + deps = [ + "//include/envoy/config:subscription_interface", + "//include/envoy/event:dispatcher_interface", + "//source/common/common:assert_lib", + "//source/common/common:backoff_lib", + "//source/common/common:minimal_logger_lib", + "//source/common/common:token_bucket_impl_lib", + "//source/common/grpc:common_lib", + "//source/common/protobuf", + "@envoy_api//envoy/api/v2:discovery_cc", + ], +) + envoy_cc_library( name = "grpc_stream_lib", hdrs = ["grpc_stream.h"], @@ -104,6 +139,7 @@ envoy_cc_library( ":utility_lib", "//include/envoy/config:grpc_mux_interface", "//include/envoy/config:subscription_interface", + "//include/envoy/config:xds_grpc_context_interface", "//include/envoy/grpc:async_client_interface", "//include/envoy/upstream:cluster_manager_interface", "//source/common/common:backoff_lib", @@ -169,10 +205,12 @@ 
envoy_cc_library( envoy_cc_library( name = "grpc_mux_subscription_lib", + srcs = ["grpc_mux_subscription_impl.cc"], hdrs = ["grpc_mux_subscription_impl.h"], deps = [ "//include/envoy/config:grpc_mux_interface", "//include/envoy/config:subscription_interface", + "//include/envoy/event:dispatcher_interface", "//source/common/common:assert_lib", "//source/common/common:minimal_logger_lib", "//source/common/grpc:common_lib", @@ -196,12 +234,14 @@ envoy_cc_library( envoy_cc_library( name = "http_subscription_lib", + srcs = ["http_subscription_impl.cc"], hdrs = ["http_subscription_impl.h"], external_deps = [ "http_api_protos", ], deps = [ "//include/envoy/config:subscription_interface", + "//include/envoy/event:dispatcher_interface", "//source/common/buffer:buffer_lib", "//source/common/common:minimal_logger_lib", "//source/common/config:utility_lib", @@ -236,7 +276,6 @@ envoy_cc_library( "//source/common/common:assert_lib", "//source/common/json:config_schemas_lib", "//source/common/network:utility_lib", - "//source/common/stats:stats_lib", "//source/extensions/filters/network:well_known_names", "@envoy_api//envoy/api/v2:lds_cc", ], @@ -294,7 +333,6 @@ envoy_cc_library( "//source/common/common:assert_lib", "//source/common/config:utility_lib", "//source/common/json:config_schemas_lib", - "//source/common/stats:stats_lib", "//source/extensions/filters/http:well_known_names", "@envoy_api//envoy/api/v2:rds_cc", ], @@ -304,6 +342,7 @@ envoy_cc_library( name = "subscription_factory_lib", hdrs = ["subscription_factory.h"], deps = [ + ":delta_subscription_lib", ":filesystem_subscription_lib", ":grpc_mux_subscription_lib", ":grpc_subscription_lib", @@ -375,11 +414,12 @@ envoy_cc_library( ":utility_lib", "//include/envoy/config:config_provider_interface", "//include/envoy/config:config_provider_manager_interface", - "//include/envoy/init:init_interface", + "//include/envoy/init:manager_interface", "//include/envoy/server:admin_interface", 
"//include/envoy/server:config_tracker_interface", "//include/envoy/singleton:instance_interface", "//include/envoy/thread_local:thread_local_interface", + "//source/common/init:target_lib", "//source/common/protobuf", ], ) diff --git a/source/common/config/bootstrap_json.cc b/source/common/config/bootstrap_json.cc index 31dbc6686bfe9..e42c8fe53b983 100644 --- a/source/common/config/bootstrap_json.cc +++ b/source/common/config/bootstrap_json.cc @@ -1,7 +1,5 @@ #include "common/config/bootstrap_json.h" -#include "envoy/stats/stats_options.h" - #include "common/common/assert.h" #include "common/config/address_json.h" #include "common/config/cds_json.h" @@ -17,8 +15,7 @@ namespace Envoy { namespace Config { void BootstrapJson::translateClusterManagerBootstrap( - const Json::Object& json_cluster_manager, envoy::config::bootstrap::v2::Bootstrap& bootstrap, - const Stats::StatsOptions& stats_options) { + const Json::Object& json_cluster_manager, envoy::config::bootstrap::v2::Bootstrap& bootstrap) { json_cluster_manager.validateSchema(Json::Schema::CLUSTER_MANAGER_SCHEMA); absl::optional eds_config; @@ -27,14 +24,13 @@ void BootstrapJson::translateClusterManagerBootstrap( auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters()->Add(); Config::CdsJson::translateCluster(*json_sds->getObject("cluster"), absl::optional(), - *cluster, stats_options); + *cluster); } if (json_cluster_manager.hasObject("cds")) { const auto json_cds = json_cluster_manager.getObject("cds"); auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters()->Add(); - Config::CdsJson::translateCluster(*json_cds->getObject("cluster"), eds_config, *cluster, - stats_options); + Config::CdsJson::translateCluster(*json_cds->getObject("cluster"), eds_config, *cluster); Config::Utility::translateCdsConfig( *json_cds, *bootstrap.mutable_dynamic_resources()->mutable_cds_config()); } @@ -42,7 +38,7 @@ void BootstrapJson::translateClusterManagerBootstrap( for (const Json::ObjectSharedPtr& 
json_cluster : json_cluster_manager.getObjectArray("clusters")) { auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters()->Add(); - Config::CdsJson::translateCluster(*json_cluster, eds_config, *cluster, stats_options); + Config::CdsJson::translateCluster(*json_cluster, eds_config, *cluster); } auto* cluster_manager = bootstrap.mutable_cluster_manager(); @@ -54,12 +50,10 @@ void BootstrapJson::translateClusterManagerBootstrap( } void BootstrapJson::translateBootstrap(const Json::Object& json_config, - envoy::config::bootstrap::v2::Bootstrap& bootstrap, - const Stats::StatsOptions& stats_options) { + envoy::config::bootstrap::v2::Bootstrap& bootstrap) { json_config.validateSchema(Json::Schema::TOP_LEVEL_CONFIG_SCHEMA); - translateClusterManagerBootstrap(*json_config.getObject("cluster_manager"), bootstrap, - stats_options); + translateClusterManagerBootstrap(*json_config.getObject("cluster_manager"), bootstrap); if (json_config.hasObject("lds")) { auto* lds_config = bootstrap.mutable_dynamic_resources()->mutable_lds_config(); @@ -68,7 +62,7 @@ void BootstrapJson::translateBootstrap(const Json::Object& json_config, for (const auto json_listener : json_config.getObjectArray("listeners")) { auto* listener = bootstrap.mutable_static_resources()->mutable_listeners()->Add(); - Config::LdsJson::translateListener(*json_listener, *listener, stats_options); + Config::LdsJson::translateListener(*json_listener, *listener); } JSON_UTIL_SET_STRING(json_config, bootstrap, flags_path); diff --git a/source/common/config/bootstrap_json.h b/source/common/config/bootstrap_json.h index e55397521c593..80c567d7264ad 100644 --- a/source/common/config/bootstrap_json.h +++ b/source/common/config/bootstrap_json.h @@ -2,7 +2,6 @@ #include "envoy/config/bootstrap/v2/bootstrap.pb.h" #include "envoy/json/json_object.h" -#include "envoy/stats/stats_options.h" namespace Envoy { namespace Config { @@ -15,8 +14,7 @@ class BootstrapJson { * @param bootstrap destination v2 
envoy::config::bootstrap::v2::Bootstrap. */ static void translateClusterManagerBootstrap(const Json::Object& json_cluster_manager, - envoy::config::bootstrap::v2::Bootstrap& bootstrap, - const Stats::StatsOptions& stats_options); + envoy::config::bootstrap::v2::Bootstrap& bootstrap); /** * Translate a v1 JSON static config object to v2 envoy::config::bootstrap::v2::Bootstrap. @@ -24,8 +22,7 @@ class BootstrapJson { * @param bootstrap destination v2 envoy::config::bootstrap::v2::Bootstrap. */ static void translateBootstrap(const Json::Object& json_config, - envoy::config::bootstrap::v2::Bootstrap& bootstrap, - const Stats::StatsOptions& stats_options); + envoy::config::bootstrap::v2::Bootstrap& bootstrap); }; } // namespace Config diff --git a/source/common/config/cds_json.cc b/source/common/config/cds_json.cc index 6addcc437a5d3..95fede2e33c45 100644 --- a/source/common/config/cds_json.cc +++ b/source/common/config/cds_json.cc @@ -15,8 +15,6 @@ void CdsJson::translateRingHashLbConfig( const Json::Object& json_ring_hash_lb_config, envoy::api::v2::Cluster::RingHashLbConfig& ring_hash_lb_config) { JSON_UTIL_SET_INTEGER(json_ring_hash_lb_config, ring_hash_lb_config, minimum_ring_size); - JSON_UTIL_SET_BOOL(json_ring_hash_lb_config, *ring_hash_lb_config.mutable_deprecated_v1(), - use_std_hash); } void CdsJson::translateHealthCheck(const Json::Object& json_health_check, @@ -101,12 +99,10 @@ void CdsJson::translateOutlierDetection( void CdsJson::translateCluster(const Json::Object& json_cluster, const absl::optional& eds_config, - envoy::api::v2::Cluster& cluster, - const Stats::StatsOptions& stats_options) { + envoy::api::v2::Cluster& cluster) { json_cluster.validateSchema(Json::Schema::CLUSTER_SCHEMA); const std::string name = json_cluster.getString("name"); - Utility::checkObjNameLength("Invalid cluster name", name, stats_options); cluster.set_name(name); const std::string string_type = json_cluster.getString("type"); diff --git a/source/common/config/cds_json.h 
b/source/common/config/cds_json.h index 1cb47f2555bcc..f2995074f79ac 100644 --- a/source/common/config/cds_json.h +++ b/source/common/config/cds_json.h @@ -3,7 +3,6 @@ #include "envoy/api/v2/cds.pb.h" #include "envoy/api/v2/cluster/circuit_breaker.pb.h" #include "envoy/json/json_object.h" -#include "envoy/stats/stats_options.h" #include "envoy/upstream/cluster_manager.h" #include "absl/types/optional.h" @@ -65,8 +64,7 @@ class CdsJson { */ static void translateCluster(const Json::Object& json_cluster, const absl::optional& eds_config, - envoy::api::v2::Cluster& cluster, - const Stats::StatsOptions& stats_options); + envoy::api::v2::Cluster& cluster); }; } // namespace Config diff --git a/source/common/config/config_provider_impl.cc b/source/common/config/config_provider_impl.cc index 541c767412aac..f13058d631dfa 100644 --- a/source/common/config/config_provider_impl.cc +++ b/source/common/config/config_provider_impl.cc @@ -3,36 +3,51 @@ namespace Envoy { namespace Config { -ImmutableConfigProviderImplBase::ImmutableConfigProviderImplBase( +ImmutableConfigProviderBase::ImmutableConfigProviderBase( Server::Configuration::FactoryContext& factory_context, - ConfigProviderManagerImplBase& config_provider_manager, ConfigProviderInstanceType type) + ConfigProviderManagerImplBase& config_provider_manager, + ConfigProviderInstanceType instance_type, ApiType api_type) : last_updated_(factory_context.timeSource().systemTime()), - config_provider_manager_(config_provider_manager), type_(type) { + config_provider_manager_(config_provider_manager), instance_type_(instance_type), + api_type_(api_type) { config_provider_manager_.bindImmutableConfigProvider(this); } -ImmutableConfigProviderImplBase::~ImmutableConfigProviderImplBase() { +ImmutableConfigProviderBase::~ImmutableConfigProviderBase() { config_provider_manager_.unbindImmutableConfigProvider(this); } -ConfigSubscriptionInstanceBase::~ConfigSubscriptionInstanceBase() { - runInitializeCallbackIfAny(); 
+ConfigSubscriptionCommonBase::~ConfigSubscriptionCommonBase() { + init_target_.ready(); config_provider_manager_.unbindSubscription(manager_identifier_); } -void ConfigSubscriptionInstanceBase::runInitializeCallbackIfAny() { - if (initialize_callback_) { - initialize_callback_(); - initialize_callback_ = nullptr; - } +void ConfigSubscriptionCommonBase::bindConfigProvider(MutableConfigProviderCommonBase* provider) { + // All config providers bound to a ConfigSubscriptionCommonBase must be of the same concrete + // type; this is assumed by ConfigSubscriptionInstance::checkAndApplyConfigUpdate() and is + // verified by the assertion below. NOTE: an inlined statement ASSERT() triggers a potentially + // evaluated expression warning from clang due to `typeid(**mutable_config_providers_.begin())`. + // To avoid this, we use a lambda to separate the first mutable provider dereference from the + // typeid() statement. + ASSERT([&]() { + if (!mutable_config_providers_.empty()) { + const auto& first_provider = **mutable_config_providers_.begin(); + return typeid(*provider) == typeid(first_provider); + } + return true; + }()); + mutable_config_providers_.insert(provider); } -bool ConfigSubscriptionInstanceBase::checkAndApplyConfig(const Protobuf::Message& config_proto, - const std::string& config_name, - const std::string& version_info) { +bool ConfigSubscriptionInstance::checkAndApplyConfigUpdate(const Protobuf::Message& config_proto, + const std::string& config_name, + const std::string& version_info) { const uint64_t new_hash = MessageUtil::hash(config_proto); - if (config_info_ && config_info_.value().last_config_hash_ == new_hash) { - return false; + if (config_info_) { + ASSERT(config_info_.value().last_config_hash_.has_value()); + if (config_info_.value().last_config_hash_.value() == new_hash) { + return false; + } } config_info_ = {new_hash, version_info}; @@ -46,31 +61,39 @@ bool ConfigSubscriptionInstanceBase::checkAndApplyConfig(const Protobuf::Message // 
bindConfigProvider()). // This makes it safe to call any of the provider's onConfigProtoUpdate() to get a new config // impl, which can then be passed to all providers. + auto* typed_provider = static_cast(provider); if (new_config == nullptr) { - if ((new_config = provider->onConfigProtoUpdate(config_proto)) == nullptr) { + if ((new_config = typed_provider->onConfigProtoUpdate(config_proto)) == nullptr) { return false; } } - provider->onConfigUpdate(new_config); + typed_provider->onConfigUpdate(new_config); } return true; } -void ConfigSubscriptionInstanceBase::bindConfigProvider(MutableConfigProviderImplBase* provider) { - // All config providers bound to a ConfigSubscriptionInstanceBase must be of the same concrete - // type; this is assumed by checkAndApplyConfig() and is verified by the assertion below. - // NOTE: an inlined statement ASSERT() triggers a potentially evaluated expression warning from - // clang due to `typeid(**mutable_config_providers_.begin())`. To avoid this, we use a lambda to - // separate the first mutable provider dereference from the typeid() statement. - ASSERT([&]() { - if (!mutable_config_providers_.empty()) { - const auto& first_provider = **mutable_config_providers_.begin(); - return typeid(*provider) == typeid(first_provider); - } - return true; - }()); - mutable_config_providers_.insert(provider); +void DeltaConfigSubscriptionInstance::applyDeltaConfigUpdate( + const std::function& update_fn) { + // The Config implementation is assumed to be shared across the config providers bound to this + // subscription, therefore, simply propagating the update to all worker threads for a single bound + // provider will be sufficient. 
+ if (mutable_config_providers_.size() > 1) { + ASSERT(static_cast(*mutable_config_providers_.begin()) + ->getConfig() == static_cast( + *std::next(mutable_config_providers_.begin())) + ->getConfig()); + } + + // TODO(AndresGuedez): currently, the caller has to compute the differences in resources between + // DS API config updates and passes a granular update_fn() that adds/modifies/removes resources as + // needed. Such logic could be generalized as part of this framework such that this function owns + // the diffing and issues the corresponding call to add/modify/remove a resource according to a + // vector of functions passed by the caller. + auto* typed_provider = + static_cast(getAnyBoundMutableConfigProvider()); + ConfigSharedPtr config = typed_provider->getConfig(); + typed_provider->onConfigUpdate([config, update_fn]() { update_fn(config); }); } ConfigProviderManagerImplBase::ConfigProviderManagerImplBase(Server::Admin& admin, @@ -94,14 +117,14 @@ ConfigProviderManagerImplBase::immutableConfigProviders(ConfigProviderInstanceTy } void ConfigProviderManagerImplBase::bindImmutableConfigProvider( - ImmutableConfigProviderImplBase* provider) { - ASSERT(provider->type() == ConfigProviderInstanceType::Static || - provider->type() == ConfigProviderInstanceType::Inline); + ImmutableConfigProviderBase* provider) { + ASSERT(provider->instanceType() == ConfigProviderInstanceType::Static || + provider->instanceType() == ConfigProviderInstanceType::Inline); ConfigProviderMap::iterator it; - if ((it = immutable_config_providers_map_.find(provider->type())) == + if ((it = immutable_config_providers_map_.find(provider->instanceType())) == immutable_config_providers_map_.end()) { immutable_config_providers_map_.insert(std::make_pair( - provider->type(), + provider->instanceType(), std::make_unique(std::initializer_list({provider})))); } else { it->second->insert(provider); @@ -109,10 +132,10 @@ void ConfigProviderManagerImplBase::bindImmutableConfigProvider( } void 
ConfigProviderManagerImplBase::unbindImmutableConfigProvider( - ImmutableConfigProviderImplBase* provider) { - ASSERT(provider->type() == ConfigProviderInstanceType::Static || - provider->type() == ConfigProviderInstanceType::Inline); - auto it = immutable_config_providers_map_.find(provider->type()); + ImmutableConfigProviderBase* provider) { + ASSERT(provider->instanceType() == ConfigProviderInstanceType::Static || + provider->instanceType() == ConfigProviderInstanceType::Inline); + auto it = immutable_config_providers_map_.find(provider->instanceType()); ASSERT(it != immutable_config_providers_map_.end()); it->second->erase(provider); } diff --git a/source/common/config/config_provider_impl.h b/source/common/config/config_provider_impl.h index 6a53795481f15..ddcd1232caa18 100644 --- a/source/common/config/config_provider_impl.h +++ b/source/common/config/config_provider_impl.h @@ -4,7 +4,7 @@ #include "envoy/config/config_provider.h" #include "envoy/config/config_provider_manager.h" -#include "envoy/init/init.h" +#include "envoy/init/manager.h" #include "envoy/server/admin.h" #include "envoy/server/config_tracker.h" #include "envoy/singleton/instance.h" @@ -13,19 +13,21 @@ #include "common/common/thread.h" #include "common/common/utility.h" #include "common/config/utility.h" +#include "common/init/target_impl.h" #include "common/protobuf/protobuf.h" namespace Envoy { namespace Config { -// This file provides a set of base classes, (ImmutableConfigProviderImplBase, -// MutableConfigProviderImplBase, ConfigProviderManagerImplBase, ConfigSubscriptionInstanceBase), -// conforming to the ConfigProvider/ConfigProviderManager interfaces, which in tandem provide a -// framework for implementing statically defined (i.e., immutable) and dynamic (mutable via -// subscriptions) configuration for Envoy. 
+// This file provides a set of base classes, (ImmutableConfigProviderBase, +// MutableConfigProviderCommonBase, MutableConfigProviderBase, DeltaMutableConfigProviderBase, +// ConfigProviderManagerImplBase, ConfigSubscriptionCommonBase, ConfigSubscriptionInstance, +// DeltaConfigSubscriptionInstance), conforming to the ConfigProvider/ConfigProviderManager +// interfaces, which in tandem provide a framework for implementing statically defined (i.e., +// immutable) and dynamic (mutable via subscriptions) configuration for Envoy. // // The mutability property applies to the ConfigProvider itself and _not_ the underlying config -// proto, which is always immutable. MutableConfigProviderImplBase objects receive config proto +// proto, which is always immutable. MutableConfigProviderCommonBase objects receive config proto // updates via xDS subscriptions, resulting in new ConfigProvider::Config objects being instantiated // with the corresponding change in behavior corresponding to updated config. ConfigProvider::Config // objects must be latched/associated with the appropriate objects in the connection and request @@ -48,27 +50,36 @@ namespace Config { // 1) Create a class derived from ConfigProviderManagerImplBase and implement the required // interface. // When implementing createXdsConfigProvider(), it is expected that getSubscription() will -// be called to fetch either an existing ConfigSubscriptionInstanceBase if the config source -// configuration matches, or a newly instantiated subscription otherwise. +// be called to fetch either an existing ConfigSubscriptionCommonBase if the config +// source configuration matches, or a newly instantiated subscription otherwise. // // For immutable providers: -// 1) Create a class derived from ImmutableConfigProviderImplBase and implement the required +// 1) Create a class derived from ImmutableConfigProviderBase and implement the required // interface. 
// // For mutable (xDS) providers: -// 1) Create a class derived from MutableConfigProviderImplBase and implement the required -// interface. -// 2) Create a class derived from ConfigSubscriptionInstanceBase; this is the entity -// responsible for owning and managing the Envoy::Config::Subscription that provides -// the underlying config subscription. +// 1) According to the API type, create a class derived from MutableConfigProviderBase or +// DeltaMutableConfigProviderBase and implement the required interface. +// 2) According to the API type, create a class derived from ConfigSubscriptionInstance or +// DeltaConfigSubscriptionInstance; this is the entity responsible for owning and managing the +// Envoy::Config::Subscription that provides the underlying config subscription. +// a) For a ConfigProvider::ApiType::Full subscription instance (i.e., a +// ConfigSubscriptionInstance child): // - When subscription callbacks (onConfigUpdate, onConfigUpdateFailed) are issued by the -// underlying subscription, the corresponding ConfigSubscriptionInstanceBase functions must be -// called as well. -// - On a successful config update, checkAndApplyConfig() should be called to instantiate the -// new config implementation and propagate it to the shared config providers and all -// worker threads. -// - On a successful return from checkAndApplyConfig(), the config proto must be latched into -// this class and returned via the getConfigProto() override. +// underlying subscription, the corresponding ConfigSubscriptionInstance functions +// must be called as well. +// - On a successful config update, checkAndApplyConfigUpdate() should be called to instantiate +// the new config implementation and propagate it to the shared config providers and all worker +// threads. +// - On a successful return from checkAndApplyConfigUpdate(), the config proto must be latched +// into this class and returned via the getConfigProto() override. 
+// b) For a ConfigProvider::ApiType::Delta subscription instance (i.e., a +// DeltaConfigSubscriptionInstance child): +// - When subscription callbacks (onConfigUpdate, onConfigUpdateFailed) are issued by the +// underlying subscription, the corresponding ConfigSubscriptionInstance functions must be called +// as well. +// - On a successful config update, applyConfigUpdate() should be called to propagate the config +// updates to all bound config providers and worker threads. class ConfigProviderManagerImplBase; @@ -89,38 +100,40 @@ enum class ConfigProviderInstanceType { * ConfigProvider implementation for immutable configuration. * * TODO(AndresGuedez): support sharing of config protos and config impls, as is - * done with the MutableConfigProviderImplBase. + * done with the MutableConfigProviderCommonBase. * * This class can not be instantiated directly; instead, it provides the foundation for * immutable config provider implementations which derive from it. */ -class ImmutableConfigProviderImplBase : public ConfigProvider { +class ImmutableConfigProviderBase : public ConfigProvider { public: - ~ImmutableConfigProviderImplBase() override; + ~ImmutableConfigProviderBase() override; // Envoy::Config::ConfigProvider SystemTime lastUpdated() const override { return last_updated_; } + ApiType apiType() const override { return api_type_; } - ConfigProviderInstanceType type() const { return type_; } + ConfigProviderInstanceType instanceType() const { return instance_type_; } protected: - ImmutableConfigProviderImplBase(Server::Configuration::FactoryContext& factory_context, - ConfigProviderManagerImplBase& config_provider_manager, - ConfigProviderInstanceType type); + ImmutableConfigProviderBase(Server::Configuration::FactoryContext& factory_context, + ConfigProviderManagerImplBase& config_provider_manager, + ConfigProviderInstanceType instance_type, ApiType api_type); private: SystemTime last_updated_; ConfigProviderManagerImplBase& config_provider_manager_; - 
ConfigProviderInstanceType type_; + ConfigProviderInstanceType instance_type_; + ApiType api_type_; }; -class MutableConfigProviderImplBase; +class MutableConfigProviderCommonBase; /** - * Provides generic functionality required by all xDS ConfigProvider subscriptions, including - * shared lifetime management via shared_ptr. + * Provides common DS API subscription functionality required by the ConfigProvider::ApiType + * specific base classes (see ConfigSubscriptionInstance and DeltaConfigSubscriptionInstance). * - * To do so, this class keeps track of a set of MutableConfigProviderImplBase instances associated + * To do so, this class keeps track of a set of MutableConfigProviderCommonBase instances associated * with an underlying subscription; providers are bound/unbound as needed as they are created and * destroyed. * @@ -133,21 +146,14 @@ class MutableConfigProviderImplBase; * This class can not be instantiated directly; instead, it provides the foundation for * config subscription implementations which derive from it. */ -class ConfigSubscriptionInstanceBase : public Init::Target, - protected Logger::Loggable { +class ConfigSubscriptionCommonBase : protected Logger::Loggable { public: struct LastConfigInfo { - uint64_t last_config_hash_; + absl::optional last_config_hash_; std::string last_config_version_; }; - ~ConfigSubscriptionInstanceBase() override; - - // Init::Target - void initialize(std::function callback) override { - initialize_callback_ = callback; - start(); - } + virtual ~ConfigSubscriptionCommonBase(); /** * Starts the subscription corresponding to a config source. @@ -166,96 +172,167 @@ class ConfigSubscriptionInstanceBase : public Init::Target, */ void onConfigUpdate() { setLastUpdated(); - runInitializeCallbackIfAny(); + init_target_.ready(); } /** * Must be called by derived classes when the onConfigUpdateFailed() callback associated with the * underlying subscription is issued. 
*/ - void onConfigUpdateFailed() { runInitializeCallbackIfAny(); } - - /** - * Determines whether a configuration proto is a new update, and if so, propagates it to all - * config providers associated with this subscription. - * @param config_proto supplies the newly received config proto. - * @param config_name supplies the name associated with the config. - * @param version_info supplies the version associated with the config. - * @return bool false when the config proto has no delta from the previous config, true otherwise. - */ - bool checkAndApplyConfig(const Protobuf::Message& config_proto, const std::string& config_name, - const std::string& version_info); + void onConfigUpdateFailed() { + setLastUpdated(); + init_target_.ready(); + } /** * Returns one of the bound mutable config providers. - * @return const MutableConfigProviderImplBase* a const pointer to a - * bound MutableConfigProviderImplBase or nullptr when there are none. + * @return const MutableConfigProviderCommonBase* a const pointer to a + * bound MutableConfigProviderCommonBase or nullptr when there are none. */ - const MutableConfigProviderImplBase* getAnyBoundMutableConfigProvider() const { + MutableConfigProviderCommonBase* getAnyBoundMutableConfigProvider() const { return !mutable_config_providers_.empty() ? 
*mutable_config_providers_.begin() : nullptr; } protected: - ConfigSubscriptionInstanceBase(const std::string& name, const uint64_t manager_identifier, - ConfigProviderManagerImplBase& config_provider_manager, - TimeSource& time_source, const SystemTime& last_updated, - const LocalInfo::LocalInfo& local_info) - : name_(name), manager_identifier_(manager_identifier), - config_provider_manager_(config_provider_manager), time_source_(time_source), - last_updated_(last_updated) { + ConfigSubscriptionCommonBase(const std::string& name, const uint64_t manager_identifier, + ConfigProviderManagerImplBase& config_provider_manager, + TimeSource& time_source, const SystemTime& last_updated, + const LocalInfo::LocalInfo& local_info) + : name_(name), + init_target_(fmt::format("ConfigSubscriptionCommonBase {}", name_), [this]() { start(); }), + manager_identifier_(manager_identifier), config_provider_manager_(config_provider_manager), + time_source_(time_source), last_updated_(last_updated) { Envoy::Config::Utility::checkLocalInfo(name, local_info); } void setLastUpdated() { last_updated_ = time_source_.systemTime(); } - void runInitializeCallbackIfAny(); + void setLastConfigInfo(absl::optional&& config_info) { + config_info_ = std::move(config_info); + } + + const std::string name_; + std::unordered_set mutable_config_providers_; + absl::optional config_info_; private: - void registerInitTarget(Init::Manager& init_manager) { init_manager.registerTarget(*this); } + void bindConfigProvider(MutableConfigProviderCommonBase* provider); - void bindConfigProvider(MutableConfigProviderImplBase* provider); - - void unbindConfigProvider(MutableConfigProviderImplBase* provider) { + void unbindConfigProvider(MutableConfigProviderCommonBase* provider) { mutable_config_providers_.erase(provider); } - const std::string name_; - std::function initialize_callback_; - std::unordered_set mutable_config_providers_; + Init::TargetImpl init_target_; const uint64_t manager_identifier_; 
ConfigProviderManagerImplBase& config_provider_manager_; TimeSource& time_source_; SystemTime last_updated_; - absl::optional config_info_; - // ConfigSubscriptionInstanceBase, MutableConfigProviderImplBase and ConfigProviderManagerImplBase - // are tightly coupled with the current shared ownership model; use friend classes to explicitly - // denote the binding between them. + // ConfigSubscriptionCommonBase, MutableConfigProviderCommonBase and + // ConfigProviderManagerImplBase are tightly coupled with the current shared ownership model; use + // friend classes to explicitly denote the binding between them. // // TODO(AndresGuedez): Investigate whether a shared ownership model avoiding the s and // instead centralizing lifetime management in the ConfigProviderManagerImplBase with explicit // reference counting would be more maintainable. - friend class MutableConfigProviderImplBase; + friend class MutableConfigProviderCommonBase; + friend class MutableConfigProviderBase; + friend class DeltaMutableConfigProviderBase; friend class ConfigProviderManagerImplBase; + friend class MockMutableConfigProviderBase; }; -using ConfigSubscriptionInstanceBaseSharedPtr = std::shared_ptr; +using ConfigSubscriptionCommonBaseSharedPtr = std::shared_ptr; /** - * Provides generic functionality required by all dynamic config providers, including distribution - * of config updates to all workers. + * Provides common subscription functionality required by ConfigProvider::ApiType::Full DS APIs. 
+ */ +class ConfigSubscriptionInstance : public ConfigSubscriptionCommonBase { +protected: + ConfigSubscriptionInstance(const std::string& name, const uint64_t manager_identifier, + ConfigProviderManagerImplBase& config_provider_manager, + TimeSource& time_source, const SystemTime& last_updated, + const LocalInfo::LocalInfo& local_info) + : ConfigSubscriptionCommonBase(name, manager_identifier, config_provider_manager, time_source, + last_updated, local_info) {} + + ~ConfigSubscriptionInstance() override = default; + + /** + * Determines whether a configuration proto is a new update, and if so, propagates it to all + * config providers associated with this subscription. + * @param config_proto supplies the newly received config proto. + * @param config_name supplies the name associated with the config. + * @param version_info supplies the version associated with the config. + * @return bool false when the config proto has no delta from the previous config, true otherwise. + */ + bool checkAndApplyConfigUpdate(const Protobuf::Message& config_proto, + const std::string& config_name, const std::string& version_info); +}; + +using ConfigSharedPtr = std::shared_ptr; + +/** + * Provides common subscription functionality required by ConfigProvider::ApiType::Delta DS APIs. + */ +class DeltaConfigSubscriptionInstance : public ConfigSubscriptionCommonBase { +protected: + DeltaConfigSubscriptionInstance(const std::string& name, const uint64_t manager_identifier, + ConfigProviderManagerImplBase& config_provider_manager, + TimeSource& time_source, const SystemTime& last_updated, + const LocalInfo::LocalInfo& local_info) + : ConfigSubscriptionCommonBase(name, manager_identifier, config_provider_manager, time_source, + last_updated, local_info) {} + + ~DeltaConfigSubscriptionInstance() override = default; + + /** + * Propagates a config update to the config providers and worker threads associated with the + * subscription. 
+ * + * @param update_fn the callback to run on each worker thread. + */ + void applyDeltaConfigUpdate(const std::function& update_fn); +}; + +/** + * Provides generic functionality required by the ConfigProvider::ApiType specific dynamic config + * providers (see MutableConfigProviderBase and DeltaMutableConfigProviderBase). * * This class can not be instantiated directly; instead, it provides the foundation for * dynamic config provider implementations which derive from it. */ -class MutableConfigProviderImplBase : public ConfigProvider { +class MutableConfigProviderCommonBase : public ConfigProvider { public: - ~MutableConfigProviderImplBase() override { subscription_->unbindConfigProvider(this); } + ~MutableConfigProviderCommonBase() override { subscription_->unbindConfigProvider(this); } // Envoy::Config::ConfigProvider SystemTime lastUpdated() const override { return subscription_->lastUpdated(); } + ApiType apiType() const override { return api_type_; } + +protected: + MutableConfigProviderCommonBase(ConfigSubscriptionCommonBaseSharedPtr&& subscription, + Server::Configuration::FactoryContext& factory_context, + ApiType api_type) + : tls_(factory_context.threadLocal().allocateSlot()), subscription_(subscription), + api_type_(api_type) {} + + ThreadLocal::SlotPtr tls_; + ConfigSubscriptionCommonBaseSharedPtr subscription_; +private: + ApiType api_type_; +}; + +/** + * Provides common mutable (dynamic) config provider functionality required by + * ConfigProvider::ApiType::Full DS APIs. + */ +class MutableConfigProviderBase : public MutableConfigProviderCommonBase { +public: // Envoy::Config::ConfigProvider + // NOTE: This is being promoted to public for internal uses to avoid an unnecessary dynamic_cast + // in the public API (ConfigProvider::config()). 
ConfigConstSharedPtr getConfig() const override { return tls_->getTyped().config_; } @@ -289,16 +366,20 @@ class MutableConfigProviderImplBase : public ConfigProvider { * @param config supplies the newly instantiated config. */ void onConfigUpdate(const ConfigConstSharedPtr& config) { + if (getConfig() == config) { + return; + } tls_->runOnAllThreads( [this, config]() -> void { tls_->getTyped().config_ = config; }); } protected: - MutableConfigProviderImplBase(ConfigSubscriptionInstanceBaseSharedPtr&& subscription, - Server::Configuration::FactoryContext& factory_context) - : subscription_(subscription), tls_(factory_context.threadLocal().allocateSlot()) {} + MutableConfigProviderBase(ConfigSubscriptionCommonBaseSharedPtr&& subscription, + Server::Configuration::FactoryContext& factory_context, + ApiType api_type) + : MutableConfigProviderCommonBase(std::move(subscription), factory_context, api_type) {} - const ConfigSubscriptionInstanceBaseSharedPtr& subscription() const { return subscription_; } + ~MutableConfigProviderBase() override = default; private: struct ThreadLocalConfig : public ThreadLocal::ThreadLocalObject { @@ -307,9 +388,50 @@ class MutableConfigProviderImplBase : public ConfigProvider { ConfigProvider::ConfigConstSharedPtr config_; }; +}; - ConfigSubscriptionInstanceBaseSharedPtr subscription_; - ThreadLocal::SlotPtr tls_; +/** + * Provides common mutable (dynamic) config provider functionality required by + * ConfigProvider::ApiType::Delta DS APIs. + */ +class DeltaMutableConfigProviderBase : public MutableConfigProviderCommonBase { +public: + // Envoy::Config::ConfigProvider + // This promotes getConfig() to public so that internal uses can avoid an unnecessary dynamic_cast + // in the public API (ConfigProvider::config()). + ConfigConstSharedPtr getConfig() const override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + + /** + * Non-const overload for use within the framework. 
+ * @return ConfigSharedPtr the config implementation associated with the provider. + */ + virtual ConfigSharedPtr getConfig() PURE; + + /** + * Propagates a delta config update to all workers. + * @param updateCb the callback to run on each worker. + */ + void onConfigUpdate(Envoy::Event::PostCb update_cb) { + tls_->runOnAllThreads(std::move(update_cb)); + } + +protected: + DeltaMutableConfigProviderBase(ConfigSubscriptionCommonBaseSharedPtr&& subscription, + Server::Configuration::FactoryContext& factory_context, + ApiType api_type) + : MutableConfigProviderCommonBase(std::move(subscription), factory_context, api_type) {} + + ~DeltaMutableConfigProviderBase() override = default; + + /** + * Must be called by the derived class' constructor. + * @param initializeCb supplies the initialization callback to be issued for each worker + * thread. + */ + void initialize(ThreadLocal::Slot::InitializeCb initializeCb) { + subscription_->bindConfigProvider(this); + tls_->set(std::move(initializeCb)); + } }; /** @@ -317,9 +439,9 @@ class MutableConfigProviderImplBase : public ConfigProvider { * lifetime of subscriptions and dynamic config providers, along with determining which * subscriptions should be associated with newly instantiated providers. * - * The implementation of this class is not thread safe. Note that ImmutableConfigProviderImplBase - * and ConfigSubscriptionInstanceBase call the corresponding {bind,unbind}* functions exposed by - * this class. + * The implementation of this class is not thread safe. Note that ImmutableConfigProviderBase + * and ConfigSubscriptionCommonBase call the corresponding {bind,unbind}* functions exposed + * by this class. * * All config processing is done on the main thread, so instantiation of *ConfigProvider* objects * via createStaticConfigProvider() and createXdsConfigProvider() is naturally thread safe. 
Care @@ -342,18 +464,19 @@ class ConfigProviderManagerImplBase : public ConfigProviderManager, public Singl virtual ProtobufTypes::MessagePtr dumpConfigs() const PURE; protected: - using ConfigProviderSet = std::unordered_set; + // Ordered set for deterministic config dump output. + using ConfigProviderSet = std::set; using ConfigProviderMap = std::unordered_map, EnumClassHash>; using ConfigSubscriptionMap = - std::unordered_map>; + std::unordered_map>; ConfigProviderManagerImplBase(Server::Admin& admin, const std::string& config_name); const ConfigSubscriptionMap& configSubscriptions() const { return config_subscriptions_; } /** - * Returns the set of bound ImmutableConfigProviderImplBase-derived providers of a given type. + * Returns the set of bound ImmutableConfigProviderBase-derived providers of a given type. * @param type supplies the type of config providers to return. * @return const ConfigProviderSet* the set of config providers corresponding to the type. */ @@ -371,12 +494,12 @@ class ConfigProviderManagerImplBase : public ConfigProviderManager, public Singl template std::shared_ptr getSubscription(const Protobuf::Message& config_source_proto, Init::Manager& init_manager, - const std::function& subscription_factory_fn) { - static_assert(std::is_base_of::value, - "T must be a subclass of ConfigSubscriptionInstanceBase"); + static_assert(std::is_base_of::value, + "T must be a subclass of ConfigSubscriptionCommonBase"); - ConfigSubscriptionInstanceBaseSharedPtr subscription; + ConfigSubscriptionCommonBaseSharedPtr subscription; const uint64_t manager_identifier = MessageUtil::hash(config_source_proto); auto it = config_subscriptions_.find(manager_identifier); @@ -385,13 +508,12 @@ class ConfigProviderManagerImplBase : public ConfigProviderManager, public Singl // around it. However, since this is not a performance critical path we err on the side // of simplicity. 
subscription = subscription_factory_fn(manager_identifier, *this); - - subscription->registerInitTarget(init_manager); + init_manager.add(subscription->init_target_); bindSubscription(manager_identifier, subscription); } else { // Because the ConfigProviderManagerImplBase's weak_ptrs only get cleaned up - // in the ConfigSubscriptionInstanceBase destructor, and the single threaded nature + // in the ConfigSubscriptionCommonBase destructor, and the single threaded nature // of this code, locking the weak_ptr will not fail. subscription = it->second.lock(); } @@ -402,7 +524,7 @@ class ConfigProviderManagerImplBase : public ConfigProviderManager, public Singl private: void bindSubscription(const uint64_t manager_identifier, - ConfigSubscriptionInstanceBaseSharedPtr& subscription) { + ConfigSubscriptionCommonBaseSharedPtr& subscription) { config_subscriptions_.insert({manager_identifier, subscription}); } @@ -410,8 +532,8 @@ class ConfigProviderManagerImplBase : public ConfigProviderManager, public Singl config_subscriptions_.erase(manager_identifier); } - void bindImmutableConfigProvider(ImmutableConfigProviderImplBase* provider); - void unbindImmutableConfigProvider(ImmutableConfigProviderImplBase* provider); + void bindImmutableConfigProvider(ImmutableConfigProviderBase* provider); + void unbindImmutableConfigProvider(ImmutableConfigProviderBase* provider); // TODO(jsedgwick) These two members are prime candidates for the owned-entry list/map // as in ConfigTracker. I.e. the ProviderImpls would have an EntryOwner for these lists @@ -421,10 +543,10 @@ class ConfigProviderManagerImplBase : public ConfigProviderManager, public Singl Server::ConfigTracker::EntryOwnerPtr config_tracker_entry_; - // See comment for friend classes in the ConfigSubscriptionInstanceBase for more details on the - // use of friends. 
- friend class ConfigSubscriptionInstanceBase; - friend class ImmutableConfigProviderImplBase; + // See comment for friend classes in the ConfigSubscriptionCommonBase for more details on + // the use of friends. + friend class ConfigSubscriptionCommonBase; + friend class ImmutableConfigProviderBase; }; } // namespace Config diff --git a/source/common/config/delta_subscription_impl.cc b/source/common/config/delta_subscription_impl.cc new file mode 100644 index 0000000000000..e3d285ec80106 --- /dev/null +++ b/source/common/config/delta_subscription_impl.cc @@ -0,0 +1,108 @@ +#include "common/config/delta_subscription_impl.h" + +#include "common/common/assert.h" +#include "common/common/backoff_strategy.h" +#include "common/common/token_bucket_impl.h" +#include "common/config/utility.h" +#include "common/protobuf/protobuf.h" +#include "common/protobuf/utility.h" + +namespace Envoy { +namespace Config { + +DeltaSubscriptionImpl::DeltaSubscriptionImpl( + const LocalInfo::LocalInfo& local_info, Grpc::AsyncClientPtr async_client, + Event::Dispatcher& dispatcher, const Protobuf::MethodDescriptor& service_method, + absl::string_view type_url, Runtime::RandomGenerator& random, Stats::Scope& scope, + const RateLimitSettings& rate_limit_settings, SubscriptionStats stats, + std::chrono::milliseconds init_fetch_timeout) + : grpc_stream_(this, std::move(async_client), service_method, random, dispatcher, scope, + rate_limit_settings), + type_url_(type_url), local_info_(local_info), stats_(stats), dispatcher_(dispatcher), + init_fetch_timeout_(init_fetch_timeout) {} + +void DeltaSubscriptionImpl::pause() { state_->pause(); } +void DeltaSubscriptionImpl::resume() { + state_->resume(); + trySendDiscoveryRequests(); +} + +// Config::Subscription +void DeltaSubscriptionImpl::start(const std::set& resources, + SubscriptionCallbacks& callbacks) { + state_ = std::make_unique(type_url_, resources, callbacks, local_info_, + init_fetch_timeout_, dispatcher_, stats_); + 
grpc_stream_.establishNewStream(); + updateResources(resources); +} + +void DeltaSubscriptionImpl::updateResources(const std::set& update_to_these_names) { + state_->updateResourceInterest(update_to_these_names); + // Tell the server about our new interests, if there are any. + trySendDiscoveryRequests(); +} + +// Config::GrpcStreamCallbacks +void DeltaSubscriptionImpl::onStreamEstablished() { + state_->markStreamFresh(); + trySendDiscoveryRequests(); +} + +void DeltaSubscriptionImpl::onEstablishmentFailure() { state_->handleEstablishmentFailure(); } + +void DeltaSubscriptionImpl::onDiscoveryResponse( + std::unique_ptr&& message) { + ENVOY_LOG(debug, "Received gRPC message for {} at version {}", type_url_, + message->system_version_info()); + kickOffAck(state_->handleResponse(*message)); +} + +void DeltaSubscriptionImpl::onWriteable() { trySendDiscoveryRequests(); } + +void DeltaSubscriptionImpl::kickOffAck(UpdateAck ack) { + ack_queue_.push(ack); + trySendDiscoveryRequests(); +} + +// Checks whether external conditions allow sending a DeltaDiscoveryRequest. (Does not check +// whether we *want* to send a DeltaDiscoveryRequest). +bool DeltaSubscriptionImpl::canSendDiscoveryRequest() { + if (state_->paused()) { + ENVOY_LOG(trace, "API {} paused; discovery request on hold for now.", type_url_); + return false; + } else if (!grpc_stream_.grpcStreamAvailable()) { + ENVOY_LOG(trace, "No stream available to send a DiscoveryRequest for {}.", type_url_); + return false; + } else if (!grpc_stream_.checkRateLimitAllowsDrain()) { + ENVOY_LOG(trace, "{} DiscoveryRequest hit rate limit; will try later.", type_url_); + return false; + } + return true; +} + +// Checks whether we have something to say in a DeltaDiscoveryRequest, which can be an ACK and/or +// a subscription update. (Does not check whether we *can* send a DeltaDiscoveryRequest). 
+bool DeltaSubscriptionImpl::wantToSendDiscoveryRequest() { + return !ack_queue_.empty() || state_->subscriptionUpdatePending(); +} + +void DeltaSubscriptionImpl::trySendDiscoveryRequests() { + while (wantToSendDiscoveryRequest() && canSendDiscoveryRequest()) { + envoy::api::v2::DeltaDiscoveryRequest request = state_->getNextRequest(); + if (!ack_queue_.empty()) { + const UpdateAck& ack = ack_queue_.front(); + request.set_response_nonce(ack.nonce_); + if (ack.error_detail_.code() != Grpc::Status::GrpcStatus::Ok) { + // Don't needlessly make the field present-but-empty if status is ok. + request.mutable_error_detail()->CopyFrom(ack.error_detail_); + } + ack_queue_.pop(); + } + ENVOY_LOG(trace, "Sending DiscoveryRequest for {}: {}", type_url_, request.DebugString()); + grpc_stream_.sendMessage(request); + } + grpc_stream_.maybeUpdateQueueSizeStat(ack_queue_.size()); +} + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/delta_subscription_impl.h b/source/common/config/delta_subscription_impl.h new file mode 100644 index 0000000000000..846de51d9eefb --- /dev/null +++ b/source/common/config/delta_subscription_impl.h @@ -0,0 +1,91 @@ +#pragma once + +#include + +#include "envoy/api/v2/discovery.pb.h" +#include "envoy/common/token_bucket.h" +#include "envoy/config/subscription.h" +#include "envoy/config/xds_grpc_context.h" +#include "envoy/local_info/local_info.h" + +#include "common/common/logger.h" +#include "common/config/delta_subscription_state.h" +#include "common/config/grpc_stream.h" +#include "common/grpc/common.h" + +namespace Envoy { +namespace Config { + +/** + * Manages the logic of a (non-aggregated) delta xDS subscription. + * TODO(fredlas) add aggregation support. The plan is for that to happen in XdsGrpcContext, + * which this class will then "have a" rather than "be a". 
+ */ +class DeltaSubscriptionImpl : public Subscription, + public GrpcStreamCallbacks, + public Logger::Loggable { +public: + DeltaSubscriptionImpl(const LocalInfo::LocalInfo& local_info, Grpc::AsyncClientPtr async_client, + Event::Dispatcher& dispatcher, + const Protobuf::MethodDescriptor& service_method, + absl::string_view type_url, Runtime::RandomGenerator& random, + Stats::Scope& scope, const RateLimitSettings& rate_limit_settings, + SubscriptionStats stats, std::chrono::milliseconds init_fetch_timeout); + + void pause(); + void resume(); + + // Config::Subscription + void start(const std::set& resources, SubscriptionCallbacks& callbacks) override; + void updateResources(const std::set& update_to_these_names) override; + + // Config::GrpcStreamCallbacks + void onStreamEstablished() override; + void onEstablishmentFailure() override; + void + onDiscoveryResponse(std::unique_ptr&& message) override; + + void onWriteable() override; + +private: + void kickOffAck(UpdateAck ack); + + // Checks whether external conditions allow sending a DeltaDiscoveryRequest. (Does not check + // whether we *want* to send a DeltaDiscoveryRequest). + bool canSendDiscoveryRequest(); + + // Checks whether we have something to say in a DeltaDiscoveryRequest, which can be an ACK and/or + // a subscription update. (Does not check whether we *can* send a DeltaDiscoveryRequest). + bool wantToSendDiscoveryRequest(); + + void trySendDiscoveryRequests(); + + GrpcStream + grpc_stream_; + + const std::string type_url_; + + // An item in the queue represents a DeltaDiscoveryRequest that must be sent. If an item is not + // empty, it is the ACK (nonce + error_detail) to set on that request. An empty entry should + // still send a request; it just won't have an ACK. + // + // More details: DeltaDiscoveryRequest plays two independent roles: + // 1) informing the server of what resources we're interested in, and + // 2) acknowledging resources the server has sent us. 
+ // Each entry in this queue was added for exactly one of those purposes, but since the + // subscription interest is tracked separately, in a non-queue way, subscription changes can get + // mixed in with an ACK request. In that case, the entry that the subscription change originally + // queued up *does* still get sent, just empty and pointless. (TODO(fredlas) we would like to skip + // those no-op requests). + std::queue ack_queue_; + + const LocalInfo::LocalInfo& local_info_; + SubscriptionStats stats_; + Event::Dispatcher& dispatcher_; + std::chrono::milliseconds init_fetch_timeout_; + + std::unique_ptr state_; +}; + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/delta_subscription_state.cc b/source/common/config/delta_subscription_state.cc new file mode 100644 index 0000000000000..c709f785d5449 --- /dev/null +++ b/source/common/config/delta_subscription_state.cc @@ -0,0 +1,200 @@ +#include "common/config/delta_subscription_state.h" + +#include "common/common/assert.h" + +namespace Envoy { +namespace Config { + +DeltaSubscriptionState::DeltaSubscriptionState(const std::string& type_url, + const std::set& resource_names, + SubscriptionCallbacks& callbacks, + const LocalInfo::LocalInfo& local_info, + std::chrono::milliseconds init_fetch_timeout, + Event::Dispatcher& dispatcher, + SubscriptionStats& stats) + : type_url_(type_url), callbacks_(callbacks), local_info_(local_info), + init_fetch_timeout_(init_fetch_timeout), stats_(stats) { + // In normal usage of updateResourceInterest(), the caller is supposed to cause a discovery + // request to be queued if it returns true. We don't need to do that because we know that the + // subscription gRPC stream is not yet established, and establishment causes a request. 
+ updateResourceInterest(resource_names); + setInitFetchTimeout(dispatcher); +} + +void DeltaSubscriptionState::setInitFetchTimeout(Event::Dispatcher& dispatcher) { + if (init_fetch_timeout_.count() > 0 && !init_fetch_timeout_timer_) { + init_fetch_timeout_timer_ = dispatcher.createTimer([this]() -> void { + ENVOY_LOG(warn, "delta config: initial fetch timed out for {}", type_url_); + callbacks_.onConfigUpdateFailed(nullptr); + }); + init_fetch_timeout_timer_->enableTimer(init_fetch_timeout_); + } +} + +void DeltaSubscriptionState::pause() { + ENVOY_LOG(debug, "Pausing discovery requests for {}", type_url_); + ASSERT(!paused_); + paused_ = true; +} + +void DeltaSubscriptionState::resume() { + ENVOY_LOG(debug, "Resuming discovery requests for {}", type_url_); + ASSERT(paused_); + paused_ = false; +} + +// Returns true if there is any meaningful change in our subscription interest, worth reporting to +// the server. +void DeltaSubscriptionState::updateResourceInterest( + const std::set& update_to_these_names) { + std::vector cur_added; + std::vector cur_removed; + + std::set_difference(update_to_these_names.begin(), update_to_these_names.end(), + resource_names_.begin(), resource_names_.end(), + std::inserter(cur_added, cur_added.begin())); + std::set_difference(resource_names_.begin(), resource_names_.end(), update_to_these_names.begin(), + update_to_these_names.end(), std::inserter(cur_removed, cur_removed.begin())); + + for (const auto& a : cur_added) { + setResourceWaitingForServer(a); + // Removed->added requires us to keep track of it as a "new" addition, since our user may have + // forgotten its copy of the resource after instructing us to remove it, and so needs to be + // reminded of it. 
+ names_removed_.erase(a); + names_added_.insert(a); + } + for (const auto& r : cur_removed) { + setLostInterestInResource(r); + // Ideally, when a resource is added-then-removed in between requests, we would avoid putting + // a superfluous "unsubscribe [resource that was never subscribed]" in the request. However, + // the removed-then-added case *does* need to go in the request, and due to how we accomplish + // that, it's difficult to distinguish remove-add-remove from add-remove (because "remove-add" + // has to be treated as equivalent to just "add"). + names_added_.erase(r); + names_removed_.insert(r); + } +} + +// Not having sent any requests yet counts as an "update pending" since you're supposed to resend +// the entirety of your interest at the start of a stream, even if nothing has changed. +bool DeltaSubscriptionState::subscriptionUpdatePending() const { + return !names_added_.empty() || !names_removed_.empty() || + !any_request_sent_yet_in_current_stream_; +} + +UpdateAck +DeltaSubscriptionState::handleResponse(const envoy::api::v2::DeltaDiscoveryResponse& message) { + // We *always* copy the response's nonce into the next request, even if we're going to make that + // request a NACK by setting error_detail. 
+ UpdateAck ack(message.nonce()); + stats_.update_attempt_.inc(); + try { + handleGoodResponse(message); + } catch (const EnvoyException& e) { + handleBadResponse(e, ack); + } + return ack; +} + +void DeltaSubscriptionState::handleGoodResponse( + const envoy::api::v2::DeltaDiscoveryResponse& message) { + disableInitFetchTimeoutTimer(); + callbacks_.onConfigUpdate(message.resources(), message.removed_resources(), + message.system_version_info()); + for (const auto& resource : message.resources()) { + setResourceVersion(resource.name(), resource.version()); + } + // If a resource is gone, there is no longer a meaningful version for it that makes sense to + // provide to the server upon stream reconnect: either it will continue to not exist, in which + // case saying nothing is fine, or the server will bring back something new, which we should + // receive regardless (which is the logic that not specifying a version will get you). + // + // So, leave the version map entry present but blank. It will be left out of + // initial_resource_versions messages, but will remind us to explicitly tell the server "I'm + // cancelling my subscription" when we lose interest. + for (const auto& resource_name : message.removed_resources()) { + if (resource_names_.find(resource_name) != resource_names_.end()) { + setResourceWaitingForServer(resource_name); + } + } + stats_.update_success_.inc(); + stats_.version_.set(HashUtil::xxHash64(message.system_version_info())); + ENVOY_LOG(debug, "Delta config for {} accepted with {} resources added, {} removed", type_url_, + message.resources().size(), message.removed_resources().size()); +} + +void DeltaSubscriptionState::handleBadResponse(const EnvoyException& e, UpdateAck& ack) { + // Note that error_detail being set is what indicates that a DeltaDiscoveryRequest is a NACK. 
+ ack.error_detail_.set_code(Grpc::Status::GrpcStatus::Internal); + ack.error_detail_.set_message(e.what()); + disableInitFetchTimeoutTimer(); + stats_.update_rejected_.inc(); + ENVOY_LOG(warn, "delta config for {} rejected: {}", type_url_, e.what()); + callbacks_.onConfigUpdateFailed(&e); +} + +void DeltaSubscriptionState::handleEstablishmentFailure() { + disableInitFetchTimeoutTimer(); + stats_.update_failure_.inc(); + stats_.update_attempt_.inc(); + callbacks_.onConfigUpdateFailed(nullptr); +} + +envoy::api::v2::DeltaDiscoveryRequest DeltaSubscriptionState::getNextRequest() { + envoy::api::v2::DeltaDiscoveryRequest request; + if (!any_request_sent_yet_in_current_stream_) { + any_request_sent_yet_in_current_stream_ = true; + // initial_resource_versions "must be populated for first request in a stream". + // Also, since this might be a new server, we must explicitly state *all* of our subscription + // interest. + for (auto const& resource : resource_versions_) { + // Populate initial_resource_versions with the resource versions we currently have. + // Resources we are interested in, but are still waiting to get any version of from the + // server, do not belong in initial_resource_versions. (But do belong in new subscriptions!) + if (!resource.second.waitingForServer()) { + (*request.mutable_initial_resource_versions())[resource.first] = resource.second.version(); + } + // As mentioned above, fill resource_names_subscribe with everything, including names we + // have yet to receive any resource for. 
+ names_added_.insert(resource.first); + } + names_removed_.clear(); + } + std::copy(names_added_.begin(), names_added_.end(), + Protobuf::RepeatedFieldBackInserter(request.mutable_resource_names_subscribe())); + std::copy(names_removed_.begin(), names_removed_.end(), + Protobuf::RepeatedFieldBackInserter(request.mutable_resource_names_unsubscribe())); + names_added_.clear(); + names_removed_.clear(); + + request.set_type_url(type_url_); + request.mutable_node()->MergeFrom(local_info_.node()); + return request; +} + +void DeltaSubscriptionState::disableInitFetchTimeoutTimer() { + if (init_fetch_timeout_timer_) { + init_fetch_timeout_timer_->disableTimer(); + init_fetch_timeout_timer_.reset(); + } +} + +void DeltaSubscriptionState::setResourceVersion(const std::string& resource_name, + const std::string& resource_version) { + resource_versions_[resource_name] = ResourceVersion(resource_version); + resource_names_.insert(resource_name); +} + +void DeltaSubscriptionState::setResourceWaitingForServer(const std::string& resource_name) { + resource_versions_[resource_name] = ResourceVersion(); + resource_names_.insert(resource_name); +} + +void DeltaSubscriptionState::setLostInterestInResource(const std::string& resource_name) { + resource_versions_.erase(resource_name); + resource_names_.erase(resource_name); +} + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/delta_subscription_state.h b/source/common/config/delta_subscription_state.h new file mode 100644 index 0000000000000..5fbb6f79f5a19 --- /dev/null +++ b/source/common/config/delta_subscription_state.h @@ -0,0 +1,107 @@ +#pragma once + +#include "envoy/api/v2/discovery.pb.h" +#include "envoy/config/subscription.h" +#include "envoy/event/dispatcher.h" +#include "envoy/grpc/status.h" +#include "envoy/local_info/local_info.h" + +#include "common/common/assert.h" +#include "common/common/hash.h" +#include "common/common/logger.h" + +namespace Envoy { +namespace Config { + +struct 
UpdateAck { + UpdateAck(absl::string_view nonce) : nonce_(nonce) {} + std::string nonce_; + ::google::rpc::Status error_detail_; +}; + +// Tracks the xDS protocol state of an individual ongoing delta xDS session. +class DeltaSubscriptionState : public Logger::Loggable { +public: + DeltaSubscriptionState(const std::string& type_url, const std::set& resource_names, + SubscriptionCallbacks& callbacks, const LocalInfo::LocalInfo& local_info, + std::chrono::milliseconds init_fetch_timeout, + Event::Dispatcher& dispatcher, SubscriptionStats& stats); + + void setInitFetchTimeout(Event::Dispatcher& dispatcher); + + void pause(); + void resume(); + bool paused() const { return paused_; } + + // Update which resources we're interested in subscribing to. + void updateResourceInterest(const std::set& update_to_these_names); + + // Whether there was a change in our subscription interest we have yet to inform the server of. + bool subscriptionUpdatePending() const; + + void markStreamFresh() { any_request_sent_yet_in_current_stream_ = false; } + + UpdateAck handleResponse(const envoy::api::v2::DeltaDiscoveryResponse& message); + + void handleEstablishmentFailure(); + + envoy::api::v2::DeltaDiscoveryRequest getNextRequest(); + +private: + void handleGoodResponse(const envoy::api::v2::DeltaDiscoveryResponse& message); + void handleBadResponse(const EnvoyException& e, UpdateAck& ack); + void disableInitFetchTimeoutTimer(); + + class ResourceVersion { + public: + explicit ResourceVersion(absl::string_view version) : version_(version) {} + // Builds a ResourceVersion in the waitingForServer state. + ResourceVersion() {} + + // If true, we currently have no version of this resource - we are waiting for the server to + // provide us with one. + bool waitingForServer() const { return version_ == absl::nullopt; } + // Must not be called if waitingForServer() == true. 
+ std::string version() const { + ASSERT(version_.has_value()); + return version_.value_or(""); + } + + private: + absl::optional version_; + }; + + // Use these helpers to ensure resource_versions_ and resource_names_ get updated together. + void setResourceVersion(const std::string& resource_name, const std::string& resource_version); + void setResourceWaitingForServer(const std::string& resource_name); + void setLostInterestInResource(const std::string& resource_name); + + // A map from resource name to per-resource version. The keys of this map are exactly the resource + // names we are currently interested in. Those in the waitingForServer state currently don't have + // any version for that resource: we need to inform the server if we lose interest in them, but we + // also need to *not* include them in the initial_resource_versions map upon a reconnect. + std::unordered_map resource_versions_; + // The keys of resource_versions_. Only tracked separately because std::map does not provide an + // iterator into just its keys, e.g. for use in std::set_difference. + std::set resource_names_; + + const std::string type_url_; + SubscriptionCallbacks& callbacks_; + const LocalInfo::LocalInfo& local_info_; + std::chrono::milliseconds init_fetch_timeout_; + Event::TimerPtr init_fetch_timeout_timer_; + + bool paused_{}; + bool any_request_sent_yet_in_current_stream_{}; + + // Tracks changes in our subscription interest since the previous DeltaDiscoveryRequest we sent. + // Can't use unordered_set due to ordering issues in gTest expectation matching. + // Feel free to change to unordered if you can figure out how to make it work. 
+ std::set names_added_; + std::set names_removed_; + + SubscriptionStats& stats_; +}; + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/filesystem_subscription_impl.cc b/source/common/config/filesystem_subscription_impl.cc new file mode 100644 index 0000000000000..fa9c1c42aae4f --- /dev/null +++ b/source/common/config/filesystem_subscription_impl.cc @@ -0,0 +1,66 @@ +#include "common/config/filesystem_subscription_impl.h" + +#include "common/common/macros.h" +#include "common/config/utility.h" +#include "common/protobuf/protobuf.h" +#include "common/protobuf/utility.h" + +namespace Envoy { +namespace Config { + +FilesystemSubscriptionImpl::FilesystemSubscriptionImpl(Event::Dispatcher& dispatcher, + absl::string_view path, + SubscriptionStats stats, Api::Api& api) + : path_(path), watcher_(dispatcher.createFilesystemWatcher()), stats_(stats), api_(api) { + watcher_->addWatch(path_, Filesystem::Watcher::Events::MovedTo, [this](uint32_t events) { + UNREFERENCED_PARAMETER(events); + if (started_) { + refresh(); + } + }); +} + +// Config::Subscription +void FilesystemSubscriptionImpl::start(const std::set& resources, + Config::SubscriptionCallbacks& callbacks) { + // We report all discovered resources in the watched file. + UNREFERENCED_PARAMETER(resources); + callbacks_ = &callbacks; + started_ = true; + // Attempt to read in case there is a file there already. + refresh(); +} + +void FilesystemSubscriptionImpl::updateResources(const std::set& resources) { + // We report all discovered resources in the watched file. + UNREFERENCED_PARAMETER(resources); + // Bump stats for consistence behavior with other xDS. 
+ stats_.update_attempt_.inc(); +} + +void FilesystemSubscriptionImpl::refresh() { + ENVOY_LOG(debug, "Filesystem config refresh for {}", path_); + stats_.update_attempt_.inc(); + bool config_update_available = false; + try { + envoy::api::v2::DiscoveryResponse message; + MessageUtil::loadFromFile(path_, message, api_); + config_update_available = true; + callbacks_->onConfigUpdate(message.resources(), message.version_info()); + stats_.version_.set(HashUtil::xxHash64(message.version_info())); + stats_.update_success_.inc(); + ENVOY_LOG(debug, "Filesystem config update accepted for {}: {}", path_, message.DebugString()); + } catch (const EnvoyException& e) { + if (config_update_available) { + ENVOY_LOG(warn, "Filesystem config update rejected: {}", e.what()); + stats_.update_rejected_.inc(); + } else { + ENVOY_LOG(warn, "Filesystem config update failure: {}", e.what()); + stats_.update_failure_.inc(); + } + callbacks_->onConfigUpdateFailed(&e); + } +} + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/filesystem_subscription_impl.h b/source/common/config/filesystem_subscription_impl.h index 27b7540d27f3c..87249fc273aad 100644 --- a/source/common/config/filesystem_subscription_impl.h +++ b/source/common/config/filesystem_subscription_impl.h @@ -7,10 +7,6 @@ #include "envoy/filesystem/filesystem.h" #include "common/common/logger.h" -#include "common/common/macros.h" -#include "common/config/utility.h" -#include "common/protobuf/protobuf.h" -#include "common/protobuf/utility.h" namespace Envoy { namespace Config { @@ -18,72 +14,27 @@ namespace Config { /** * Filesystem inotify implementation of the API Subscription interface. This allows the API to be * consumed on filesystem changes to files containing the JSON canonical representation of - * lists of ResourceType. + * lists of xDS resources. 
*/ -template -class FilesystemSubscriptionImpl : public Config::Subscription, +class FilesystemSubscriptionImpl : public Config::Subscription, Logger::Loggable { public: - FilesystemSubscriptionImpl(Event::Dispatcher& dispatcher, const std::string& path, - SubscriptionStats stats, Api::Api& api) - : path_(path), watcher_(dispatcher.createFilesystemWatcher()), stats_(stats), api_(api) { - watcher_->addWatch(path_, Filesystem::Watcher::Events::MovedTo, [this](uint32_t events) { - UNREFERENCED_PARAMETER(events); - if (started_) { - refresh(); - } - }); - } + FilesystemSubscriptionImpl(Event::Dispatcher& dispatcher, absl::string_view path, + SubscriptionStats stats, Api::Api& api); // Config::Subscription - void start(const std::vector& resources, - Config::SubscriptionCallbacks& callbacks) override { - // We report all discovered resources in the watched file. - UNREFERENCED_PARAMETER(resources); - callbacks_ = &callbacks; - started_ = true; - // Attempt to read in case there is a file there already. - refresh(); - } + void start(const std::set& resources, + Config::SubscriptionCallbacks& callbacks) override; - void updateResources(const std::vector& resources) override { - // We report all discovered resources in the watched file. - UNREFERENCED_PARAMETER(resources); - // Bump stats for consistence behavior with other xDS. 
- stats_.update_attempt_.inc(); - } + void updateResources(const std::set& resources) override; private: - void refresh() { - ENVOY_LOG(debug, "Filesystem config refresh for {}", path_); - stats_.update_attempt_.inc(); - bool config_update_available = false; - try { - envoy::api::v2::DiscoveryResponse message; - MessageUtil::loadFromFile(path_, message, api_); - const auto typed_resources = Config::Utility::getTypedResources(message); - config_update_available = true; - callbacks_->onConfigUpdate(typed_resources, message.version_info()); - stats_.version_.set(HashUtil::xxHash64(message.version_info())); - stats_.update_success_.inc(); - ENVOY_LOG(debug, "Filesystem config update accepted for {}: {}", path_, - message.DebugString()); - } catch (const EnvoyException& e) { - if (config_update_available) { - ENVOY_LOG(warn, "Filesystem config update rejected: {}", e.what()); - stats_.update_rejected_.inc(); - } else { - ENVOY_LOG(warn, "Filesystem config update failure: {}", e.what()); - stats_.update_failure_.inc(); - } - callbacks_->onConfigUpdateFailed(&e); - } - } + void refresh(); bool started_{}; const std::string path_; std::unique_ptr watcher_; - SubscriptionCallbacks* callbacks_{}; + SubscriptionCallbacks* callbacks_{}; SubscriptionStats stats_; Api::Api& api_; }; diff --git a/source/common/config/filter_json.cc b/source/common/config/filter_json.cc index 1ee3c2c1f7215..f46cf38275cbb 100644 --- a/source/common/config/filter_json.cc +++ b/source/common/config/filter_json.cc @@ -1,7 +1,6 @@ #include "common/config/filter_json.h" #include "envoy/config/accesslog/v2/file.pb.h" -#include "envoy/stats/stats_options.h" #include "common/common/assert.h" #include "common/common/utility.h" @@ -127,8 +126,7 @@ void FilterJson::translateAccessLog(const Json::Object& json_config, void FilterJson::translateHttpConnectionManager( const Json::Object& json_config, envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager& - proto_config, - const 
Stats::StatsOptions& stats_options) { + proto_config) { json_config.validateSchema(Json::Schema::HTTP_CONN_NETWORK_FILTER_SCHEMA); envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager::CodecType @@ -140,8 +138,7 @@ void FilterJson::translateHttpConnectionManager( JSON_UTIL_SET_STRING(json_config, proto_config, stat_prefix); if (json_config.hasObject("rds")) { - Utility::translateRdsConfig(*json_config.getObject("rds"), *proto_config.mutable_rds(), - stats_options); + Utility::translateRdsConfig(*json_config.getObject("rds"), *proto_config.mutable_rds()); } if (json_config.hasObject("route_config")) { if (json_config.hasObject("rds")) { @@ -149,7 +146,7 @@ void FilterJson::translateHttpConnectionManager( "http connection manager must have either rds or route_config but not both"); } RdsJson::translateRouteConfiguration(*json_config.getObject("route_config"), - *proto_config.mutable_route_config(), stats_options); + *proto_config.mutable_route_config()); } for (const auto& json_filter : json_config.getObjectArray("filters", true)) { @@ -164,10 +161,8 @@ void FilterJson::translateHttpConnectionManager( "{\"deprecated_v1\": true, \"value\": " + json_filter->getObject("config")->asJsonString() + "}"; - const auto status = - Protobuf::util::JsonStringToMessage(deprecated_config, filter->mutable_config()); // JSON schema has already validated that this is a valid JSON object. 
- ASSERT(status.ok()); + MessageUtil::loadFromJson(deprecated_config, *filter->mutable_config()); } JSON_UTIL_SET_BOOL(json_config, proto_config, add_user_agent); @@ -254,7 +249,6 @@ void FilterJson::translateMongoProxy( auto* delay = proto_config.mutable_delay(); auto* percentage = delay->mutable_percentage(); - delay->set_type(envoy::config::filter::fault::v2::FaultDelay::FIXED); percentage->set_numerator(static_cast(json_fault->getInteger("percent"))); percentage->set_denominator(envoy::type::FractionalPercent::HUNDRED); JSON_UTIL_SET_DURATION_FROM_FIELD(*json_fault, *delay, fixed_delay, duration); @@ -284,7 +278,6 @@ void FilterJson::translateFaultFilter( if (!json_config_delay->empty()) { auto* delay = proto_config.mutable_delay(); auto* percentage = delay->mutable_percentage(); - delay->set_type(envoy::config::filter::fault::v2::FaultDelay::FIXED); percentage->set_numerator( static_cast(json_config_delay->getInteger("fixed_delay_percent"))); percentage->set_denominator(envoy::type::FractionalPercent::HUNDRED); diff --git a/source/common/config/filter_json.h b/source/common/config/filter_json.h index 1b59317ff2925..73ca0f7085dc3 100644 --- a/source/common/config/filter_json.h +++ b/source/common/config/filter_json.h @@ -15,7 +15,6 @@ #include "envoy/config/filter/network/redis_proxy/v2/redis_proxy.pb.h" #include "envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.pb.h" #include "envoy/json/json_object.h" -#include "envoy/stats/stats_options.h" namespace Envoy { namespace Config { @@ -50,8 +49,7 @@ class FilterJson { static void translateHttpConnectionManager( const Json::Object& json_config, envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager& - proto_config, - const Stats::StatsOptions& stats_options); + proto_config); /** * Translate a v1 JSON Redis proxy object to v2 diff --git a/source/common/config/grpc_mux_impl.cc b/source/common/config/grpc_mux_impl.cc index 1ea87a07c2643..fc654d7e1c1d6 100644 --- 
a/source/common/config/grpc_mux_impl.cc +++ b/source/common/config/grpc_mux_impl.cc @@ -13,8 +13,9 @@ GrpcMuxImpl::GrpcMuxImpl(const LocalInfo::LocalInfo& local_info, Grpc::AsyncClie const Protobuf::MethodDescriptor& service_method, Runtime::RandomGenerator& random, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings) - : GrpcStream( - std::move(async_client), service_method, random, dispatcher, scope, rate_limit_settings), + : grpc_stream_(this, std::move(async_client), service_method, random, dispatcher, scope, + rate_limit_settings), + local_info_(local_info) { Config::Utility::checkLocalInfo("ads", local_info); } @@ -27,10 +28,10 @@ GrpcMuxImpl::~GrpcMuxImpl() { } } -void GrpcMuxImpl::start() { establishNewStream(); } +void GrpcMuxImpl::start() { grpc_stream_.establishNewStream(); } void GrpcMuxImpl::sendDiscoveryRequest(const std::string& type_url) { - if (!grpcStreamAvailable()) { + if (!grpc_stream_.grpcStreamAvailable()) { ENVOY_LOG(debug, "No stream available to sendDiscoveryRequest for {}", type_url); return; // Drop this request; the reconnect will enqueue a new one. } @@ -57,7 +58,7 @@ void GrpcMuxImpl::sendDiscoveryRequest(const std::string& type_url) { } ENVOY_LOG(trace, "Sending DiscoveryRequest for {}: {}", type_url, request.DebugString()); - sendMessage(request); + grpc_stream_.sendMessage(request); // clear error_detail after the request is sent if it exists. 
if (api_state_[type_url].request_.has_error_detail()) { @@ -66,7 +67,7 @@ void GrpcMuxImpl::sendDiscoveryRequest(const std::string& type_url) { } GrpcMuxWatchPtr GrpcMuxImpl::subscribe(const std::string& type_url, - const std::vector& resources, + const std::set& resources, GrpcMuxCallbacks& callbacks) { auto watch = std::unique_ptr(new GrpcMuxWatchImpl(resources, callbacks, type_url, *this)); @@ -113,29 +114,30 @@ void GrpcMuxImpl::resume(const std::string& type_url) { } } -void GrpcMuxImpl::handleResponse(std::unique_ptr&& message) { +void GrpcMuxImpl::onDiscoveryResponse( + std::unique_ptr&& message) { const std::string& type_url = message->type_url(); ENVOY_LOG(debug, "Received gRPC message for {} at version {}", type_url, message->version_info()); if (api_state_.count(type_url) == 0) { ENVOY_LOG(warn, "Ignoring the message for type URL {} as it has no current subscribers.", type_url); - // TODO(yuval-k): This should never happen. consider dropping the stream as this is a protocol - // violation + // TODO(yuval-k): This should never happen. consider dropping the stream as this is a + // protocol violation return; } if (api_state_[type_url].watches_.empty()) { // update the nonce as we are processing this response. api_state_[type_url].request_.set_response_nonce(message->nonce()); if (message->resources().empty()) { - // No watches and no resources. This can happen when envoy unregisters from a resource - // that's removed from the server as well. For example, a deleted cluster triggers un-watching - // the ClusterLoadAssignment watch, and at the same time the xDS server sends an empty list of - // ClusterLoadAssignment resources. we'll accept this update. no need to send a discovery - // request, as we don't watch for anything. + // No watches and no resources. This can happen when envoy unregisters from a + // resource that's removed from the server as well. 
For example, a deleted cluster + // triggers un-watching the ClusterLoadAssignment watch, and at the same time the + // xDS server sends an empty list of ClusterLoadAssignment resources. we'll accept + // this update. no need to send a discovery request, as we don't watch for anything. api_state_[type_url].request_.set_version_info(message->version_info()); } else { - // No watches and we have resources - this should not happen. send a NACK (by not updating - // the version). + // No watches and we have resources - this should not happen. send a NACK (by not + // updating the version). ENVOY_LOG(warn, "Ignoring unwatched type URL {}", type_url); queueDiscoveryRequest(type_url); } @@ -157,6 +159,9 @@ void GrpcMuxImpl::handleResponse(std::unique_ptrresources_.empty()) { watch->callbacks_.onConfigUpdate(message->resources(), message->version_info()); continue; @@ -168,10 +173,14 @@ void GrpcMuxImpl::handleResponse(std::unique_ptrMergeFrom(it->second); } } - watch->callbacks_.onConfigUpdate(found_resources, message->version_info()); + // onConfigUpdate should be called only on watches(clusters/routes) that have + // updates in the message for EDS/RDS. + if (!found_resources.empty()) { + watch->callbacks_.onConfigUpdate(found_resources, message->version_info()); + } } - // TODO(mattklein123): In the future if we start tracking per-resource versions, we would do - // that tracking here. + // TODO(mattklein123): In the future if we start tracking per-resource versions, we + // would do that tracking here. 
api_state_[type_url].request_.set_version_info(message->version_info()); } catch (const EnvoyException& e) { for (auto watch : api_state_[type_url].watches_) { @@ -185,13 +194,15 @@ void GrpcMuxImpl::handleResponse(std::unique_ptrcallbacks_.onConfigUpdateFailed(nullptr); @@ -199,5 +210,27 @@ void GrpcMuxImpl::handleEstablishmentFailure() { } } +void GrpcMuxImpl::queueDiscoveryRequest(const std::string& queue_item) { + request_queue_.push(queue_item); + drainRequests(); +} + +void GrpcMuxImpl::clearRequestQueue() { + grpc_stream_.maybeUpdateQueueSizeStat(0); + // TODO(fredlas) when we have C++17: request_queue_ = {}; + while (!request_queue_.empty()) { + request_queue_.pop(); + } +} + +void GrpcMuxImpl::drainRequests() { + while (!request_queue_.empty() && grpc_stream_.checkRateLimitAllowsDrain()) { + // Process the request, if rate limiting is not enabled at all or if it is under rate limit. + sendDiscoveryRequest(request_queue_.front()); + request_queue_.pop(); + } + grpc_stream_.maybeUpdateQueueSizeStat(request_queue_.size()); +} + } // namespace Config } // namespace Envoy diff --git a/source/common/config/grpc_mux_impl.h b/source/common/config/grpc_mux_impl.h index 5a3672e5fe132..8702662fc683d 100644 --- a/source/common/config/grpc_mux_impl.h +++ b/source/common/config/grpc_mux_impl.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include "envoy/common/time.h" @@ -19,11 +20,9 @@ namespace Config { /** * ADS API implementation that fetches via gRPC. 
*/ -class GrpcMuxImpl - : public GrpcMux, - public GrpcStream // this string is a type URL -{ +class GrpcMuxImpl : public GrpcMux, + public GrpcStreamCallbacks, + public Logger::Loggable { public: GrpcMuxImpl(const LocalInfo::LocalInfo& local_info, Grpc::AsyncClientPtr async_client, Event::Dispatcher& dispatcher, const Protobuf::MethodDescriptor& service_method, @@ -32,23 +31,29 @@ class GrpcMuxImpl ~GrpcMuxImpl(); void start() override; - GrpcMuxWatchPtr subscribe(const std::string& type_url, const std::vector& resources, + GrpcMuxWatchPtr subscribe(const std::string& type_url, const std::set& resources, GrpcMuxCallbacks& callbacks) override; void pause(const std::string& type_url) override; void resume(const std::string& type_url) override; - void sendDiscoveryRequest(const std::string& type_url) override; + void sendDiscoveryRequest(const std::string& type_url); - // GrpcStream - void handleResponse(std::unique_ptr&& message) override; - void handleStreamEstablished() override; - void handleEstablishmentFailure() override; + // Config::GrpcStreamCallbacks + void onStreamEstablished() override; + void onEstablishmentFailure() override; + void onDiscoveryResponse(std::unique_ptr&& message) override; + void onWriteable() override; + + GrpcStream& + grpcStreamForTest() { + return grpc_stream_; + } private: void setRetryTimer(); struct GrpcMuxWatchImpl : public GrpcMuxWatch { - GrpcMuxWatchImpl(const std::vector& resources, GrpcMuxCallbacks& callbacks, + GrpcMuxWatchImpl(const std::set& resources, GrpcMuxCallbacks& callbacks, const std::string& type_url, GrpcMuxImpl& parent) : resources_(resources), callbacks_(callbacks), type_url_(type_url), parent_(parent), inserted_(true) { @@ -63,7 +68,7 @@ class GrpcMuxImpl } } } - std::vector resources_; + std::set resources_; GrpcMuxCallbacks& callbacks_; const std::string type_url_; GrpcMuxImpl& parent_; @@ -85,16 +90,27 @@ class GrpcMuxImpl bool subscribed_{}; }; + // Request queue management logic. 
+ void queueDiscoveryRequest(const std::string& queue_item); + void clearRequestQueue(); + void drainRequests(); + + GrpcStream grpc_stream_; const LocalInfo::LocalInfo& local_info_; std::unordered_map api_state_; // Envoy's dependency ordering. std::list subscriptions_; + + // A queue to store requests while rate limited. Note that when requests cannot be sent due to the + // gRPC stream being down, this queue does not store them; rather, they are simply dropped. + // This string is a type URL. + std::queue request_queue_; }; class NullGrpcMuxImpl : public GrpcMux { public: void start() override {} - GrpcMuxWatchPtr subscribe(const std::string&, const std::vector&, + GrpcMuxWatchPtr subscribe(const std::string&, const std::set&, GrpcMuxCallbacks&) override { throw EnvoyException("ADS must be configured to support an ADS config source"); } diff --git a/source/common/config/grpc_mux_subscription_impl.cc b/source/common/config/grpc_mux_subscription_impl.cc new file mode 100644 index 0000000000000..5b7ffe2d084d0 --- /dev/null +++ b/source/common/config/grpc_mux_subscription_impl.cc @@ -0,0 +1,91 @@ +#include "common/config/grpc_mux_subscription_impl.h" + +#include "common/common/assert.h" +#include "common/common/logger.h" +#include "common/grpc/common.h" +#include "common/protobuf/protobuf.h" +#include "common/protobuf/utility.h" + +namespace Envoy { +namespace Config { + +GrpcMuxSubscriptionImpl::GrpcMuxSubscriptionImpl(GrpcMux& grpc_mux, SubscriptionStats stats, + absl::string_view type_url, + Event::Dispatcher& dispatcher, + std::chrono::milliseconds init_fetch_timeout) + : grpc_mux_(grpc_mux), stats_(stats), type_url_(type_url), dispatcher_(dispatcher), + init_fetch_timeout_(init_fetch_timeout) {} + +// Config::Subscription +void GrpcMuxSubscriptionImpl::start(const std::set& resources, + SubscriptionCallbacks& callbacks) { + callbacks_ = &callbacks; + + if (init_fetch_timeout_.count() > 0) { + init_fetch_timeout_timer_ = dispatcher_.createTimer([this]() -> void 
{ + ENVOY_LOG(warn, "gRPC config: initial fetch timed out for {}", type_url_); + callbacks_->onConfigUpdateFailed(nullptr); + }); + init_fetch_timeout_timer_->enableTimer(init_fetch_timeout_); + } + + watch_ = grpc_mux_.subscribe(type_url_, resources, *this); + // The attempt stat here is maintained for the purposes of having consistency between ADS and + // gRPC/filesystem/REST Subscriptions. Since ADS is push based and muxed, the notion of an + // "attempt" for a given xDS API combined by ADS is not really that meaningful. + stats_.update_attempt_.inc(); +} + +void GrpcMuxSubscriptionImpl::updateResources(const std::set& update_to_these_names) { + // First destroy the watch, so that this subscribe doesn't send a request for both the + // previously watched resources and the new ones (we may have lost interest in some of the + // previously watched ones). + watch_.reset(); + watch_ = grpc_mux_.subscribe(type_url_, update_to_these_names, *this); + stats_.update_attempt_.inc(); +} + +// Config::GrpcMuxCallbacks +void GrpcMuxSubscriptionImpl::onConfigUpdate( + const Protobuf::RepeatedPtrField& resources, + const std::string& version_info) { + disableInitFetchTimeoutTimer(); + // TODO(mattklein123): In the future if we start tracking per-resource versions, we need to + // supply those versions to onConfigUpdate() along with the xDS response ("system") + // version_info. This way, both types of versions can be tracked and exposed for debugging by + // the configuration update targets. + callbacks_->onConfigUpdate(resources, version_info); + stats_.update_success_.inc(); + stats_.update_attempt_.inc(); + stats_.version_.set(HashUtil::xxHash64(version_info)); + ENVOY_LOG(debug, "gRPC config for {} accepted with {} resources with version {}", type_url_, + resources.size(), version_info); +} + +void GrpcMuxSubscriptionImpl::onConfigUpdateFailed(const EnvoyException* e) { + disableInitFetchTimeoutTimer(); + // TODO(htuch): Less fragile signal that this is failure vs. 
reject. + if (e == nullptr) { + stats_.update_failure_.inc(); + ENVOY_LOG(debug, "gRPC update for {} failed", type_url_); + } else { + stats_.update_rejected_.inc(); + ENVOY_LOG(warn, "gRPC config for {} rejected: {}", type_url_, e->what()); + } + stats_.update_attempt_.inc(); + callbacks_->onConfigUpdateFailed(e); +} + +std::string GrpcMuxSubscriptionImpl::resourceName(const ProtobufWkt::Any& resource) { + return callbacks_->resourceName(resource); +} + +void GrpcMuxSubscriptionImpl::disableInitFetchTimeoutTimer() { + if (init_fetch_timeout_timer_) { + init_fetch_timeout_timer_->disableTimer(); + init_fetch_timeout_timer_.reset(); + } +} + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/grpc_mux_subscription_impl.h b/source/common/config/grpc_mux_subscription_impl.h index 2bd0808118270..933ddb8cff1dc 100644 --- a/source/common/config/grpc_mux_subscription_impl.h +++ b/source/common/config/grpc_mux_subscription_impl.h @@ -3,12 +3,9 @@ #include "envoy/api/v2/discovery.pb.h" #include "envoy/config/grpc_mux.h" #include "envoy/config/subscription.h" +#include "envoy/event/dispatcher.h" -#include "common/common/assert.h" #include "common/common/logger.h" -#include "common/grpc/common.h" -#include "common/protobuf/protobuf.h" -#include "common/protobuf/utility.h" namespace Envoy { namespace Config { @@ -16,73 +13,35 @@ namespace Config { /** * Adapter from typed Subscription to untyped GrpcMux. Also handles per-xDS API stats/logging. 
*/ -template -class GrpcMuxSubscriptionImpl : public Subscription, +class GrpcMuxSubscriptionImpl : public Subscription, GrpcMuxCallbacks, Logger::Loggable { public: - GrpcMuxSubscriptionImpl(GrpcMux& grpc_mux, SubscriptionStats stats) - : grpc_mux_(grpc_mux), stats_(stats), - type_url_(Grpc::Common::typeUrl(ResourceType().GetDescriptor()->full_name())) {} + GrpcMuxSubscriptionImpl(GrpcMux& grpc_mux, SubscriptionStats stats, absl::string_view type_url, + Event::Dispatcher& dispatcher, + std::chrono::milliseconds init_fetch_timeout); // Config::Subscription - void start(const std::vector& resources, - SubscriptionCallbacks& callbacks) override { - callbacks_ = &callbacks; - watch_ = grpc_mux_.subscribe(type_url_, resources, *this); - // The attempt stat here is maintained for the purposes of having consistency between ADS and - // gRPC/filesystem/REST Subscriptions. Since ADS is push based and muxed, the notion of an - // "attempt" for a given xDS API combined by ADS is not really that meaningful. - stats_.update_attempt_.inc(); - } - - void updateResources(const std::vector& resources) override { - watch_ = grpc_mux_.subscribe(type_url_, resources, *this); - stats_.update_attempt_.inc(); - } + void start(const std::set& resources, SubscriptionCallbacks& callbacks) override; + void updateResources(const std::set& update_to_these_names) override; // Config::GrpcMuxCallbacks void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, - const std::string& version_info) override { - Protobuf::RepeatedPtrField typed_resources; - std::transform(resources.cbegin(), resources.cend(), - Protobuf::RepeatedPtrFieldBackInserter(&typed_resources), - MessageUtil::anyConvert); - // TODO(mattklein123): In the future if we start tracking per-resource versions, we need to - // supply those versions to onConfigUpdate() along with the xDS response ("system") - // version_info. 
This way, both types of versions can be tracked and exposed for debugging by - // the configuration update targets. - callbacks_->onConfigUpdate(typed_resources, version_info); - stats_.update_success_.inc(); - stats_.update_attempt_.inc(); - stats_.version_.set(HashUtil::xxHash64(version_info)); - ENVOY_LOG(debug, "gRPC config for {} accepted with {} resources: {}", type_url_, - resources.size(), RepeatedPtrUtil::debugString(typed_resources)); - } - - void onConfigUpdateFailed(const EnvoyException* e) override { - // TODO(htuch): Less fragile signal that this is failure vs. reject. - if (e == nullptr) { - stats_.update_failure_.inc(); - ENVOY_LOG(debug, "gRPC update for {} failed", type_url_); - } else { - stats_.update_rejected_.inc(); - ENVOY_LOG(warn, "gRPC config for {} rejected: {}", type_url_, e->what()); - } - stats_.update_attempt_.inc(); - callbacks_->onConfigUpdateFailed(e); - } - - std::string resourceName(const ProtobufWkt::Any& resource) override { - return callbacks_->resourceName(resource); - } + const std::string& version_info) override; + void onConfigUpdateFailed(const EnvoyException* e) override; + std::string resourceName(const ProtobufWkt::Any& resource) override; private: + void disableInitFetchTimeoutTimer(); + GrpcMux& grpc_mux_; SubscriptionStats stats_; const std::string type_url_; - SubscriptionCallbacks* callbacks_{}; + SubscriptionCallbacks* callbacks_{}; GrpcMuxWatchPtr watch_{}; + Event::Dispatcher& dispatcher_; + std::chrono::milliseconds init_fetch_timeout_; + Event::TimerPtr init_fetch_timeout_timer_; }; } // namespace Config diff --git a/source/common/config/grpc_stream.h b/source/common/config/grpc_stream.h index c380b4b62588b..b80f3f3e9afae 100644 --- a/source/common/config/grpc_stream.h +++ b/source/common/config/grpc_stream.h @@ -1,8 +1,8 @@ #pragma once #include -#include +#include "envoy/config/xds_grpc_context.h" #include "envoy/grpc/async_client.h" #include "common/common/backoff_strategy.h" @@ -12,71 +12,51 @@ namespace 
Envoy { namespace Config { -// Oversees communication for gRPC xDS implementations (parent to both regular xDS and incremental +// Oversees communication for gRPC xDS implementations (parent to both regular xDS and delta // xDS variants). Reestablishes the gRPC channel when necessary, and provides rate limiting of // requests. -template +template class GrpcStream : public Grpc::TypedAsyncStreamCallbacks, public Logger::Loggable { public: - GrpcStream(Grpc::AsyncClientPtr async_client, const Protobuf::MethodDescriptor& service_method, - Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, Stats::Scope& scope, + GrpcStream(GrpcStreamCallbacks* callbacks, Grpc::AsyncClientPtr async_client, + const Protobuf::MethodDescriptor& service_method, Runtime::RandomGenerator& random, + Event::Dispatcher& dispatcher, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings) - : async_client_(std::move(async_client)), service_method_(service_method), - control_plane_stats_(generateControlPlaneStats(scope)), random_(random), - time_source_(dispatcher.timeSystem()), + : callbacks_(callbacks), async_client_(std::move(async_client)), + service_method_(service_method), control_plane_stats_(generateControlPlaneStats(scope)), + random_(random), time_source_(dispatcher.timeSource()), rate_limiting_enabled_(rate_limit_settings.enabled_) { retry_timer_ = dispatcher.createTimer([this]() -> void { establishNewStream(); }); if (rate_limiting_enabled_) { // Default Bucket contains 100 tokens maximum and refills at 10 tokens/sec. 
limit_request_ = std::make_unique( rate_limit_settings.max_tokens_, time_source_, rate_limit_settings.fill_rate_); - drain_request_timer_ = dispatcher.createTimer([this]() { drainRequests(); }); + drain_request_timer_ = dispatcher.createTimer([this]() { callbacks_->onWriteable(); }); } backoff_strategy_ = std::make_unique(RETRY_INITIAL_DELAY_MS, RETRY_MAX_DELAY_MS, random_); } - virtual void handleResponse(std::unique_ptr&& message) PURE; - virtual void handleStreamEstablished() PURE; - virtual void handleEstablishmentFailure() PURE; - - // Returns whether the request was actually sent (and so can leave the queue). - virtual void sendDiscoveryRequest(const RequestQueueItem& queue_item) PURE; - - void queueDiscoveryRequest(const RequestQueueItem& queue_item) { - request_queue_.push(queue_item); - drainRequests(); - } - void establishNewStream() { ENVOY_LOG(debug, "Establishing new gRPC bidi stream for {}", service_method_.DebugString()); + if (stream_ != nullptr) { + ENVOY_LOG(warn, "gRPC bidi stream for {} already exists!", service_method_.DebugString()); + return; + } stream_ = async_client_->start(service_method_, *this); if (stream_ == nullptr) { ENVOY_LOG(warn, "Unable to establish new stream"); - handleEstablishmentFailure(); + callbacks_->onEstablishmentFailure(); setRetryTimer(); return; } control_plane_stats_.connected_state_.set(1); - handleStreamEstablished(); + callbacks_->onStreamEstablished(); } bool grpcStreamAvailable() const { return stream_ != nullptr; } - bool checkRateLimitAllowsDrain(int queue_size) { - if (!rate_limiting_enabled_ || limit_request_->consume()) { - return true; - } - ASSERT(drain_request_timer_ != nullptr); - control_plane_stats_.rate_limit_enforced_.inc(); - control_plane_stats_.pending_requests_.set(queue_size); - // Enable the drain request timer. 
- drain_request_timer_->enableTimer( - std::chrono::milliseconds(limit_request_->nextTokenAvailableMs())); - return false; - } - void sendMessage(const RequestProto& request) { stream_->sendMessage(request, false); } // Grpc::AsyncStreamCallbacks @@ -91,7 +71,11 @@ class GrpcStream : public Grpc::TypedAsyncStreamCallbacks, void onReceiveMessage(std::unique_ptr&& message) override { // Reset here so that it starts with fresh backoff interval on next disconnect. backoff_strategy_->reset(); - handleResponse(std::move(message)); + // Sometimes during hot restarts this stat's value becomes inconsistent and will continue to + // have 0 until it is reconnected. Setting here ensures that it is consistent with the state of + // management server connection. + control_plane_stats_.connected_state_.set(1); + callbacks_->onDiscoveryResponse(std::move(message)); } void onReceiveTrailingMetadata(Http::HeaderMapPtr&& metadata) override { @@ -102,20 +86,35 @@ class GrpcStream : public Grpc::TypedAsyncStreamCallbacks, ENVOY_LOG(warn, "gRPC config stream closed: {}, {}", status, message); stream_ = nullptr; control_plane_stats_.connected_state_.set(0); - handleEstablishmentFailure(); + callbacks_->onEstablishmentFailure(); setRetryTimer(); } -private: - void drainRequests() { - ENVOY_LOG(trace, "draining discovery requests {}", request_queue_.size()); - while (!request_queue_.empty() && checkRateLimitAllowsDrain(request_queue_.size())) { - // Process the request, if rate limiting is not enabled at all or if it is under rate limit. - sendDiscoveryRequest(request_queue_.front()); - request_queue_.pop(); + void maybeUpdateQueueSizeStat(uint64_t size) { + // Although request_queue_.push() happens elsewhere, the only time the queue is non-transiently + // non-empty is when it remains non-empty after a drain attempt. (The push() doesn't matter + // because we always attempt this drain immediately after the push). 
Basically, a change in + // queue length is not "meaningful" until it has persisted until here. We need the + // if(>0 || used) to keep this stat from being wrongly marked interesting by a pointless set(0) + // and needlessly taking up space. The first time we set(123), used becomes true, and so we will + // subsequently always do the set (including set(0)). + if (size > 0 || control_plane_stats_.pending_requests_.used()) { + control_plane_stats_.pending_requests_.set(size); } } + bool checkRateLimitAllowsDrain() { + if (!rate_limiting_enabled_ || limit_request_->consume(1, false)) { + return true; + } + ASSERT(drain_request_timer_ != nullptr); + control_plane_stats_.rate_limit_enforced_.inc(); + // Enable the drain request timer. + drain_request_timer_->enableTimer(limit_request_->nextTokenAvailable()); + return false; + } + +private: void setRetryTimer() { retry_timer_->enableTimer(std::chrono::milliseconds(backoff_strategy_->nextBackOffMs())); } @@ -126,12 +125,12 @@ class GrpcStream : public Grpc::TypedAsyncStreamCallbacks, POOL_GAUGE_PREFIX(scope, control_plane_prefix))}; } + GrpcStreamCallbacks* const callbacks_; + // TODO(htuch): Make this configurable or some static. 
const uint32_t RETRY_INITIAL_DELAY_MS = 500; const uint32_t RETRY_MAX_DELAY_MS = 30000; // Do not cross more than 30s - std::queue request_queue_; - Grpc::AsyncClientPtr async_client_; Grpc::AsyncStream* stream_{}; const Protobuf::MethodDescriptor& service_method_; diff --git a/source/common/config/grpc_subscription_impl.h b/source/common/config/grpc_subscription_impl.h index aedbf5adc756e..0c90b8a47f065 100644 --- a/source/common/config/grpc_subscription_impl.h +++ b/source/common/config/grpc_subscription_impl.h @@ -12,34 +12,35 @@ namespace Envoy { namespace Config { -template -class GrpcSubscriptionImpl : public Config::Subscription { +class GrpcSubscriptionImpl : public Config::Subscription { public: GrpcSubscriptionImpl(const LocalInfo::LocalInfo& local_info, Grpc::AsyncClientPtr async_client, Event::Dispatcher& dispatcher, Runtime::RandomGenerator& random, - const Protobuf::MethodDescriptor& service_method, SubscriptionStats stats, - Stats::Scope& scope, const RateLimitSettings& rate_limit_settings) + const Protobuf::MethodDescriptor& service_method, absl::string_view type_url, + SubscriptionStats stats, Stats::Scope& scope, + const RateLimitSettings& rate_limit_settings, + std::chrono::milliseconds init_fetch_timeout) : grpc_mux_(local_info, std::move(async_client), dispatcher, service_method, random, scope, rate_limit_settings), - grpc_mux_subscription_(grpc_mux_, stats) {} + grpc_mux_subscription_(grpc_mux_, stats, type_url, dispatcher, init_fetch_timeout) {} // Config::Subscription - void start(const std::vector& resources, - Config::SubscriptionCallbacks& callbacks) override { + void start(const std::set& resources, + Config::SubscriptionCallbacks& callbacks) override { // Subscribe first, so we get failure callbacks if grpc_mux_.start() fails. 
grpc_mux_subscription_.start(resources, callbacks); grpc_mux_.start(); } - void updateResources(const std::vector& resources) override { - grpc_mux_subscription_.updateResources(resources); + void updateResources(const std::set& update_to_these_names) override { + grpc_mux_subscription_.updateResources(update_to_these_names); } GrpcMuxImpl& grpcMux() { return grpc_mux_; } private: GrpcMuxImpl grpc_mux_; - GrpcMuxSubscriptionImpl grpc_mux_subscription_; + GrpcMuxSubscriptionImpl grpc_mux_subscription_; }; } // namespace Config diff --git a/source/common/config/http_subscription_impl.cc b/source/common/config/http_subscription_impl.cc new file mode 100644 index 0000000000000..971d342e8c4e4 --- /dev/null +++ b/source/common/config/http_subscription_impl.cc @@ -0,0 +1,115 @@ +#include "common/config/http_subscription_impl.h" + +#include + +#include "common/buffer/buffer_impl.h" +#include "common/common/assert.h" +#include "common/common/macros.h" +#include "common/config/utility.h" +#include "common/http/headers.h" +#include "common/protobuf/protobuf.h" +#include "common/protobuf/utility.h" + +#include "google/api/annotations.pb.h" + +namespace Envoy { +namespace Config { + +HttpSubscriptionImpl::HttpSubscriptionImpl( + const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cm, + const std::string& remote_cluster_name, Event::Dispatcher& dispatcher, + Runtime::RandomGenerator& random, std::chrono::milliseconds refresh_interval, + std::chrono::milliseconds request_timeout, const Protobuf::MethodDescriptor& service_method, + SubscriptionStats stats, std::chrono::milliseconds init_fetch_timeout) + : Http::RestApiFetcher(cm, remote_cluster_name, dispatcher, random, refresh_interval, + request_timeout), + stats_(stats), dispatcher_(dispatcher), init_fetch_timeout_(init_fetch_timeout) { + request_.mutable_node()->CopyFrom(local_info.node()); + ASSERT(service_method.options().HasExtension(google::api::http)); + const auto& http_rule = 
service_method.options().GetExtension(google::api::http); + path_ = http_rule.post(); + ASSERT(http_rule.body() == "*"); +} + +// Config::Subscription +void HttpSubscriptionImpl::start(const std::set& resources, + Config::SubscriptionCallbacks& callbacks) { + ASSERT(callbacks_ == nullptr); + + if (init_fetch_timeout_.count() > 0) { + init_fetch_timeout_timer_ = dispatcher_.createTimer([this]() -> void { + ENVOY_LOG(warn, "REST config: initial fetch timed out for {}", path_); + callbacks_->onConfigUpdateFailed(nullptr); + }); + init_fetch_timeout_timer_->enableTimer(init_fetch_timeout_); + } + + Protobuf::RepeatedPtrField resources_vector(resources.begin(), resources.end()); + request_.mutable_resource_names()->Swap(&resources_vector); + callbacks_ = &callbacks; + initialize(); +} + +void HttpSubscriptionImpl::updateResources(const std::set& update_to_these_names) { + Protobuf::RepeatedPtrField resources_vector(update_to_these_names.begin(), + update_to_these_names.end()); + request_.mutable_resource_names()->Swap(&resources_vector); +} + +// Http::RestApiFetcher +void HttpSubscriptionImpl::createRequest(Http::Message& request) { + ENVOY_LOG(debug, "Sending REST request for {}", path_); + stats_.update_attempt_.inc(); + request.headers().insertMethod().value().setReference(Http::Headers::get().MethodValues.Post); + request.headers().insertPath().value(path_); + request.body() = + std::make_unique(MessageUtil::getJsonStringFromMessage(request_)); + request.headers().insertContentType().value().setReference( + Http::Headers::get().ContentTypeValues.Json); + request.headers().insertContentLength().value(request.body()->length()); +} + +void HttpSubscriptionImpl::parseResponse(const Http::Message& response) { + disableInitFetchTimeoutTimer(); + envoy::api::v2::DiscoveryResponse message; + try { + MessageUtil::loadFromJson(response.bodyAsString(), message); + } catch (const EnvoyException& e) { + ENVOY_LOG(warn, "REST config JSON conversion error: {}", e.what()); + 
handleFailure(nullptr); + return; + } + try { + callbacks_->onConfigUpdate(message.resources(), message.version_info()); + request_.set_version_info(message.version_info()); + stats_.version_.set(HashUtil::xxHash64(request_.version_info())); + stats_.update_success_.inc(); + } catch (const EnvoyException& e) { + ENVOY_LOG(warn, "REST config update rejected: {}", e.what()); + stats_.update_rejected_.inc(); + callbacks_->onConfigUpdateFailed(&e); + } +} + +void HttpSubscriptionImpl::onFetchComplete() {} + +void HttpSubscriptionImpl::onFetchFailure(const EnvoyException* e) { + disableInitFetchTimeoutTimer(); + ENVOY_LOG(warn, "REST config update failed: {}", e != nullptr ? e->what() : "fetch failure"); + handleFailure(e); +} + +void HttpSubscriptionImpl::handleFailure(const EnvoyException* e) { + stats_.update_failure_.inc(); + callbacks_->onConfigUpdateFailed(e); +} + +void HttpSubscriptionImpl::disableInitFetchTimeoutTimer() { + if (init_fetch_timeout_timer_) { + init_fetch_timeout_timer_->disableTimer(); + init_fetch_timeout_timer_.reset(); + } +} + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/http_subscription_impl.h b/source/common/config/http_subscription_impl.h index b6ac42863bce4..97612b3897153 100644 --- a/source/common/config/http_subscription_impl.h +++ b/source/common/config/http_subscription_impl.h @@ -1,21 +1,10 @@ #pragma once -#include - #include "envoy/api/v2/core/base.pb.h" #include "envoy/config/subscription.h" +#include "envoy/event/dispatcher.h" -#include "common/buffer/buffer_impl.h" -#include "common/common/assert.h" -#include "common/common/logger.h" -#include "common/common/macros.h" -#include "common/config/utility.h" -#include "common/http/headers.h" #include "common/http/rest_api_fetcher.h" -#include "common/protobuf/protobuf.h" -#include "common/protobuf/utility.h" - -#include "google/api/annotations.pb.h" namespace Envoy { namespace Config { @@ -27,95 +16,40 @@ namespace Config { * canonical 
representation of DiscoveryResponse. This implementation is responsible for translating * between the proto serializable objects in the Subscription API and the REST JSON representation. */ -template class HttpSubscriptionImpl : public Http::RestApiFetcher, - public Config::Subscription, + public Config::Subscription, Logger::Loggable { public: HttpSubscriptionImpl(const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cm, const std::string& remote_cluster_name, Event::Dispatcher& dispatcher, Runtime::RandomGenerator& random, std::chrono::milliseconds refresh_interval, std::chrono::milliseconds request_timeout, - const Protobuf::MethodDescriptor& service_method, SubscriptionStats stats) - : Http::RestApiFetcher(cm, remote_cluster_name, dispatcher, random, refresh_interval, - request_timeout), - stats_(stats) { - request_.mutable_node()->CopyFrom(local_info.node()); - ASSERT(service_method.options().HasExtension(google::api::http)); - const auto& http_rule = service_method.options().GetExtension(google::api::http); - path_ = http_rule.post(); - ASSERT(http_rule.body() == "*"); - } + const Protobuf::MethodDescriptor& service_method, SubscriptionStats stats, + std::chrono::milliseconds init_fetch_timeout); // Config::Subscription - void start(const std::vector& resources, - Config::SubscriptionCallbacks& callbacks) override { - ASSERT(callbacks_ == nullptr); - Protobuf::RepeatedPtrField resources_vector(resources.begin(), - resources.end()); - request_.mutable_resource_names()->Swap(&resources_vector); - callbacks_ = &callbacks; - initialize(); - } - - void updateResources(const std::vector& resources) override { - Protobuf::RepeatedPtrField resources_vector(resources.begin(), - resources.end()); - request_.mutable_resource_names()->Swap(&resources_vector); - } + void start(const std::set& resources, + Config::SubscriptionCallbacks& callbacks) override; + void updateResources(const std::set& update_to_these_names) override; // Http::RestApiFetcher - void 
createRequest(Http::Message& request) override { - ENVOY_LOG(debug, "Sending REST request for {}", path_); - stats_.update_attempt_.inc(); - request.headers().insertMethod().value().setReference(Http::Headers::get().MethodValues.Post); - request.headers().insertPath().value(path_); - request.body() = - std::make_unique(MessageUtil::getJsonStringFromMessage(request_)); - request.headers().insertContentType().value().setReference( - Http::Headers::get().ContentTypeValues.Json); - request.headers().insertContentLength().value(request.body()->length()); - } - - void parseResponse(const Http::Message& response) override { - envoy::api::v2::DiscoveryResponse message; - const auto status = Protobuf::util::JsonStringToMessage(response.bodyAsString(), &message); - if (!status.ok()) { - ENVOY_LOG(warn, "REST config JSON conversion error: {}", status.ToString()); - handleFailure(nullptr); - return; - } - const auto typed_resources = Config::Utility::getTypedResources(message); - try { - callbacks_->onConfigUpdate(typed_resources, message.version_info()); - request_.set_version_info(message.version_info()); - stats_.version_.set(HashUtil::xxHash64(request_.version_info())); - stats_.update_success_.inc(); - } catch (const EnvoyException& e) { - ENVOY_LOG(warn, "REST config update rejected: {}", e.what()); - stats_.update_rejected_.inc(); - callbacks_->onConfigUpdateFailed(&e); - } - } - - void onFetchComplete() override {} - - void onFetchFailure(const EnvoyException* e) override { - ENVOY_LOG(warn, "REST config update failed: {}", e != nullptr ? 
e->what() : "fetch failure"); - handleFailure(e); - } + void createRequest(Http::Message& request) override; + void parseResponse(const Http::Message& response) override; + void onFetchComplete() override; + void onFetchFailure(const EnvoyException* e) override; private: - void handleFailure(const EnvoyException* e) { - stats_.update_failure_.inc(); - callbacks_->onConfigUpdateFailed(e); - } + void handleFailure(const EnvoyException* e); + void disableInitFetchTimeoutTimer(); std::string path_; - Protobuf::RepeatedPtrField resources_; - Config::SubscriptionCallbacks* callbacks_{}; + Protobuf::RepeatedPtrField resources_; + Config::SubscriptionCallbacks* callbacks_{}; envoy::api::v2::DiscoveryRequest request_; SubscriptionStats stats_; + Event::Dispatcher& dispatcher_; + std::chrono::milliseconds init_fetch_timeout_; + Event::TimerPtr init_fetch_timeout_timer_; }; } // namespace Config diff --git a/source/common/config/lds_json.cc b/source/common/config/lds_json.cc index aa9d0c312f596..a8bfab61d99b5 100644 --- a/source/common/config/lds_json.cc +++ b/source/common/config/lds_json.cc @@ -1,7 +1,5 @@ #include "common/config/lds_json.h" -#include "envoy/stats/stats_options.h" - #include "common/common/assert.h" #include "common/config/address_json.h" #include "common/config/json_utility.h" @@ -9,6 +7,7 @@ #include "common/config/utility.h" #include "common/json/config_schemas.h" #include "common/network/utility.h" +#include "common/protobuf/utility.h" #include "extensions/filters/network/well_known_names.h" @@ -16,12 +15,10 @@ namespace Envoy { namespace Config { void LdsJson::translateListener(const Json::Object& json_listener, - envoy::api::v2::Listener& listener, - const Stats::StatsOptions& stats_options) { + envoy::api::v2::Listener& listener) { json_listener.validateSchema(Json::Schema::LISTENER_SCHEMA); const std::string name = json_listener.getString("name", ""); - Utility::checkObjNameLength("Invalid listener name", name, stats_options); 
listener.set_name(name); AddressJson::translateAddress(json_listener.getString("address"), true, true, @@ -43,10 +40,8 @@ void LdsJson::translateListener(const Json::Object& json_listener, const std::string json_config = "{\"deprecated_v1\": true, \"value\": " + json_filter->getObject("config")->asJsonString() + "}"; - - const auto status = Protobuf::util::JsonStringToMessage(json_config, filter->mutable_config()); // JSON schema has already validated that this is a valid JSON object. - ASSERT(status.ok()); + MessageUtil::loadFromJson(json_config, *filter->mutable_config()); } const std::string drain_type = json_listener.getString("drain_type", "default"); diff --git a/source/common/config/lds_json.h b/source/common/config/lds_json.h index 4bd1d425987af..4848192acf570 100644 --- a/source/common/config/lds_json.h +++ b/source/common/config/lds_json.h @@ -3,7 +3,6 @@ #include "envoy/api/v2/lds.pb.h" #include "envoy/api/v2/listener/listener.pb.h" #include "envoy/json/json_object.h" -#include "envoy/stats/stats_options.h" namespace Envoy { namespace Config { @@ -16,8 +15,7 @@ class LdsJson { * @param listener destination v2 envoy::api::v2::Listener. 
*/ static void translateListener(const Json::Object& json_listener, - envoy::api::v2::Listener& listener, - const Stats::StatsOptions& stats_options); + envoy::api::v2::Listener& listener); }; } // namespace Config diff --git a/source/common/config/rds_json.cc b/source/common/config/rds_json.cc index 036ed45ff26a9..658ae6b145b20 100644 --- a/source/common/config/rds_json.cc +++ b/source/common/config/rds_json.cc @@ -116,13 +116,12 @@ void RdsJson::translateQueryParameterMatcher( } void RdsJson::translateRouteConfiguration(const Json::Object& json_route_config, - envoy::api::v2::RouteConfiguration& route_config, - const Stats::StatsOptions& stats_options) { + envoy::api::v2::RouteConfiguration& route_config) { json_route_config.validateSchema(Json::Schema::ROUTE_CONFIGURATION_SCHEMA); for (const auto json_virtual_host : json_route_config.getObjectArray("virtual_hosts", true)) { auto* virtual_host = route_config.mutable_virtual_hosts()->Add(); - translateVirtualHost(*json_virtual_host, *virtual_host, stats_options); + translateVirtualHost(*json_virtual_host, *virtual_host); } for (const std::string& header : @@ -150,12 +149,10 @@ void RdsJson::translateRouteConfiguration(const Json::Object& json_route_config, } void RdsJson::translateVirtualHost(const Json::Object& json_virtual_host, - envoy::api::v2::route::VirtualHost& virtual_host, - const Stats::StatsOptions& stats_options) { + envoy::api::v2::route::VirtualHost& virtual_host) { json_virtual_host.validateSchema(Json::Schema::VIRTUAL_HOST_CONFIGURATION_SCHEMA); const std::string name = json_virtual_host.getString("name", ""); - Utility::checkObjNameLength("Invalid virtual host name", name, stats_options); virtual_host.set_name(name); for (const std::string& domain : json_virtual_host.getStringArray("domains", true)) { @@ -312,7 +309,7 @@ void RdsJson::translateRoute(const Json::Object& json_route, envoy::api::v2::rou action->set_priority(priority); for (const auto header_value : 
json_route.getObjectArray("request_headers_to_add", true)) { - auto* header_value_option = action->mutable_request_headers_to_add()->Add(); + auto* header_value_option = route.mutable_request_headers_to_add()->Add(); BaseJson::translateHeaderValueOption(*header_value, *header_value_option); } diff --git a/source/common/config/rds_json.h b/source/common/config/rds_json.h index 1334eddca4c7a..dba4c8b5b6188 100644 --- a/source/common/config/rds_json.h +++ b/source/common/config/rds_json.h @@ -3,7 +3,6 @@ #include "envoy/api/v2/rds.pb.h" #include "envoy/api/v2/route/route.pb.h" #include "envoy/json/json_object.h" -#include "envoy/stats/stats_options.h" namespace Envoy { namespace Config { @@ -65,8 +64,7 @@ class RdsJson { * @param route_config destination v2 envoy::api::v2::RouteConfiguration. */ static void translateRouteConfiguration(const Json::Object& json_route_config, - envoy::api::v2::RouteConfiguration& route_config, - const Stats::StatsOptions& stats_options); + envoy::api::v2::RouteConfiguration& route_config); /** * Translate a v1 JSON virtual host object to v2 envoy::api::v2::route::VirtualHost. @@ -74,8 +72,7 @@ class RdsJson { * @param virtual_host destination v2 envoy::api::v2::route::VirtualHost. */ static void translateVirtualHost(const Json::Object& json_virtual_host, - envoy::api::v2::route::VirtualHost& virtual_host, - const Stats::StatsOptions& stats_options); + envoy::api::v2::route::VirtualHost& virtual_host); /** * Translate a v1 JSON decorator object to v2 envoy::api::v2::route::Decorator. 
diff --git a/source/common/config/resources.h b/source/common/config/resources.h index 69ed2d91a46dc..9f35373b511e3 100644 --- a/source/common/config/resources.h +++ b/source/common/config/resources.h @@ -17,6 +17,7 @@ class TypeUrlValues { const std::string ClusterLoadAssignment{"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment"}; const std::string Secret{"type.googleapis.com/envoy.api.v2.auth.Secret"}; const std::string RouteConfiguration{"type.googleapis.com/envoy.api.v2.RouteConfiguration"}; + const std::string VirtualHost{"type.googleapis.com/envoy.api.v2.route.VirtualHost"}; }; typedef ConstSingleton TypeUrl; diff --git a/source/common/config/subscription_factory.h b/source/common/config/subscription_factory.h index ce6a1e8d69a51..2228ee36c37b8 100644 --- a/source/common/config/subscription_factory.h +++ b/source/common/config/subscription_factory.h @@ -8,6 +8,7 @@ #include "envoy/stats/scope.h" #include "envoy/upstream/cluster_manager.h" +#include "common/config/delta_subscription_impl.h" #include "common/config/filesystem_subscription_impl.h" #include "common/config/grpc_mux_subscription_impl.h" #include "common/config/grpc_subscription_impl.h" @@ -36,19 +37,18 @@ class SubscriptionFactory { * service description). 
* @param api reference to the Api object */ - template - static std::unique_ptr> subscriptionFromConfigSource( + static std::unique_ptr subscriptionFromConfigSource( const envoy::api::v2::core::ConfigSource& config, const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, Upstream::ClusterManager& cm, Runtime::RandomGenerator& random, Stats::Scope& scope, const std::string& rest_method, const std::string& grpc_method, - Api::Api& api) { - std::unique_ptr> result; + absl::string_view type_url, Api::Api& api) { + std::unique_ptr result; SubscriptionStats stats = Utility::generateStats(scope); switch (config.config_source_specifier_case()) { case envoy::api::v2::core::ConfigSource::kPath: { Utility::checkFilesystemSubscriptionBackingPath(config.path(), api); - result.reset(new Config::FilesystemSubscriptionImpl(dispatcher, config.path(), - stats, api)); + result = std::make_unique(dispatcher, config.path(), + stats, api); break; } case envoy::api::v2::core::ConfigSource::kApiConfigSource: { @@ -61,21 +61,34 @@ class SubscriptionFactory { "Please specify an explicit supported api_type in the following config:\n" + config.DebugString()); case envoy::api::v2::core::ApiConfigSource::REST: - result.reset(new HttpSubscriptionImpl( + result = std::make_unique( local_info, cm, api_config_source.cluster_names()[0], dispatcher, random, Utility::apiConfigSourceRefreshDelay(api_config_source), Utility::apiConfigSourceRequestTimeout(api_config_source), - *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(rest_method), stats)); + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(rest_method), stats, + Utility::configSourceInitialFetchTimeout(config)); break; - case envoy::api::v2::core::ApiConfigSource::GRPC: { - result.reset(new GrpcSubscriptionImpl( + case envoy::api::v2::core::ApiConfigSource::GRPC: + result = std::make_unique( local_info, Config::Utility::factoryForGrpcApiConfigSource(cm.grpcAsyncClientManager(), - 
config.api_config_source(), scope) + api_config_source, scope) ->create(), dispatcher, random, - *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(grpc_method), stats, - scope, Utility::parseRateLimitSettings(api_config_source))); + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(grpc_method), type_url, + stats, scope, Utility::parseRateLimitSettings(api_config_source), + Utility::configSourceInitialFetchTimeout(config)); + break; + case envoy::api::v2::core::ApiConfigSource::DELTA_GRPC: { + Utility::checkApiConfigSourceSubscriptionBackingCluster(cm.clusters(), api_config_source); + result = std::make_unique( + local_info, + Config::Utility::factoryForGrpcApiConfigSource(cm.grpcAsyncClientManager(), + api_config_source, scope) + ->create(), + dispatcher, *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(grpc_method), + type_url, random, scope, Utility::parseRateLimitSettings(api_config_source), stats, + Utility::configSourceInitialFetchTimeout(config)); break; } default: @@ -84,7 +97,9 @@ class SubscriptionFactory { break; } case envoy::api::v2::core::ConfigSource::kAds: { - result.reset(new GrpcMuxSubscriptionImpl(cm.adsMux(), stats)); + result = std::make_unique( + cm.adsMux(), stats, type_url, dispatcher, + Utility::configSourceInitialFetchTimeout(config)); break; } default: diff --git a/source/common/config/utility.cc b/source/common/config/utility.cc index cfe2fdbc06af3..6ebc2d43d3ef1 100644 --- a/source/common/config/utility.cc +++ b/source/common/config/utility.cc @@ -89,7 +89,8 @@ void Utility::checkFilesystemSubscriptionBackingPath(const std::string& path, Ap void Utility::checkApiConfigSourceNames( const envoy::api::v2::core::ApiConfigSource& api_config_source) { const bool is_grpc = - (api_config_source.api_type() == envoy::api::v2::core::ApiConfigSource::GRPC); + (api_config_source.api_type() == envoy::api::v2::core::ApiConfigSource::GRPC || + api_config_source.api_type() == 
envoy::api::v2::core::ApiConfigSource::DELTA_GRPC); if (api_config_source.cluster_names().empty() && api_config_source.grpc_services().empty()) { throw EnvoyException( @@ -99,19 +100,19 @@ void Utility::checkApiConfigSourceNames( if (is_grpc) { if (!api_config_source.cluster_names().empty()) { - throw EnvoyException(fmt::format( - "envoy::api::v2::core::ConfigSource::GRPC must not have a cluster name specified: {}", - api_config_source.DebugString())); + throw EnvoyException(fmt::format("envoy::api::v2::core::ConfigSource::(DELTA_)GRPC " + "must not have a cluster name specified: {}", + api_config_source.DebugString())); } if (api_config_source.grpc_services().size() > 1) { - throw EnvoyException(fmt::format( - "envoy::api::v2::core::ConfigSource::GRPC must have a single gRPC service specified: {}", - api_config_source.DebugString())); + throw EnvoyException(fmt::format("envoy::api::v2::core::ConfigSource::(DELTA_)GRPC " + "must have a single gRPC service specified: {}", + api_config_source.DebugString())); } } else { if (!api_config_source.grpc_services().empty()) { throw EnvoyException( - fmt::format("envoy::api::v2::core::ConfigSource, if not of type gRPC, must not have " + fmt::format("envoy::api::v2::core::ConfigSource, if not a gRPC type, must not have " "a gRPC service specified: {}", api_config_source.DebugString())); } @@ -126,6 +127,7 @@ void Utility::checkApiConfigSourceNames( void Utility::validateClusterName(const Upstream::ClusterManager::ClusterInfoMap& clusters, const std::string& cluster_name) { const auto& it = clusters.find(cluster_name); + if (it == clusters.end() || it->second.get().info()->addedViaApi() || it->second.get().info()->type() == envoy::api::v2::Cluster::EDS) { throw EnvoyException(fmt::format( @@ -176,6 +178,12 @@ std::chrono::milliseconds Utility::apiConfigSourceRequestTimeout( PROTOBUF_GET_MS_OR_DEFAULT(api_config_source, request_timeout, 1000)); } +std::chrono::milliseconds +Utility::configSourceInitialFetchTimeout(const 
envoy::api::v2::core::ConfigSource& config_source) { + return std::chrono::milliseconds( + PROTOBUF_GET_MS_OR_DEFAULT(config_source, initial_fetch_timeout, 0)); +} + void Utility::translateCdsConfig(const Json::Object& json_config, envoy::api::v2::core::ConfigSource& cds_config) { translateApiConfigSource(json_config.getObject("cluster")->getString("name"), @@ -186,12 +194,10 @@ void Utility::translateCdsConfig(const Json::Object& json_config, void Utility::translateRdsConfig( const Json::Object& json_rds, - envoy::config::filter::network::http_connection_manager::v2::Rds& rds, - const Stats::StatsOptions& stats_options) { + envoy::config::filter::network::http_connection_manager::v2::Rds& rds) { json_rds.validateSchema(Json::Schema::RDS_CONFIGURATION_SCHEMA); const std::string name = json_rds.getString("route_config_name", ""); - checkObjNameLength("Invalid route_config name", name, stats_options); rds.set_route_config_name(name); translateApiConfigSource(json_rds.getString("cluster"), @@ -234,22 +240,14 @@ Utility::createStatsMatcher(const envoy::config::bootstrap::v2::Bootstrap& boots return std::make_unique(bootstrap.stats_config()); } -void Utility::checkObjNameLength(const std::string& error_prefix, const std::string& name, - const Stats::StatsOptions& stats_options) { - if (name.length() > stats_options.maxNameLength()) { - throw EnvoyException(fmt::format("{}: Length of {} ({}) exceeds allowed maximum length ({})", - error_prefix, name, name.length(), - stats_options.maxNameLength())); - } -} - Grpc::AsyncClientFactoryPtr Utility::factoryForGrpcApiConfigSource( Grpc::AsyncClientManager& async_client_manager, const envoy::api::v2::core::ApiConfigSource& api_config_source, Stats::Scope& scope) { Utility::checkApiConfigSourceNames(api_config_source); - if (api_config_source.api_type() != envoy::api::v2::core::ApiConfigSource::GRPC) { - throw EnvoyException(fmt::format("envoy::api::v2::core::ConfigSource type must be GRPC: {}", + if 
(api_config_source.api_type() != envoy::api::v2::core::ApiConfigSource::GRPC && + api_config_source.api_type() != envoy::api::v2::core::ApiConfigSource::DELTA_GRPC) { + throw EnvoyException(fmt::format("envoy::api::v2::core::ConfigSource type must be gRPC: {}", api_config_source.DebugString())); } diff --git a/source/common/config/utility.h b/source/common/config/utility.h index 60c36db3197cc..4e4861994d456 100644 --- a/source/common/config/utility.h +++ b/source/common/config/utility.h @@ -12,7 +12,6 @@ #include "envoy/server/filter_config.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats_matcher.h" -#include "envoy/stats/stats_options.h" #include "envoy/stats/tag_producer.h" #include "envoy/upstream/cluster_manager.h" @@ -104,6 +103,14 @@ class Utility { static std::chrono::milliseconds apiConfigSourceRequestTimeout(const envoy::api::v2::core::ApiConfigSource& api_config_source); + /** + * Extract initial_fetch_timeout as a std::chrono::milliseconds from + * envoy::api::v2::core::ConfigSource. If initial_fetch_timeout isn't set in the config source, a + * default value of 0s will be returned. + */ + static std::chrono::milliseconds + configSourceInitialFetchTimeout(const envoy::api::v2::core::ConfigSource& config_source); + /** * Populate an envoy::api::v2::core::ApiConfigSource. * @param cluster supplies the cluster name for the ApiConfigSource. @@ -196,8 +203,7 @@ class Utility { */ static void translateRdsConfig(const Json::Object& json_rds, - envoy::config::filter::network::http_connection_manager::v2::Rds& rds, - const Stats::StatsOptions& stats_options); + envoy::config::filter::network::http_connection_manager::v2::Rds& rds); /** * Convert a v1 LDS JSON config to v2 LDS envoy::api::v2::core::ConfigSource. @@ -230,7 +236,7 @@ class Utility { * @param name string identifier for the particular implementation. Note: this is a proto string * because it is assumed that this value will be pulled directly from the configuration proto. 
*/ - template static Factory& getAndCheckFactory(const ProtobufTypes::String& name) { + template static Factory& getAndCheckFactory(const std::string& name) { if (name.empty()) { throw EnvoyException("Provided name for static registration lookup was empty."); } @@ -281,17 +287,6 @@ class Utility { static Stats::StatsMatcherPtr createStatsMatcher(const envoy::config::bootstrap::v2::Bootstrap& bootstrap); - /** - * Check user supplied name in RDS/CDS/LDS for sanity. - * It should be within the configured length limit. Throws on error. - * @param error_prefix supplies the prefix to use in error messages. - * @param name supplies the name to check for length limits. - * @param stats_options the top-level statsOptions struct, which contains the max stat name / - * suffix lengths for stats. - */ - static void checkObjNameLength(const std::string& error_prefix, const std::string& name, - const Stats::StatsOptions& stats_options); - /** * Obtain gRPC async client factory from a envoy::api::v2::core::ApiConfigSource. * @param async_client_manager gRPC async client manager. diff --git a/source/common/config/well_known_names.cc b/source/common/config/well_known_names.cc index 40d3ed42e4c31..12b6072195ed6 100644 --- a/source/common/config/well_known_names.cc +++ b/source/common/config/well_known_names.cc @@ -106,6 +106,12 @@ TagNameValues::TagNameValues() { // mongo.(.)* addRegex(MONGO_PREFIX, "^mongo\\.((.*?)\\.)"); + + // http.[.]rds.(.) 
+ addRegex(RDS_ROUTE_CONFIG, "^http(?=\\.).*?\\.rds\\.((.*?)\\.)\\w+?$", ".rds."); + + // listener_manager.(worker_.)* + addRegex(WORKER_ID, "^listener_manager\\.((worker_\\d+)\\.)", "listener_manager.worker_"); } void TagNameValues::addRegex(const std::string& name, const std::string& regex, diff --git a/source/common/config/well_known_names.h b/source/common/config/well_known_names.h index c4e2abe61c945..748400b84dd6c 100644 --- a/source/common/config/well_known_names.h +++ b/source/common/config/well_known_names.h @@ -145,6 +145,10 @@ class TagNameValues { const std::string RESPONSE_CODE = "envoy.response_code"; // Request response code class const std::string RESPONSE_CODE_CLASS = "envoy.response_code_class"; + // Route config name for RDS updates + const std::string RDS_ROUTE_CONFIG = "envoy.rds_route_config"; + // Listener manager worker id + const std::string WORKER_ID = "envoy.worker_id"; // Mapping from the names above to their respective regex strings. const std::vector> name_regex_pairs_; diff --git a/source/common/event/BUILD b/source/common/event/BUILD index 00afdf22613e2..478fc28eb4c87 100644 --- a/source/common/event/BUILD +++ b/source/common/event/BUILD @@ -20,6 +20,7 @@ envoy_cc_library( ], deps = [ ":dispatcher_includes", + ":libevent_scheduler_lib", ":real_time_system_lib", "//include/envoy/common:time_interface", "//include/envoy/event:signal_interface", @@ -35,19 +36,21 @@ envoy_cc_library( ) envoy_cc_library( - name = "real_time_system_lib", - srcs = [ - "event_impl_base.cc", - "real_time_system.cc", - "timer_impl.cc", - ], - hdrs = [ - "event_impl_base.h", - "real_time_system.h", - "timer_impl.h", + name = "event_impl_base_lib", + srcs = ["event_impl_base.cc"], + hdrs = ["event_impl_base.h"], + external_deps = [ + "event", ], +) + +envoy_cc_library( + name = "real_time_system_lib", + srcs = ["real_time_system.cc"], + hdrs = ["real_time_system.h"], deps = [ - ":libevent_lib", + ":event_impl_base_lib", + ":timer_lib", 
"//include/envoy/event:timer_interface", "//source/common/common:utility_lib", "//source/common/event:dispatcher_includes", @@ -63,6 +66,7 @@ envoy_cc_library( ], deps = [ ":libevent_lib", + ":libevent_scheduler_lib", "//include/envoy/api:api_interface", "//include/envoy/event:deferred_deletable", "//include/envoy/event:dispatcher_interface", @@ -86,6 +90,32 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "libevent_scheduler_lib", + srcs = ["libevent_scheduler.cc"], + hdrs = ["libevent_scheduler.h"], + external_deps = ["event"], + deps = [ + ":libevent_lib", + ":timer_lib", + "//include/envoy/event:dispatcher_interface", + "//include/envoy/event:timer_interface", + "//source/common/common:assert_lib", + ], +) + +envoy_cc_library( + name = "timer_lib", + srcs = ["timer_impl.cc"], + hdrs = ["timer_impl.h"], + external_deps = ["event"], + deps = [ + ":event_impl_base_lib", + ":libevent_lib", + "//include/envoy/event:timer_interface", + ], +) + envoy_cc_library( name = "dispatched_thread_lib", srcs = ["dispatched_thread.cc"], diff --git a/source/common/event/dispatched_thread.h b/source/common/event/dispatched_thread.h index 8ea8d3beeb9b3..9dc5db53d80d5 100644 --- a/source/common/event/dispatched_thread.h +++ b/source/common/event/dispatched_thread.h @@ -39,7 +39,7 @@ namespace Event { */ class DispatchedThreadImpl : Logger::Loggable { public: - explicit DispatchedThreadImpl(Api::Api& api) : api_(api), dispatcher_(new DispatcherImpl(api_)) {} + DispatchedThreadImpl(Api::Api& api) : api_(api), dispatcher_(api_.allocateDispatcher()) {} /** * Start the thread. 
diff --git a/source/common/event/dispatcher_impl.cc b/source/common/event/dispatcher_impl.cc index 9c146ea2f518b..c7d2c03653965 100644 --- a/source/common/event/dispatcher_impl.cc +++ b/source/common/event/dispatcher_impl.cc @@ -14,7 +14,9 @@ #include "common/common/lock_guard.h" #include "common/common/thread.h" #include "common/event/file_event_impl.h" +#include "common/event/libevent_scheduler.h" #include "common/event/signal_impl.h" +#include "common/event/timer_impl.h" #include "common/filesystem/watcher_impl.h" #include "common/network/connection_impl.h" #include "common/network/dns_impl.h" @@ -26,23 +28,30 @@ namespace Envoy { namespace Event { -DispatcherImpl::DispatcherImpl(Api::Api& api) - : DispatcherImpl(Buffer::WatermarkFactoryPtr{new Buffer::WatermarkBufferFactory}, api) { - // The dispatcher won't work as expected if libevent hasn't been configured to use threads. - RELEASE_ASSERT(Libevent::Global::initialized(), ""); -} +DispatcherImpl::DispatcherImpl(Api::Api& api, Event::TimeSystem& time_system) + : DispatcherImpl(std::make_unique(), api, time_system) {} -DispatcherImpl::DispatcherImpl(Buffer::WatermarkFactoryPtr&& factory, Api::Api& api) - : api_(api), buffer_factory_(std::move(factory)), base_(event_base_new()), - scheduler_(api.timeSystem().createScheduler(base_)), +DispatcherImpl::DispatcherImpl(Buffer::WatermarkFactoryPtr&& factory, Api::Api& api, + Event::TimeSystem& time_system) + : api_(api), buffer_factory_(std::move(factory)), + scheduler_(time_system.createScheduler(base_scheduler_)), deferred_delete_timer_(createTimer([this]() -> void { clearDeferredDeleteList(); })), post_timer_(createTimer([this]() -> void { runPostCallbacks(); })), - current_to_delete_(&to_delete_1_) { - RELEASE_ASSERT(Libevent::Global::initialized(), ""); -} + current_to_delete_(&to_delete_1_) {} DispatcherImpl::~DispatcherImpl() {} +void DispatcherImpl::initializeStats(Stats::Scope& scope, const std::string& prefix) { + // This needs to be run in the dispatcher's 
thread, so that we have a thread id to log. + post([this, &scope, prefix] { + stats_prefix_ = prefix + "dispatcher"; + stats_ = std::make_unique( + DispatcherStats{ALL_DISPATCHER_STATS(POOL_HISTOGRAM_PREFIX(scope, stats_prefix_ + "."))}); + base_scheduler_.initializeStats(stats_.get()); + ENVOY_LOG(debug, "running {} on thread {}", stats_prefix_, run_tid_->debugString()); + }); +} + void DispatcherImpl::clearDeferredDeleteList() { ASSERT(isThreadSafe()); std::vector* to_delete = current_to_delete_; @@ -138,7 +147,7 @@ void DispatcherImpl::deferredDelete(DeferredDeletablePtr&& to_delete) { } } -void DispatcherImpl::exit() { event_base_loopexit(base_.get(), nullptr); } +void DispatcherImpl::exit() { base_scheduler_.loopExit(); } SignalEventPtr DispatcherImpl::listenForSignal(int signal_num, SignalCb cb) { ASSERT(isThreadSafe()); @@ -166,8 +175,7 @@ void DispatcherImpl::run(RunType type) { // not guarantee that events are run in any particular order. So even if we post() and call // event_base_once() before some other event, the other event might get called first. runPostCallbacks(); - - event_base_loop(base_.get(), type == RunType::NonBlock ? 
EVLOOP_NONBLOCK : 0); + base_scheduler_.run(type); } void DispatcherImpl::runPostCallbacks() { diff --git a/source/common/event/dispatcher_impl.h b/source/common/event/dispatcher_impl.h index 99306ced0502b..b712f22d879e4 100644 --- a/source/common/event/dispatcher_impl.h +++ b/source/common/event/dispatcher_impl.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include "envoy/api/api.h" @@ -10,10 +11,12 @@ #include "envoy/event/deferred_deletable.h" #include "envoy/event/dispatcher.h" #include "envoy/network/connection_handler.h" +#include "envoy/stats/scope.h" #include "common/common/logger.h" #include "common/common/thread.h" #include "common/event/libevent.h" +#include "common/event/libevent_scheduler.h" namespace Envoy { namespace Event { @@ -23,17 +26,19 @@ namespace Event { */ class DispatcherImpl : Logger::Loggable, public Dispatcher { public: - explicit DispatcherImpl(Api::Api& api); - DispatcherImpl(Buffer::WatermarkFactoryPtr&& factory, Api::Api& api); + DispatcherImpl(Api::Api& api, Event::TimeSystem& time_system); + DispatcherImpl(Buffer::WatermarkFactoryPtr&& factory, Api::Api& api, + Event::TimeSystem& time_system); ~DispatcherImpl(); /** * @return event_base& the libevent base. 
*/ - event_base& base() { return *base_; } + event_base& base() { return base_scheduler_.base(); } // Event::Dispatcher - TimeSystem& timeSystem() override { return api_.timeSystem(); } + TimeSource& timeSource() override { return api_.timeSource(); } + void initializeStats(Stats::Scope& scope, const std::string& prefix) override; void clearDeferredDeleteList() override; Network::ConnectionPtr createServerConnection(Network::ConnectionSocketPtr&& socket, @@ -70,9 +75,11 @@ class DispatcherImpl : Logger::Loggable, public Dispatcher { bool isThreadSafe() const { return run_tid_ == nullptr || run_tid_->isCurrentThreadId(); } Api::Api& api_; + std::string stats_prefix_; + std::unique_ptr stats_; Thread::ThreadIdPtr run_tid_; Buffer::WatermarkFactoryPtr buffer_factory_; - Libevent::BasePtr base_; + LibeventScheduler base_scheduler_; SchedulerPtr scheduler_; TimerPtr deferred_delete_timer_; TimerPtr post_timer_; diff --git a/source/common/event/file_event_impl.cc b/source/common/event/file_event_impl.cc index bc1cf9ad5ab4b..feee927132ee0 100644 --- a/source/common/event/file_event_impl.cc +++ b/source/common/event/file_event_impl.cc @@ -13,6 +13,10 @@ namespace Event { FileEventImpl::FileEventImpl(DispatcherImpl& dispatcher, int fd, FileReadyCb cb, FileTriggerType trigger, uint32_t events) : cb_(cb), base_(&dispatcher.base()), fd_(fd), trigger_(trigger) { +#ifdef WIN32 + RELEASE_ASSERT(trigger_ == FileTriggerType::Level, + "libevent does not support edge triggers on Windows"); +#endif assignEvents(events); event_add(&raw_event_, nullptr); } @@ -36,30 +40,31 @@ void FileEventImpl::activate(uint32_t events) { } void FileEventImpl::assignEvents(uint32_t events) { - event_assign(&raw_event_, base_, fd_, - EV_PERSIST | (trigger_ == FileTriggerType::Level ? 0 : EV_ET) | - (events & FileReadyType::Read ? EV_READ : 0) | - (events & FileReadyType::Write ? EV_WRITE : 0) | - (events & FileReadyType::Closed ? 
EV_CLOSED : 0), - [](evutil_socket_t, short what, void* arg) -> void { - FileEventImpl* event = static_cast(arg); - uint32_t events = 0; - if (what & EV_READ) { - events |= FileReadyType::Read; - } + event_assign( + &raw_event_, base_, fd_, + EV_PERSIST | (trigger_ == FileTriggerType::Level ? 0 : EV_ET) | + (events & FileReadyType::Read ? EV_READ : 0) | + (events & FileReadyType::Write ? EV_WRITE : 0) | + (events & FileReadyType::Closed ? EV_CLOSED : 0), + [](evutil_socket_t, short what, void* arg) -> void { + FileEventImpl* event = static_cast(arg); + uint32_t events = 0; + if (what & EV_READ) { + events |= FileReadyType::Read; + } - if (what & EV_WRITE) { - events |= FileReadyType::Write; - } + if (what & EV_WRITE) { + events |= FileReadyType::Write; + } - if (what & EV_CLOSED) { - events |= FileReadyType::Closed; - } + if (what & EV_CLOSED) { + events |= FileReadyType::Closed; + } - ASSERT(events); - event->cb_(events); - }, - this); + ASSERT(events); + event->cb_(events); + }, + this); } void FileEventImpl::setEnabled(uint32_t events) { diff --git a/source/common/event/libevent.cc b/source/common/event/libevent.cc index a81fbeb82adc2..bf894858898b5 100644 --- a/source/common/event/libevent.cc +++ b/source/common/event/libevent.cc @@ -13,10 +13,14 @@ namespace Libevent { bool Global::initialized_ = false; void Global::initialize() { +#ifdef WIN32 + evthread_use_windows_threads(); +#else evthread_use_pthreads(); // Ignore SIGPIPE and allow errors to propagate through error codes. 
signal(SIGPIPE, SIG_IGN); +#endif initialized_ = true; } diff --git a/source/common/event/libevent_scheduler.cc b/source/common/event/libevent_scheduler.cc new file mode 100644 index 0000000000000..df22b45ba7371 --- /dev/null +++ b/source/common/event/libevent_scheduler.cc @@ -0,0 +1,103 @@ +#include "common/event/libevent_scheduler.h" + +#include "common/common/assert.h" +#include "common/event/timer_impl.h" + +#include "event2/util.h" + +namespace Envoy { +namespace Event { + +namespace { +void recordTimeval(Stats::Histogram& histogram, const timeval& tv) { + histogram.recordValue(tv.tv_sec * 1000000 + tv.tv_usec); +} +} // namespace + +LibeventScheduler::LibeventScheduler() : libevent_(event_base_new()) { + // The dispatcher won't work as expected if libevent hasn't been configured to use threads. + RELEASE_ASSERT(Libevent::Global::initialized(), ""); +} + +TimerPtr LibeventScheduler::createTimer(const TimerCb& cb) { + return std::make_unique(libevent_, cb); +}; + +void LibeventScheduler::run(Dispatcher::RunType mode) { + int flag = 0; + switch (mode) { + case Dispatcher::RunType::NonBlock: + flag = EVLOOP_NONBLOCK; +#ifdef WIN32 + // On Windows, EVLOOP_NONBLOCK will cause the libevent event_base_loop to run forever. + // This is because libevent only supports level triggering on Windows, and so the write + // event callbacks will trigger every time through the loop. Adding EVLOOP_ONCE ensures the + // loop will run at most once + flag |= EVLOOP_NONBLOCK | EVLOOP_ONCE; +#endif + break; + case Dispatcher::RunType::Block: + // The default flags have 'block' behavior. 
See + // http://www.wangafu.net/~nickm/libevent-book/Ref3_eventloop.html + break; + case Dispatcher::RunType::RunUntilExit: + flag = EVLOOP_NO_EXIT_ON_EMPTY; + break; + } + event_base_loop(libevent_.get(), flag); +} + +void LibeventScheduler::loopExit() { event_base_loopexit(libevent_.get(), nullptr); } + +void LibeventScheduler::initializeStats(DispatcherStats* stats) { + stats_ = stats; + // These are thread safe. + evwatch_prepare_new(libevent_.get(), &onPrepare, this); + evwatch_check_new(libevent_.get(), &onCheck, this); +} + +void LibeventScheduler::onPrepare(evwatch*, const evwatch_prepare_cb_info* info, void* arg) { + // `self` is `this`, passed in from evwatch_prepare_new. + auto self = static_cast(arg); + + // Record poll timeout and prepare time for this iteration of the event loop. The timeout is the + // expected polling duration, whereas the actual polling duration will be the difference measured + // between the prepare time and the check time immediately after polling. These are compared in + // onCheck to compute the poll_delay stat. + self->timeout_set_ = evwatch_prepare_get_timeout(info, &self->timeout_); + evutil_gettimeofday(&self->prepare_time_, nullptr); + + // If we have a check time available from a previous iteration of the event loop (that is, all but + // the first), compute the loop_duration stat. + if (self->check_time_.tv_sec != 0) { + timeval delta; + evutil_timersub(&self->prepare_time_, &self->check_time_, &delta); + recordTimeval(self->stats_->loop_duration_us_, delta); + } +} + +void LibeventScheduler::onCheck(evwatch*, const evwatch_check_cb_info*, void* arg) { + // `self` is `this`, passed in from evwatch_check_new. + auto self = static_cast(arg); + + // Record check time for this iteration of the event loop. Use this together with prepare time + // from above to compute the actual polling duration, and store it for the next iteration of the + // event loop to compute the loop duration. 
+ evutil_gettimeofday(&self->check_time_, nullptr); + if (self->timeout_set_) { + timeval delta, delay; + evutil_timersub(&self->check_time_, &self->prepare_time_, &delta); + evutil_timersub(&delta, &self->timeout_, &delay); + + // Delay can be negative, meaning polling completed early. This happens in normal operation, + // either because I/O was ready before we hit the timeout, or just because the kernel was + // feeling saucy. Disregard negative delays in stats, since they don't indicate anything + // particularly useful. + if (delay.tv_sec >= 0) { + recordTimeval(self->stats_->poll_delay_us_, delay); + } + } +} + +} // namespace Event +} // namespace Envoy diff --git a/source/common/event/libevent_scheduler.h b/source/common/event/libevent_scheduler.h new file mode 100644 index 0000000000000..b9157bf4059b5 --- /dev/null +++ b/source/common/event/libevent_scheduler.h @@ -0,0 +1,63 @@ +#pragma once + +#include "envoy/event/dispatcher.h" +#include "envoy/event/timer.h" + +#include "common/event/libevent.h" + +#include "event2/event.h" +#include "event2/watch.h" + +namespace Envoy { +namespace Event { + +// Implements Scheduler based on libevent. +class LibeventScheduler : public Scheduler { +public: + LibeventScheduler(); + + // Scheduler + TimerPtr createTimer(const TimerCb& cb) override; + + /** + * Runs the event loop. + * + * @param mode The mode in which to run the event loop. + */ + void run(Dispatcher::RunType mode); + + /** + * Exits the libevent loop. + */ + void loopExit(); + + /** + * TODO(jmarantz): consider strengthening this abstraction and instead of + * exposing the libevent base pointer, provide API abstractions for the calls + * into it. Among other benefits this might make it more tractable to someday + * consider an alternative to libevent if the need arises. + * + * @return the underlying libevent structure. 
+ */ + event_base& base() { return *libevent_; } + + /** + * Start writing stats once thread-local storage is ready to receive them (see + * ThreadLocalStoreImpl::initializeThreading). + */ + void initializeStats(DispatcherStats* stats_); + +private: + static void onPrepare(evwatch*, const evwatch_prepare_cb_info* info, void* arg); + static void onCheck(evwatch*, const evwatch_check_cb_info*, void* arg); + + Libevent::BasePtr libevent_; + DispatcherStats* stats_{}; // stats owned by the containing DispatcherImpl + bool timeout_set_{}; // whether there is a poll timeout in the current event loop iteration + timeval timeout_{}; // the poll timeout for the current event loop iteration, if available + timeval prepare_time_{}; // timestamp immediately before polling + timeval check_time_{}; // timestamp immediately after polling +}; + +} // namespace Event +} // namespace Envoy diff --git a/source/common/event/real_time_system.cc b/source/common/event/real_time_system.cc index cc20698bd07de..9621611c2df05 100644 --- a/source/common/event/real_time_system.cc +++ b/source/common/event/real_time_system.cc @@ -3,30 +3,25 @@ #include #include "common/common/assert.h" -#include "common/event/event_impl_base.h" #include "common/event/timer_impl.h" -#include "event2/event.h" - namespace Envoy { namespace Event { namespace { class RealScheduler : public Scheduler { public: - RealScheduler(Libevent::BasePtr& libevent) : libevent_(libevent) {} - TimerPtr createTimer(const TimerCb& cb) override { - return std::make_unique(libevent_, cb); - }; + RealScheduler(Scheduler& base_scheduler) : base_scheduler_(base_scheduler) {} + TimerPtr createTimer(const TimerCb& cb) override { return base_scheduler_.createTimer(cb); }; private: - Libevent::BasePtr& libevent_; + Scheduler& base_scheduler_; }; } // namespace -SchedulerPtr RealTimeSystem::createScheduler(Libevent::BasePtr& libevent) { - return std::make_unique(libevent); +SchedulerPtr RealTimeSystem::createScheduler(Scheduler& 
base_scheduler) { + return std::make_unique(base_scheduler); } } // namespace Event diff --git a/source/common/event/real_time_system.h b/source/common/event/real_time_system.h index 9ae68c80f56b7..5323da8bfac37 100644 --- a/source/common/event/real_time_system.h +++ b/source/common/event/real_time_system.h @@ -13,7 +13,7 @@ namespace Event { class RealTimeSystem : public TimeSystem { public: // TimeSystem - SchedulerPtr createScheduler(Libevent::BasePtr&) override; + SchedulerPtr createScheduler(Scheduler&) override; // TimeSource SystemTime systemTime() override { return time_source_.systemTime(); } diff --git a/source/common/event/timer_impl.cc b/source/common/event/timer_impl.cc index 5f5a9d8caab33..4c2a8c0ca4bcb 100644 --- a/source/common/event/timer_impl.cc +++ b/source/common/event/timer_impl.cc @@ -31,5 +31,7 @@ void TimerImpl::enableTimer(const std::chrono::milliseconds& d) { } } +bool TimerImpl::enabled() { return 0 != evtimer_pending(&raw_event_, nullptr); } + } // namespace Event } // namespace Envoy diff --git a/source/common/event/timer_impl.h b/source/common/event/timer_impl.h index 25b3ba513101a..2572b7c1ed19c 100644 --- a/source/common/event/timer_impl.h +++ b/source/common/event/timer_impl.h @@ -5,6 +5,7 @@ #include "envoy/event/timer.h" #include "common/event/event_impl_base.h" +#include "common/event/libevent.h" namespace Envoy { namespace Event { @@ -19,6 +20,7 @@ class TimerImpl : public Timer, ImplBase { // Timer void disableTimer() override; void enableTimer(const std::chrono::milliseconds& d) override; + bool enabled() override; private: TimerCb cb_; diff --git a/source/common/filesystem/BUILD b/source/common/filesystem/BUILD index f4f0d62e7d0b7..f88f9d6fa2e37 100644 --- a/source/common/filesystem/BUILD +++ b/source/common/filesystem/BUILD @@ -40,23 +40,43 @@ envoy_cc_posix_library( envoy_cc_library( name = "filesystem_lib", - srcs = ["filesystem_impl.cc"], - hdrs = ["filesystem_impl.h"], + deps = 
envoy_cc_platform_dep("filesystem_impl_lib"), +) + +envoy_cc_posix_library( + name = "filesystem_impl_lib", + srcs = ["posix/filesystem_impl.cc"], + hdrs = ["posix/filesystem_impl.h"], + strip_include_prefix = "posix", + deps = [ + ":file_shared_lib", + ], +) + +envoy_cc_win32_library( + name = "filesystem_impl_lib", + srcs = ["win32/filesystem_impl.cc"], + hdrs = ["win32/filesystem_impl.h"], + strip_include_prefix = "win32", + deps = [ + ":file_shared_lib", + ], +) + +envoy_cc_library( + name = "file_shared_lib", + srcs = ["file_shared_impl.cc"], + hdrs = ["file_shared_impl.h"], deps = [ - "//include/envoy/api:api_interface", - "//include/envoy/api:os_sys_calls_interface", - "//include/envoy/event:dispatcher_interface", "//include/envoy/filesystem:filesystem_interface", - "//source/common/api:os_sys_calls_lib", - "//source/common/buffer:buffer_lib", - "//source/common/common:thread_lib", + "//source/common/common:assert_lib", ], ) envoy_cc_library( name = "watcher_lib", srcs = select({ - "@bazel_tools//tools/osx:darwin": [ + "//bazel:apple": [ "kqueue/watcher_impl.cc", ], "//conditions:default": [ @@ -64,7 +84,7 @@ envoy_cc_library( ], }), hdrs = select({ - "@bazel_tools//tools/osx:darwin": [ + "//bazel:apple": [ "kqueue/watcher_impl.h", ], "//conditions:default": [ @@ -75,7 +95,7 @@ envoy_cc_library( "event", ], strip_include_prefix = select({ - "@bazel_tools//tools/osx:darwin": "kqueue", + "//bazel:apple": "kqueue", "//conditions:default": "inotify", }), deps = [ diff --git a/source/common/filesystem/file_shared_impl.cc b/source/common/filesystem/file_shared_impl.cc new file mode 100644 index 0000000000000..5e235c534e639 --- /dev/null +++ b/source/common/filesystem/file_shared_impl.cc @@ -0,0 +1,39 @@ +#include "common/filesystem/file_shared_impl.h" + +#include + +namespace Envoy { +namespace Filesystem { + +Api::IoError::IoErrorCode IoFileError::getErrorCode() const { return IoErrorCode::UnknownError; } + +std::string IoFileError::getErrorDetails() const { 
return ::strerror(errno_); } + +Api::IoCallBoolResult FileSharedImpl::open() { + if (isOpen()) { + return resultSuccess(true); + } + + openFile(); + return fd_ != -1 ? resultSuccess(true) : resultFailure(false, errno); +} + +Api::IoCallSizeResult FileSharedImpl::write(absl::string_view buffer) { + const ssize_t rc = writeFile(buffer); + return rc != -1 ? resultSuccess(rc) : resultFailure(rc, errno); +}; + +Api::IoCallBoolResult FileSharedImpl::close() { + ASSERT(isOpen()); + + bool success = closeFile(); + fd_ = -1; + return success ? resultSuccess(true) : resultFailure(false, errno); +} + +bool FileSharedImpl::isOpen() const { return fd_ != -1; }; + +std::string FileSharedImpl::path() const { return path_; }; + +} // namespace Filesystem +} // namespace Envoy \ No newline at end of file diff --git a/source/common/filesystem/file_shared_impl.h b/source/common/filesystem/file_shared_impl.h new file mode 100644 index 0000000000000..d018ed0314ae9 --- /dev/null +++ b/source/common/filesystem/file_shared_impl.h @@ -0,0 +1,61 @@ +#pragma once + +#include + +#include "envoy/filesystem/filesystem.h" + +#include "common/common/assert.h" + +namespace Envoy { +namespace Filesystem { + +class IoFileError : public Api::IoError { +public: + explicit IoFileError(int sys_errno) : errno_(sys_errno) {} + + ~IoFileError() override {} + + Api::IoError::IoErrorCode getErrorCode() const override; + std::string getErrorDetails() const override; + +private: + const int errno_; +}; + +using IoFileErrorPtr = std::unique_ptr; + +template Api::IoCallResult resultFailure(T result, int sys_errno) { + return {result, IoFileErrorPtr(new IoFileError(sys_errno), [](Api::IoError* err) { + ASSERT(err != nullptr); + delete err; + })}; +} + +template Api::IoCallResult resultSuccess(T result) { + return {result, IoFileErrorPtr(nullptr, [](Api::IoError*) { NOT_REACHED_GCOVR_EXCL_LINE; })}; +} + +class FileSharedImpl : public File { +public: + FileSharedImpl(const std::string& path) : fd_(-1), path_(path) 
{} + + virtual ~FileSharedImpl() {} + + // Filesystem::File + Api::IoCallBoolResult open() override; + Api::IoCallSizeResult write(absl::string_view buffer) override; + Api::IoCallBoolResult close() override; + bool isOpen() const override; + std::string path() const override; + +protected: + virtual void openFile() PURE; + virtual ssize_t writeFile(absl::string_view buffer) PURE; + virtual bool closeFile() PURE; + + int fd_; + const std::string path_; +}; + +} // namespace Filesystem +} // namespace Envoy \ No newline at end of file diff --git a/source/common/filesystem/filesystem_impl.cc b/source/common/filesystem/filesystem_impl.cc deleted file mode 100644 index 1fd3ce8a455cd..0000000000000 --- a/source/common/filesystem/filesystem_impl.cc +++ /dev/null @@ -1,282 +0,0 @@ -#include "common/filesystem/filesystem_impl.h" - -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "envoy/common/exception.h" -#include "envoy/common/time.h" -#include "envoy/event/dispatcher.h" -#include "envoy/thread/thread.h" - -#include "common/api/os_sys_calls_impl.h" -#include "common/common/assert.h" -#include "common/common/fmt.h" -#include "common/common/lock_guard.h" -#include "common/common/stack_array.h" - -#include "absl/strings/match.h" - -namespace Envoy { -namespace Filesystem { - -InstanceImpl::InstanceImpl(std::chrono::milliseconds file_flush_interval_msec, - Thread::ThreadFactory& thread_factory, Stats::Store& stats_store) - : file_flush_interval_msec_(file_flush_interval_msec), - file_stats_{FILESYSTEM_STATS(POOL_COUNTER_PREFIX(stats_store, "filesystem."), - POOL_GAUGE_PREFIX(stats_store, "filesystem."))}, - thread_factory_(thread_factory) {} - -FileSharedPtr InstanceImpl::createFile(const std::string& path, Event::Dispatcher& dispatcher, - Thread::BasicLockable& lock, - std::chrono::milliseconds file_flush_interval_msec) { - return std::make_shared(path, dispatcher, lock, file_stats_, - file_flush_interval_msec, 
thread_factory_); -}; - -FileSharedPtr InstanceImpl::createFile(const std::string& path, Event::Dispatcher& dispatcher, - Thread::BasicLockable& lock) { - return createFile(path, dispatcher, lock, file_flush_interval_msec_); -} - -bool InstanceImpl::fileExists(const std::string& path) { - std::ifstream input_file(path); - return input_file.is_open(); -} - -bool InstanceImpl::directoryExists(const std::string& path) { - DIR* const dir = ::opendir(path.c_str()); - const bool dir_exists = nullptr != dir; - if (dir_exists) { - ::closedir(dir); - } - - return dir_exists; -} - -ssize_t InstanceImpl::fileSize(const std::string& path) { - struct stat info; - if (::stat(path.c_str(), &info) != 0) { - return -1; - } - return info.st_size; -} - -std::string InstanceImpl::fileReadToEnd(const std::string& path) { - if (illegalPath(path)) { - throw EnvoyException(fmt::format("Invalid path: {}", path)); - } - - std::ios::sync_with_stdio(false); - - std::ifstream file(path); - if (!file) { - throw EnvoyException(fmt::format("unable to read file: {}", path)); - } - - std::stringstream file_string; - file_string << file.rdbuf(); - - return file_string.str(); -} - -Api::SysCallStringResult InstanceImpl::canonicalPath(const std::string& path) { - // TODO(htuch): When we are using C++17, switch to std::filesystem::canonical. - char* resolved_path = ::realpath(path.c_str(), nullptr); - if (resolved_path == nullptr) { - return {std::string(), errno}; - } - std::string resolved_path_string{resolved_path}; - ::free(resolved_path); - return {resolved_path_string, 0}; -} - -bool InstanceImpl::illegalPath(const std::string& path) { - const Api::SysCallStringResult canonical_path = canonicalPath(path); - if (canonical_path.rc_.empty()) { - ENVOY_LOG_MISC(debug, "Unable to determine canonical path for {}: {}", path, - ::strerror(canonical_path.errno_)); - return true; - } - - // Platform specific path sanity; we provide a convenience to avoid Envoy - // instances poking in bad places. 
We may have to consider conditioning on - // platform in the future, growing these or relaxing some constraints (e.g. - // there are valid reasons to go via /proc for file paths). - // TODO(htuch): Optimize this as a hash lookup if we grow any further. - if (absl::StartsWith(canonical_path.rc_, "/dev") || - absl::StartsWith(canonical_path.rc_, "/sys") || - absl::StartsWith(canonical_path.rc_, "/proc")) { - return true; - } - return false; -} - -FileImpl::FileImpl(const std::string& path, Event::Dispatcher& dispatcher, - Thread::BasicLockable& lock, FileSystemStats& stats, - std::chrono::milliseconds flush_interval_msec, - Thread::ThreadFactory& thread_factory) - : path_(path), file_lock_(lock), flush_timer_(dispatcher.createTimer([this]() -> void { - stats_.flushed_by_timer_.inc(); - flush_event_.notifyOne(); - flush_timer_->enableTimer(flush_interval_msec_); - })), - os_sys_calls_(Api::OsSysCallsSingleton::get()), thread_factory_(thread_factory), - flush_interval_msec_(flush_interval_msec), stats_(stats) { - open(); -} - -void FileImpl::open() { - Api::SysCallIntResult result = - os_sys_calls_.open(path_, O_RDWR | O_APPEND | O_CREAT, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); - fd_ = result.rc_; - if (-1 == fd_) { - throw EnvoyException( - fmt::format("unable to open file '{}': {}", path_, strerror(result.errno_))); - } -} - -void FileImpl::reopen() { reopen_file_ = true; } - -FileImpl::~FileImpl() { - { - Thread::LockGuard lock(write_lock_); - flush_thread_exit_ = true; - flush_event_.notifyOne(); - } - - if (flush_thread_ != nullptr) { - flush_thread_->join(); - } - - // Flush any remaining data. If file was not opened for some reason, skip flushing part. 
- if (fd_ != -1) { - if (flush_buffer_.length() > 0) { - doWrite(flush_buffer_); - } - - os_sys_calls_.close(fd_); - } -} - -void FileImpl::doWrite(Buffer::Instance& buffer) { - uint64_t num_slices = buffer.getRawSlices(nullptr, 0); - STACK_ARRAY(slices, Buffer::RawSlice, num_slices); - buffer.getRawSlices(slices.begin(), num_slices); - - // We must do the actual writes to disk under lock, so that we don't intermix chunks from - // different FileImpl pointing to the same underlying file. This can happen either via hot - // restart or if calling code opens the same underlying file into a different FileImpl in the - // same process. - // TODO PERF: Currently, we use a single cross process lock to serialize all disk writes. This - // will never block network workers, but does mean that only a single flush thread can - // actually flush to disk. In the future it would be nice if we did away with the cross - // process lock or had multiple locks. - { - Thread::LockGuard lock(file_lock_); - for (const Buffer::RawSlice& slice : slices) { - const Api::SysCallSizeResult result = os_sys_calls_.write(fd_, slice.mem_, slice.len_); - ASSERT(result.rc_ == static_cast(slice.len_)); - stats_.write_completed_.inc(); - } - } - - stats_.write_total_buffered_.sub(buffer.length()); - buffer.drain(buffer.length()); -} - -void FileImpl::flushThreadFunc() { - - while (true) { - std::unique_lock flush_lock; - - { - Thread::LockGuard write_lock(write_lock_); - - // flush_event_ can be woken up either by large enough flush_buffer or by timer. - // In case it was timer, flush_buffer_ can be empty. - while (flush_buffer_.length() == 0 && !flush_thread_exit_) { - // CondVar::wait() does not throw, so it's safe to pass the mutex rather than the guard. 
- flush_event_.wait(write_lock_); - } - - if (flush_thread_exit_) { - return; - } - - flush_lock = std::unique_lock(flush_lock_); - ASSERT(flush_buffer_.length() > 0); - about_to_write_buffer_.move(flush_buffer_); - ASSERT(flush_buffer_.length() == 0); - } - - // if we failed to open file before (-1 == fd_), then simply ignore - if (fd_ != -1) { - try { - if (reopen_file_) { - reopen_file_ = false; - os_sys_calls_.close(fd_); - open(); - } - - doWrite(about_to_write_buffer_); - } catch (const EnvoyException&) { - stats_.reopen_failed_.inc(); - } - } - } -} - -void FileImpl::flush() { - std::unique_lock flush_buffer_lock; - - { - Thread::LockGuard write_lock(write_lock_); - - // flush_lock_ must be held while checking this or else it is - // possible that flushThreadFunc() has already moved data from - // flush_buffer_ to about_to_write_buffer_, has unlocked write_lock_, - // but has not yet completed doWrite(). This would allow flush() to - // return before the pending data has actually been written to disk. 
- flush_buffer_lock = std::unique_lock(flush_lock_); - - if (flush_buffer_.length() == 0) { - return; - } - - about_to_write_buffer_.move(flush_buffer_); - ASSERT(flush_buffer_.length() == 0); - } - - doWrite(about_to_write_buffer_); -} - -void FileImpl::write(absl::string_view data) { - Thread::LockGuard lock(write_lock_); - - if (flush_thread_ == nullptr) { - createFlushStructures(); - } - - stats_.write_buffered_.inc(); - stats_.write_total_buffered_.add(data.length()); - flush_buffer_.add(data.data(), data.size()); - if (flush_buffer_.length() > MIN_FLUSH_SIZE) { - flush_event_.notifyOne(); - } -} - -void FileImpl::createFlushStructures() { - flush_thread_ = thread_factory_.createThread([this]() -> void { flushThreadFunc(); }); - flush_timer_->enableTimer(flush_interval_msec_); -} - -} // namespace Filesystem -} // namespace Envoy diff --git a/source/common/filesystem/filesystem_impl.h b/source/common/filesystem/filesystem_impl.h deleted file mode 100644 index a28317bbfa2d5..0000000000000 --- a/source/common/filesystem/filesystem_impl.h +++ /dev/null @@ -1,145 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - -#include "envoy/api/api.h" -#include "envoy/api/os_sys_calls.h" -#include "envoy/event/dispatcher.h" -#include "envoy/filesystem/filesystem.h" -#include "envoy/stats/stats_macros.h" -#include "envoy/stats/store.h" - -#include "common/buffer/buffer_impl.h" -#include "common/common/thread.h" - -namespace Envoy { -// clang-format off -#define FILESYSTEM_STATS(COUNTER, GAUGE) \ - COUNTER(write_buffered) \ - COUNTER(write_completed) \ - COUNTER(flushed_by_timer) \ - COUNTER(reopen_failed) \ - GAUGE (write_total_buffered) -// clang-format on - -struct FileSystemStats { - FILESYSTEM_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) -}; - -namespace Filesystem { - -/** - * Captures state, properties, and stats of a file-system. 
- */ -class InstanceImpl : public Instance { -public: - InstanceImpl(std::chrono::milliseconds file_flush_interval_msec, - Thread::ThreadFactory& thread_factory, Stats::Store& store); - - // Filesystem::Instance - FileSharedPtr createFile(const std::string& path, Event::Dispatcher& dispatcher, - Thread::BasicLockable& lock, - std::chrono::milliseconds file_flush_interval_msec) override; - FileSharedPtr createFile(const std::string& path, Event::Dispatcher& dispatcher, - Thread::BasicLockable& lock) override; - bool fileExists(const std::string& path) override; - bool directoryExists(const std::string& path) override; - ssize_t fileSize(const std::string& path) override; - std::string fileReadToEnd(const std::string& path) override; - Api::SysCallStringResult canonicalPath(const std::string& path) override; - bool illegalPath(const std::string& path) override; - -private: - const std::chrono::milliseconds file_flush_interval_msec_; - FileSystemStats file_stats_; - Thread::ThreadFactory& thread_factory_; -}; - -/** - * This is a file implementation geared for writing out access logs. It turn out that in certain - * cases even if a standard file is opened with O_NONBLOCK, the kernel can still block when writing. - * This implementation uses a flush thread per file, with the idea there there aren't that many - * files. If this turns out to be a good implementation we can potentially have a single flush - * thread that flushes all files, but we will start with this. - */ -class FileImpl : public File { -public: - FileImpl(const std::string& path, Event::Dispatcher& dispatcher, Thread::BasicLockable& lock, - FileSystemStats& stats_, std::chrono::milliseconds flush_interval_msec, - Thread::ThreadFactory& thread_factory); - ~FileImpl(); - - // Filesystem::File - void write(absl::string_view data) override; - - /** - * Filesystem::File - * Reopen file asynchronously. - * This only sets reopen flag, actual reopen operation is delayed. 
- * Reopen happens before the next write operation. - */ - void reopen() override; - - // Filesystem::File - void flush() override; - -private: - void doWrite(Buffer::Instance& buffer); - void flushThreadFunc(); - void open(); - void createFlushStructures(); - - // Minimum size before the flush thread will be told to flush. - static const uint64_t MIN_FLUSH_SIZE = 1024 * 64; - - int fd_; - std::string path_; - - // These locks are always acquired in the following order if multiple locks are held: - // 1) write_lock_ - // 2) flush_lock_ - // 3) file_lock_ - Thread::BasicLockable& file_lock_; // This lock is used only by the flush thread when writing - // to disk. This is used to make sure that file blocks do - // not get interleaved by multiple processes writing to - // the same file during hot-restart. - Thread::MutexBasicLockable flush_lock_; // This lock is used to prevent simultaneous flushes from - // the flush thread and a synchronous flush. This protects - // concurrent access to the about_to_write_buffer_, fd_, - // and all other data used during flushing and file - // re-opening. - Thread::MutexBasicLockable - write_lock_; // The lock is used when filling the flush buffer. It allows - // multiple threads to write to the same file at relatively - // high performance. It is always local to the process. - Thread::ThreadPtr flush_thread_; - Thread::CondVar flush_event_; - std::atomic flush_thread_exit_{}; - std::atomic reopen_file_{}; - Buffer::OwnedImpl - flush_buffer_ GUARDED_BY(write_lock_); // This buffer is used by multiple threads. It gets - // filled and then flushed either when max size is - // reached or when a timer fires. - // TODO(jmarantz): this should be GUARDED_BY(flush_lock_) but the analysis cannot poke through - // the std::make_unique assignment. I do not believe it's possible to annotate this properly now - // due to limitations in the clang thread annotation analysis. 
- Buffer::OwnedImpl about_to_write_buffer_; // This buffer is used only by the flush thread. Data - // is moved from flush_buffer_ under lock, and then - // the lock is released so that flush_buffer_ can - // continue to fill. This buffer is then used for the - // final write to disk. - Event::TimerPtr flush_timer_; - Api::OsSysCalls& os_sys_calls_; - Thread::ThreadFactory& thread_factory_; - const std::chrono::milliseconds flush_interval_msec_; // Time interval buffer gets flushed no - // matter if it reached the MIN_FLUSH_SIZE - // or not. - FileSystemStats& stats_; -}; - -} // namespace Filesystem -} // namespace Envoy diff --git a/source/common/filesystem/inotify/watcher_impl.cc b/source/common/filesystem/inotify/watcher_impl.cc index f61ac242b5182..a8956d348d9a7 100644 --- a/source/common/filesystem/inotify/watcher_impl.cc +++ b/source/common/filesystem/inotify/watcher_impl.cc @@ -17,13 +17,13 @@ namespace Filesystem { WatcherImpl::WatcherImpl(Event::Dispatcher& dispatcher) : inotify_fd_(inotify_init1(IN_NONBLOCK)), - inotify_event_(dispatcher.createFileEvent(inotify_fd_, - [this](uint32_t events) -> void { - ASSERT(events == Event::FileReadyType::Read); - onInotifyEvent(); - }, - Event::FileTriggerType::Edge, - Event::FileReadyType::Read)) { + inotify_event_(dispatcher.createFileEvent( + inotify_fd_, + [this](uint32_t events) -> void { + ASSERT(events == Event::FileReadyType::Read); + onInotifyEvent(); + }, + Event::FileTriggerType::Edge, Event::FileReadyType::Read)) { RELEASE_ASSERT(inotify_fd_ >= 0, ""); } @@ -40,7 +40,8 @@ void WatcherImpl::addWatch(const std::string& path, uint32_t events, OnChangedCb std::string directory = last_slash != 0 ? 
path.substr(0, last_slash) : "/"; std::string file = StringUtil::subspan(path, last_slash + 1, path.size()); - int watch_fd = inotify_add_watch(inotify_fd_, directory.c_str(), IN_ALL_EVENTS); + const uint32_t watch_mask = IN_MODIFY | IN_MOVED_TO; + int watch_fd = inotify_add_watch(inotify_fd_, directory.c_str(), watch_mask); if (watch_fd == -1) { throw EnvoyException( fmt::format("unable to add filesystem watch for file {}: {}", path, strerror(errno))); @@ -74,6 +75,9 @@ void WatcherImpl::onInotifyEvent() { file); uint32_t events = 0; + if (file_event->mask & IN_MODIFY) { + events |= Events::Modified; + } if (file_event->mask & IN_MOVED_TO) { events |= Events::MovedTo; } diff --git a/source/common/filesystem/inotify/watcher_impl.h b/source/common/filesystem/inotify/watcher_impl.h index 1361e82cb943d..2885c55b578e1 100644 --- a/source/common/filesystem/inotify/watcher_impl.h +++ b/source/common/filesystem/inotify/watcher_impl.h @@ -16,7 +16,7 @@ namespace Filesystem { /** * Implementation of Watcher that uses inotify. inotify is an awful API. In order to make this work * in a somewhat sane way we always watch the directory that owns the thing being watched, and then - * filter for events that are relevant to the the thing being watched. + * filter for events that are relevant to the thing being watched. 
*/ class WatcherImpl : public Watcher, Logger::Loggable { public: diff --git a/source/common/filesystem/kqueue/watcher_impl.cc b/source/common/filesystem/kqueue/watcher_impl.cc index f0e4ced2c2bfb..4b2e8c102b609 100644 --- a/source/common/filesystem/kqueue/watcher_impl.cc +++ b/source/common/filesystem/kqueue/watcher_impl.cc @@ -17,15 +17,14 @@ namespace Envoy { namespace Filesystem { WatcherImpl::WatcherImpl(Event::Dispatcher& dispatcher) - : queue_(kqueue()), - kqueue_event_(dispatcher.createFileEvent(queue_, - [this](uint32_t events) -> void { - if (events & Event::FileReadyType::Read) { - onKqueueEvent(); - } - }, - Event::FileTriggerType::Edge, - Event::FileReadyType::Read)) {} + : queue_(kqueue()), kqueue_event_(dispatcher.createFileEvent( + queue_, + [this](uint32_t events) -> void { + if (events & Event::FileReadyType::Read) { + onKqueueEvent(); + } + }, + Event::FileTriggerType::Edge, Event::FileReadyType::Read)) {} WatcherImpl::~WatcherImpl() { close(queue_); @@ -69,7 +68,7 @@ WatcherImpl::FileWatchPtr WatcherImpl::addWatch(const std::string& path, uint32_ watch->callback_ = cb; watch->watching_dir_ = watching_dir; - int flags = NOTE_DELETE | NOTE_RENAME; + u_int flags = NOTE_DELETE | NOTE_RENAME | NOTE_WRITE; if (watching_dir) { flags = NOTE_DELETE | NOTE_WRITE; } @@ -150,6 +149,9 @@ void WatcherImpl::onKqueueEvent() { if (event.fflags & NOTE_RENAME) { events |= Events::MovedTo; } + if (event.fflags & NOTE_WRITE) { + events |= Events::Modified; + } } ENVOY_LOG(debug, "notification: fd: {} flags: {:x} file: {}", file->fd_, event.fflags, diff --git a/source/common/filesystem/posix/filesystem_impl.cc b/source/common/filesystem/posix/filesystem_impl.cc new file mode 100644 index 0000000000000..214d97aed3240 --- /dev/null +++ b/source/common/filesystem/posix/filesystem_impl.cc @@ -0,0 +1,122 @@ +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "envoy/common/exception.h" + +#include "common/common/assert.h" 
+#include "common/common/fmt.h" +#include "common/common/logger.h" +#include "common/filesystem/filesystem_impl.h" + +#include "absl/strings/match.h" + +namespace Envoy { +namespace Filesystem { + +FileImplPosix::~FileImplPosix() { + if (isOpen()) { + const Api::IoCallBoolResult result = close(); + ASSERT(result.rc_); + } +} + +void FileImplPosix::openFile() { + const int flags = O_RDWR | O_APPEND | O_CREAT; + const int mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; + + fd_ = ::open(path_.c_str(), flags, mode); +} + +ssize_t FileImplPosix::writeFile(absl::string_view buffer) { + return ::write(fd_, buffer.data(), buffer.size()); +} + +bool FileImplPosix::closeFile() { return ::close(fd_) != -1; } + +FilePtr InstanceImplPosix::createFile(const std::string& path) { + return std::make_unique(path); +} + +bool InstanceImplPosix::fileExists(const std::string& path) { + std::ifstream input_file(path); + return input_file.is_open(); +} + +bool InstanceImplPosix::directoryExists(const std::string& path) { + DIR* const dir = ::opendir(path.c_str()); + const bool dir_exists = nullptr != dir; + if (dir_exists) { + ::closedir(dir); + } + + return dir_exists; +} + +ssize_t InstanceImplPosix::fileSize(const std::string& path) { + struct stat info; + if (::stat(path.c_str(), &info) != 0) { + return -1; + } + return info.st_size; +} + +std::string InstanceImplPosix::fileReadToEnd(const std::string& path) { + if (illegalPath(path)) { + throw EnvoyException(fmt::format("Invalid path: {}", path)); + } + + std::ios::sync_with_stdio(false); + + std::ifstream file(path); + if (file.fail()) { + throw EnvoyException(fmt::format("unable to read file: {}", path)); + } + + std::stringstream file_string; + file_string << file.rdbuf(); + + return file_string.str(); +} + +bool InstanceImplPosix::illegalPath(const std::string& path) { + const Api::SysCallStringResult canonical_path = canonicalPath(path); + if (canonical_path.rc_.empty()) { + ENVOY_LOG_MISC(debug, "Unable to determine canonical 
path for {}: {}", path, + ::strerror(canonical_path.errno_)); + return true; + } + + // Platform specific path sanity; we provide a convenience to avoid Envoy + // instances poking in bad places. We may have to consider conditioning on + // platform in the future, growing these or relaxing some constraints (e.g. + // there are valid reasons to go via /proc for file paths). + // TODO(htuch): Optimize this as a hash lookup if we grow any further. + if (absl::StartsWith(canonical_path.rc_, "/dev") || + absl::StartsWith(canonical_path.rc_, "/sys") || + absl::StartsWith(canonical_path.rc_, "/proc")) { + return true; + } + return false; +} + +Api::SysCallStringResult InstanceImplPosix::canonicalPath(const std::string& path) { + // TODO(htuch): When we are using C++17, switch to std::filesystem::canonical. + char* resolved_path = ::realpath(path.c_str(), nullptr); + if (resolved_path == nullptr) { + return {std::string(), errno}; + } + std::string resolved_path_string{resolved_path}; + ::free(resolved_path); + return {resolved_path_string, 0}; +} + +} // namespace Filesystem +} // namespace Envoy diff --git a/source/common/filesystem/posix/filesystem_impl.h b/source/common/filesystem/posix/filesystem_impl.h new file mode 100644 index 0000000000000..8c6279e664999 --- /dev/null +++ b/source/common/filesystem/posix/filesystem_impl.h @@ -0,0 +1,44 @@ +#pragma once + +#include +#include + +#include "envoy/api/os_sys_calls.h" + +#include "common/filesystem/file_shared_impl.h" + +namespace Envoy { +namespace Filesystem { + +class FileImplPosix : public FileSharedImpl { +public: + FileImplPosix(const std::string& path) : FileSharedImpl(path) {} + ~FileImplPosix(); + +protected: + // Filesystem::FileSharedImpl + void openFile() override; + ssize_t writeFile(absl::string_view buffer) override; + bool closeFile() override; + +private: + friend class FileSystemImplTest; +}; + +class InstanceImplPosix : public Instance { +public: + // Filesystem::Instance + FilePtr createFile(const 
std::string& path) override; + bool fileExists(const std::string& path) override; + bool directoryExists(const std::string& path) override; + ssize_t fileSize(const std::string& path) override; + std::string fileReadToEnd(const std::string& path) override; + bool illegalPath(const std::string& path) override; + +private: + Api::SysCallStringResult canonicalPath(const std::string& path); + friend class FileSystemImplTest; +}; + +} // namespace Filesystem +} // namespace Envoy diff --git a/source/common/filesystem/win32/filesystem_impl.cc b/source/common/filesystem/win32/filesystem_impl.cc new file mode 100644 index 0000000000000..41a15235ef3cd --- /dev/null +++ b/source/common/filesystem/win32/filesystem_impl.cc @@ -0,0 +1,100 @@ +#include +#include +#include +#include + +// uses macros to #define a ton of symbols, two of which (DELETE and GetMessage) +// interfere with our code. DELETE shows up in the base.pb.h header generated from +// api/envoy/api/core/base.proto. Since it's a generated header, we can't #undef DELETE at +// the top of that header to avoid the collision. Similarly, GetMessage shows up in generated +// protobuf code so we can't #undef the symbol there. 
+#undef DELETE +#undef GetMessage + +#include "common/common/assert.h" +#include "common/filesystem/filesystem_impl.h" + +#include +#include +#include +#include + +#include "envoy/common/exception.h" + +#include "common/common/fmt.h" + +namespace Envoy { +namespace Filesystem { + +FileImplWin32::~FileImplWin32() { + if (isOpen()) { + const Api::IoCallBoolResult result = close(); + ASSERT(result.rc_); + } +} + +void FileImplWin32::openFile() { + const int flags = _O_RDWR | _O_APPEND | _O_CREAT; + const int mode = _S_IREAD | _S_IWRITE; + + fd_ = ::_open(path_.c_str(), flags, mode); +} + +ssize_t FileImplWin32::writeFile(absl::string_view buffer) { + return ::_write(fd_, buffer.data(), buffer.size()); +} + +bool FileImplWin32::closeFile() { return ::_close(fd_) != -1; } + +FilePtr InstanceImplWin32::createFile(const std::string& path) { + return std::make_unique(path); +} + +bool InstanceImplWin32::fileExists(const std::string& path) { + const DWORD attributes = ::GetFileAttributes(path.c_str()); + return attributes != INVALID_FILE_ATTRIBUTES; +} + +bool InstanceImplWin32::directoryExists(const std::string& path) { + const DWORD attributes = ::GetFileAttributes(path.c_str()); + if (attributes == INVALID_FILE_ATTRIBUTES) { + return false; + } + return attributes & FILE_ATTRIBUTE_DIRECTORY; +} + +ssize_t InstanceImplWin32::fileSize(const std::string& path) { + struct _stat info; + if (::_stat(path.c_str(), &info) != 0) { + return -1; + } + return info.st_size; +} + +std::string InstanceImplWin32::fileReadToEnd(const std::string& path) { + if (illegalPath(path)) { + throw EnvoyException(fmt::format("Invalid path: {}", path)); + } + + std::ios::sync_with_stdio(false); + + // On Windows, we need to explicitly set the file mode as binary. 
Otherwise, + // 0x1a will be treated as EOF + std::ifstream file(path, std::ios_base::binary); + if (file.fail()) { + throw EnvoyException(fmt::format("unable to read file: {}", path)); + } + + std::stringstream file_string; + file_string << file.rdbuf(); + + return file_string.str(); +} + +bool InstanceImplWin32::illegalPath(const std::string& path) { + // Currently, we don't know of any obviously illegal paths on Windows + return false; +} + +} // namespace Filesystem +} // namespace Envoy diff --git a/source/common/filesystem/win32/filesystem_impl.h b/source/common/filesystem/win32/filesystem_impl.h new file mode 100644 index 0000000000000..7c7add205c87d --- /dev/null +++ b/source/common/filesystem/win32/filesystem_impl.h @@ -0,0 +1,38 @@ +#pragma once + +#include +#include + +#include "common/filesystem/file_shared_impl.h" + +namespace Envoy { +namespace Filesystem { + +class FileImplWin32 : public FileSharedImpl { +public: + FileImplWin32(const std::string& path) : FileSharedImpl(path) {} + ~FileImplWin32(); + +protected: + // Filesystem::FileSharedImpl + void openFile() override; + ssize_t writeFile(absl::string_view buffer) override; + bool closeFile() override; + +private: + friend class FileSystemImplTest; +}; + +class InstanceImplWin32 : public Instance { +public: + // Filesystem::Instance + FilePtr createFile(const std::string& path) override; + bool fileExists(const std::string& path) override; + bool directoryExists(const std::string& path) override; + ssize_t fileSize(const std::string& path) override; + std::string fileReadToEnd(const std::string& path) override; + bool illegalPath(const std::string& path) override; +}; + +} // namespace Filesystem +} // namespace Envoy diff --git a/source/common/grpc/BUILD b/source/common/grpc/BUILD index bfe2e369f6234..b7248ecf4f04a 100644 --- a/source/common/grpc/BUILD +++ b/source/common/grpc/BUILD @@ -89,6 +89,25 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "google_grpc_utils_lib", + srcs = 
["google_grpc_utils.cc"], + hdrs = ["google_grpc_utils.h"], + external_deps = [ + "abseil_optional", + "grpc", + ], + deps = [ + "//source/common/buffer:buffer_lib", + "//source/common/common:assert_lib", + "//source/common/common:empty_string", + "//source/common/common:enum_to_int", + "//source/common/common:macros", + "//source/common/common:utility_lib", + "//source/common/grpc:status_lib", + ], +) + envoy_cc_library( name = "google_async_client_lib", srcs = ["google_async_client_impl.cc"], diff --git a/source/common/grpc/async_client.cc b/source/common/grpc/async_client.cc index 739ab5e6747f4..5931c3d448b46 100644 --- a/source/common/grpc/async_client.cc +++ b/source/common/grpc/async_client.cc @@ -9,7 +9,7 @@ namespace Envoy { namespace Grpc { void AsyncStream::sendMessage(const Protobuf::Message& request, bool end_stream) { - sendRawMessage(Common::serializeBody(request), end_stream); + sendRawMessage(Common::serializeToGrpcFrame(request), end_stream); } AsyncRequest* AsyncClient::send(const Protobuf::MethodDescriptor& service_method, @@ -17,7 +17,7 @@ AsyncRequest* AsyncClient::send(const Protobuf::MethodDescriptor& service_method Tracing::Span& parent_span, const absl::optional& timeout) { return sendRaw(service_method.service()->full_name(), service_method.name(), - Common::serializeBody(request), callbacks, parent_span, timeout); + Common::serializeToGrpcFrame(request), callbacks, parent_span, timeout); } AsyncStream* AsyncClient::start(const Protobuf::MethodDescriptor& service_method, diff --git a/source/common/grpc/async_client_impl.cc b/source/common/grpc/async_client_impl.cc index 80d4230868b1a..8441424025edb 100644 --- a/source/common/grpc/async_client_impl.cc +++ b/source/common/grpc/async_client_impl.cc @@ -27,7 +27,7 @@ AsyncRequest* AsyncClientImpl::send(const Protobuf::MethodDescriptor& service_me AsyncRequestCallbacks& callbacks, Tracing::Span& parent_span, const absl::optional& timeout) { return sendRaw(service_method.service()->full_name(), 
service_method.name(), - Common::serializeBody(request), callbacks, parent_span, timeout); + Common::serializeToGrpcFrame(request), callbacks, parent_span, timeout); } AsyncRequest* AsyncClientImpl::sendRaw(absl::string_view service_full_name, @@ -190,7 +190,7 @@ void AsyncStreamImpl::onReset() { } void AsyncStreamImpl::sendMessage(const Protobuf::Message& request, bool end_stream) { - sendRawMessage(Common::serializeBody(request), end_stream); + sendRawMessage(Common::serializeToGrpcFrame(request), end_stream); } void AsyncStreamImpl::sendRawMessage(Buffer::InstancePtr request, bool end_stream) { @@ -228,8 +228,8 @@ AsyncRequestImpl::AsyncRequestImpl(AsyncClientImpl& parent, absl::string_view se current_span_ = parent_span.spawnChild(Tracing::EgressConfig::get(), "async " + parent.remote_cluster_name_ + " egress", parent.time_source_.systemTime()); - current_span_->setTag(Tracing::Tags::get().UPSTREAM_CLUSTER, parent.remote_cluster_name_); - current_span_->setTag(Tracing::Tags::get().COMPONENT, Tracing::Tags::get().PROXY); + current_span_->setTag(Tracing::Tags::get().UpstreamCluster, parent.remote_cluster_name_); + current_span_->setTag(Tracing::Tags::get().Component, Tracing::Tags::get().Proxy); } AsyncRequestImpl::AsyncRequestImpl(AsyncClientImpl& parent, @@ -238,7 +238,7 @@ AsyncRequestImpl::AsyncRequestImpl(AsyncClientImpl& parent, AsyncRequestCallbacks& callbacks, Tracing::Span& parent_span, const absl::optional& timeout) : AsyncRequestImpl(parent, service_method.service()->full_name(), service_method.name(), - Common::serializeBody(request), callbacks, parent_span, timeout) {} + Common::serializeToGrpcFrame(request), callbacks, parent_span, timeout) {} void AsyncRequestImpl::initialize(bool buffer_body_for_retry) { AsyncStreamImpl::initialize(buffer_body_for_retry); @@ -249,7 +249,7 @@ void AsyncRequestImpl::initialize(bool buffer_body_for_retry) { } void AsyncRequestImpl::cancel() { - current_span_->setTag(Tracing::Tags::get().STATUS, 
Tracing::Tags::get().CANCELED); + current_span_->setTag(Tracing::Tags::get().Status, Tracing::Tags::get().Canceled); current_span_->finishSpan(); this->resetStream(); } @@ -273,13 +273,13 @@ bool AsyncRequestImpl::onReceiveRawMessage(Buffer::InstancePtr response) { void AsyncRequestImpl::onReceiveTrailingMetadata(Http::HeaderMapPtr&&) {} void AsyncRequestImpl::onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) { - current_span_->setTag(Tracing::Tags::get().GRPC_STATUS_CODE, std::to_string(status)); + current_span_->setTag(Tracing::Tags::get().GrpcStatusCode, std::to_string(status)); if (status != Grpc::Status::GrpcStatus::Ok) { - current_span_->setTag(Tracing::Tags::get().ERROR, Tracing::Tags::get().TRUE); + current_span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); callbacks_.onFailure(status, message, *current_span_); } else if (response_ == nullptr) { - current_span_->setTag(Tracing::Tags::get().ERROR, Tracing::Tags::get().TRUE); + current_span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); callbacks_.onFailure(Status::Internal, EMPTY_STRING, *current_span_); } else { callbacks_.onSuccessRaw(std::move(response_), *current_span_); diff --git a/source/common/grpc/common.cc b/source/common/grpc/common.cc index 9d1f20ee3362c..90b78348ee257 100644 --- a/source/common/grpc/common.cc +++ b/source/common/grpc/common.cc @@ -33,7 +33,8 @@ bool Common::hasGrpcContentType(const Http::HeaderMap& headers) { absl::StartsWith(content_type->value().getStringView(), Http::Headers::get().ContentTypeValues.Grpc) && (content_type->value().size() == Http::Headers::get().ContentTypeValues.Grpc.size() || - content_type->value().c_str()[Http::Headers::get().ContentTypeValues.Grpc.size()] == '+'); + content_type->value() + .getStringView()[Http::Headers::get().ContentTypeValues.Grpc.size()] == '+'); } bool Common::isGrpcResponseHeader(const Http::HeaderMap& headers, bool end_stream) { @@ -55,11 +56,11 @@ void 
Common::chargeStat(const Upstream::ClusterInfo& cluster, const std::string& } cluster.statsScope() .counter(fmt::format("{}.{}.{}.{}", protocol, grpc_service, grpc_method, - grpc_status->value().c_str())) + grpc_status->value().getStringView())) .inc(); uint64_t grpc_status_code; - const bool success = - StringUtil::atoul(grpc_status->value().c_str(), grpc_status_code) && grpc_status_code == 0; + const bool success = absl::SimpleAtoi(grpc_status->value().getStringView(), &grpc_status_code) && + grpc_status_code == 0; chargeStat(cluster, protocol, grpc_service, grpc_method, success); } @@ -87,7 +88,7 @@ absl::optional Common::getGrpcStatus(const Http::HeaderMap& if (!grpc_status_header || grpc_status_header->value().empty()) { return absl::optional(); } - if (!StringUtil::atoul(grpc_status_header->value().c_str(), grpc_status_code) || + if (!absl::SimpleAtoi(grpc_status_header->value().getStringView(), &grpc_status_code) || grpc_status_code > Status::GrpcStatus::MaximumValid) { return absl::optional(Status::GrpcStatus::InvalidCode); } @@ -96,15 +97,15 @@ absl::optional Common::getGrpcStatus(const Http::HeaderMap& std::string Common::getGrpcMessage(const Http::HeaderMap& trailers) { const auto entry = trailers.GrpcMessage(); - return entry ? entry->value().c_str() : EMPTY_STRING; + return entry ? 
std::string(entry->value().getStringView()) : EMPTY_STRING; } bool Common::resolveServiceAndMethod(const Http::HeaderEntry* path, std::string* service, std::string* method) { - if (path == nullptr || path->value().c_str() == nullptr) { + if (path == nullptr) { return false; } - const auto parts = StringUtil::splitToken(path->value().c_str(), "/"); + const auto parts = StringUtil::splitToken(path->value().getStringView(), "/"); if (parts.size() != 2) { return false; } @@ -113,9 +114,11 @@ bool Common::resolveServiceAndMethod(const Http::HeaderEntry* path, std::string* return true; } -Buffer::InstancePtr Common::serializeBody(const Protobuf::Message& message) { +Buffer::InstancePtr Common::serializeToGrpcFrame(const Protobuf::Message& message) { // http://www.grpc.io/docs/guides/wire.html // Reserve enough space for the entire message and the 5 byte header. + // NB: we do not use prependGrpcFrameHeader because that would add another BufferFragment and this + // (using a single BufferFragment) is more efficient. 
Buffer::InstancePtr body(new Buffer::OwnedImpl()); const uint32_t size = message.ByteSize(); const uint32_t alloc_size = size + 5; @@ -136,7 +139,7 @@ Buffer::InstancePtr Common::serializeBody(const Protobuf::Message& message) { } Buffer::InstancePtr Common::serializeMessage(const Protobuf::Message& message) { - Buffer::InstancePtr body(new Buffer::OwnedImpl()); + auto body = std::make_unique(); const uint32_t size = message.ByteSize(); Buffer::RawSlice iovec; body->reserve(size, &iovec, 1); @@ -155,8 +158,9 @@ std::chrono::milliseconds Common::getGrpcTimeout(Http::HeaderMap& request_header Http::HeaderEntry* header_grpc_timeout_entry = request_headers.GrpcTimeout(); if (header_grpc_timeout_entry) { uint64_t grpc_timeout; - const char* unit = - StringUtil::strtoul(header_grpc_timeout_entry->value().c_str(), grpc_timeout); + // TODO(dnoe): Migrate to pure string_view (#6580) + std::string grpc_timeout_string(header_grpc_timeout_entry->value().getStringView()); + const char* unit = StringUtil::strtoull(grpc_timeout_string.c_str(), grpc_timeout); if (unit != nullptr && *unit != '\0') { switch (*unit) { case 'H': @@ -248,9 +252,7 @@ void Common::checkForHeaderOnlyError(Http::Message& http_response) { throw Exception(absl::optional(), "bad grpc-status header"); } - const Http::HeaderEntry* grpc_status_message = http_response.headers().GrpcMessage(); - throw Exception(grpc_status_code.value(), - grpc_status_message ? grpc_status_message->value().c_str() : EMPTY_STRING); + throw Exception(grpc_status_code.value(), Common::getGrpcMessage(http_response.headers())); } void Common::validateResponse(Http::Message& http_response) { @@ -272,9 +274,7 @@ void Common::validateResponse(Http::Message& http_response) { } if (grpc_status_code.value() != 0) { - const Http::HeaderEntry* grpc_status_message = http_response.trailers()->GrpcMessage(); - throw Exception(grpc_status_code.value(), - grpc_status_message ? 
grpc_status_message->value().c_str() : EMPTY_STRING); + throw Exception(grpc_status_code.value(), Common::getGrpcMessage(*http_response.trailers())); } } @@ -371,5 +371,13 @@ Buffer::InstancePtr Common::makeBufferInstance(const grpc::ByteBuffer& byteBuffe return buffer; } +void Common::prependGrpcFrameHeader(Buffer::Instance& buffer) { + std::array header; + header[0] = 0; // flags + const uint32_t nsize = htonl(buffer.length()); + std::memcpy(&header[1], reinterpret_cast(&nsize), sizeof(uint32_t)); + buffer.prepend(absl::string_view(&header[0], 5)); +} + } // namespace Grpc } // namespace Envoy diff --git a/source/common/grpc/common.h b/source/common/grpc/common.h index 7e05d63399fa3..592916f8e9e8d 100644 --- a/source/common/grpc/common.h +++ b/source/common/grpc/common.h @@ -120,9 +120,9 @@ class Common { std::string* method); /** - * Serialize protobuf message. With grpc header. + * Serialize protobuf message with gRPC frame header. */ - static Buffer::InstancePtr serializeBody(const Protobuf::Message& message); + static Buffer::InstancePtr serializeToGrpcFrame(const Protobuf::Message& message); /** * Serialize protobuf message. Without grpc header. @@ -169,6 +169,12 @@ class Common { */ static Buffer::InstancePtr makeBufferInstance(const grpc::ByteBuffer& byteBuffer); + /** + * Prepend a gRPC frame header to a Buffer::Instance containing a single gRPC frame. + * @param buffer containing the frame data which will be modified. 
+ */ + static void prependGrpcFrameHeader(Buffer::Instance& buffer); + private: static void checkForHeaderOnlyError(Http::Message& http_response); }; diff --git a/source/common/grpc/google_async_client_impl.cc b/source/common/grpc/google_async_client_impl.cc index 8c6526fcddba9..a24db6098e40b 100644 --- a/source/common/grpc/google_async_client_impl.cc +++ b/source/common/grpc/google_async_client_impl.cc @@ -178,7 +178,8 @@ void GoogleAsyncStreamImpl::initialize(bool /*buffer_body_for_retry*/) { initial_metadata.iterate( [](const Http::HeaderEntry& header, void* ctxt) { auto* client_context = static_cast(ctxt); - client_context->AddMetadata(header.key().c_str(), header.value().c_str()); + client_context->AddMetadata(std::string(header.key().getStringView()), + std::string(header.value().getStringView())); return Http::HeaderMap::Iterate::Continue; }, &ctxt_); @@ -409,8 +410,8 @@ GoogleAsyncRequestImpl::GoogleAsyncRequestImpl( current_span_ = parent_span.spawnChild(Tracing::EgressConfig::get(), "async " + parent.stat_prefix_ + " egress", parent.timeSource().systemTime()); - current_span_->setTag(Tracing::Tags::get().UPSTREAM_CLUSTER, parent.stat_prefix_); - current_span_->setTag(Tracing::Tags::get().COMPONENT, Tracing::Tags::get().PROXY); + current_span_->setTag(Tracing::Tags::get().UpstreamCluster, parent.stat_prefix_); + current_span_->setTag(Tracing::Tags::get().Component, Tracing::Tags::get().Proxy); } GoogleAsyncRequestImpl::GoogleAsyncRequestImpl( @@ -430,7 +431,7 @@ void GoogleAsyncRequestImpl::initialize(bool buffer_body_for_retry) { } void GoogleAsyncRequestImpl::cancel() { - current_span_->setTag(Tracing::Tags::get().STATUS, Tracing::Tags::get().CANCELED); + current_span_->setTag(Tracing::Tags::get().Status, Tracing::Tags::get().Canceled); current_span_->finishSpan(); this->resetStream(); } @@ -455,13 +456,13 @@ ProtobufTypes::MessagePtr GoogleAsyncRequestImpl::createEmptyResponse() { void GoogleAsyncRequestImpl::onRemoteClose(Grpc::Status::GrpcStatus 
status, const std::string& message) { - current_span_->setTag(Tracing::Tags::get().GRPC_STATUS_CODE, std::to_string(status)); + current_span_->setTag(Tracing::Tags::get().GrpcStatusCode, std::to_string(status)); if (status != Grpc::Status::GrpcStatus::Ok) { - current_span_->setTag(Tracing::Tags::get().ERROR, Tracing::Tags::get().TRUE); + current_span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); callbacks_.onFailure(status, message, *current_span_); } else if (response_ == nullptr) { - current_span_->setTag(Tracing::Tags::get().ERROR, Tracing::Tags::get().TRUE); + current_span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); callbacks_.onFailure(Status::Internal, EMPTY_STRING, *current_span_); } else { callbacks_.onSuccessRaw(std::move(response_), *current_span_); diff --git a/source/common/grpc/google_async_client_impl.h b/source/common/grpc/google_async_client_impl.h index e6621f2cfad29..3de8919238f55 100644 --- a/source/common/grpc/google_async_client_impl.h +++ b/source/common/grpc/google_async_client_impl.h @@ -171,7 +171,7 @@ class GoogleAsyncClientImpl final : public AsyncClient, Logger::Loggable diff --git a/source/common/grpc/google_grpc_creds_impl.cc b/source/common/grpc/google_grpc_creds_impl.cc index 30f6ef3a04090..d73ad3cb59997 100644 --- a/source/common/grpc/google_grpc_creds_impl.cc +++ b/source/common/grpc/google_grpc_creds_impl.cc @@ -25,6 +25,9 @@ std::shared_ptr CredsUtility::getChannelCredentials( case envoy::api::v2::core::GrpcService::GoogleGrpc::ChannelCredentials::kLocalCredentials: { return grpc::experimental::LocalCredentials(UDS); } + case envoy::api::v2::core::GrpcService::GoogleGrpc::ChannelCredentials::kGoogleDefault: { + return grpc::GoogleDefaultCredentials(); + } default: return nullptr; } diff --git a/source/common/grpc/google_grpc_utils.cc b/source/common/grpc/google_grpc_utils.cc new file mode 100644 index 0000000000000..82325780a337c --- /dev/null +++ b/source/common/grpc/google_grpc_utils.cc 
@@ -0,0 +1,108 @@ +#include "common/grpc/google_grpc_utils.h" + +#include +#include +#include +#include + +#include "common/buffer/buffer_impl.h" +#include "common/common/assert.h" +#include "common/common/empty_string.h" +#include "common/common/enum_to_int.h" +#include "common/common/fmt.h" +#include "common/common/macros.h" +#include "common/common/stack_array.h" +#include "common/common/utility.h" + +#include "absl/strings/match.h" + +namespace Envoy { +namespace Grpc { + +struct BufferInstanceContainer { + BufferInstanceContainer(int ref_count, Buffer::InstancePtr&& buffer) + : ref_count_(ref_count), buffer_(std::move(buffer)) {} + std::atomic ref_count_; // In case gPRC dereferences in a different threads. + Buffer::InstancePtr buffer_; + + static void derefBufferInstanceContainer(void* container_ptr) { + auto container = static_cast(container_ptr); + container->ref_count_--; + // This is safe because the ref_count_ is never incremented. + if (container->ref_count_ <= 0) { + delete container; + } + } +}; + +grpc::ByteBuffer GoogleGrpcUtils::makeByteBuffer(Buffer::InstancePtr&& buffer_instance) { + if (!buffer_instance) { + return {}; + } + Buffer::RawSlice on_raw_slice; + // NB: we need to pass in >= 1 in order to get the real "n" (see Buffer::Instance for details). 
+ const int n_slices = buffer_instance->getRawSlices(&on_raw_slice, 1); + if (n_slices <= 0) { + return {}; + } + auto* container = new BufferInstanceContainer{n_slices, std::move(buffer_instance)}; + if (n_slices == 1) { + grpc::Slice one_slice(on_raw_slice.mem_, on_raw_slice.len_, + &BufferInstanceContainer::derefBufferInstanceContainer, container); + return {&one_slice, 1}; + } + STACK_ARRAY(many_raw_slices, Buffer::RawSlice, n_slices); + container->buffer_->getRawSlices(many_raw_slices.begin(), n_slices); + std::vector slices; + slices.reserve(n_slices); + for (int i = 0; i < n_slices; i++) { + slices.emplace_back(many_raw_slices[i].mem_, many_raw_slices[i].len_, + &BufferInstanceContainer::derefBufferInstanceContainer, container); + } + return {&slices[0], slices.size()}; +} + +struct ByteBufferContainer { + ByteBufferContainer(int ref_count) : ref_count_(ref_count) {} + ~ByteBufferContainer() { ::free(fragments_); } + uint32_t ref_count_; + Buffer::BufferFragmentImpl* fragments_ = nullptr; + std::vector slices_; +}; + +Buffer::InstancePtr GoogleGrpcUtils::makeBufferInstance(const grpc::ByteBuffer& byte_buffer) { + auto buffer = std::make_unique(); + if (byte_buffer.Length() == 0) { + return buffer; + } + // NB: ByteBuffer::Dump moves the data out of the ByteBuffer so we need to ensure that the + // lifetime of the Slice(s) exceeds our Buffer::Instance. + std::vector slices; + byte_buffer.Dump(&slices); + auto* container = new ByteBufferContainer(static_cast(slices.size())); + std::function releaser = + [container](const void*, size_t, const Buffer::BufferFragmentImpl*) { + container->ref_count_--; + if (container->ref_count_ <= 0) { + delete container; + } + }; + // NB: addBufferFragment takes a pointer alias to the BufferFragmentImpl which is passed in so we + // need to ensure that the lifetime of those objects exceeds that of the Buffer::Instance. 
+ RELEASE_ASSERT(!::posix_memalign(reinterpret_cast(&container->fragments_), + alignof(Buffer::BufferFragmentImpl), + sizeof(Buffer::BufferFragmentImpl) * slices.size()), + "posix_memalign failure"); + for (size_t i = 0; i < slices.size(); i++) { + new (&container->fragments_[i]) + Buffer::BufferFragmentImpl(slices[i].begin(), slices[i].size(), releaser); + } + for (size_t i = 0; i < slices.size(); i++) { + buffer->addBufferFragment(container->fragments_[i]); + } + container->slices_ = std::move(slices); + return buffer; +} + +} // namespace Grpc +} // namespace Envoy diff --git a/source/common/grpc/google_grpc_utils.h b/source/common/grpc/google_grpc_utils.h new file mode 100644 index 0000000000000..86975ea4f6bd3 --- /dev/null +++ b/source/common/grpc/google_grpc_utils.h @@ -0,0 +1,33 @@ +#pragma once + +#include +#include + +#include "envoy/buffer/buffer.h" + +#include "grpcpp/grpcpp.h" + +namespace Envoy { +namespace Grpc { + +class GoogleGrpcUtils { +public: + /** + * Build grpc::ByteBuffer which aliases the data in a Buffer::InstancePtr. + * @param buffer source data container. + * @return byteBuffer target container aliased to the data in Buffer::Instance and owning the + * Buffer::Instance. + */ + static grpc::ByteBuffer makeByteBuffer(Buffer::InstancePtr&& buffer); + + /** + * Build Buffer::Instance which aliases the data in a grpc::ByteBuffer. + * @param buffer source data container. + * @return a Buffer::InstancePtr aliased to the data in the provided grpc::ByteBuffer and + * owning the corresponding grpc::Slice(s). 
+ */ + static Buffer::InstancePtr makeBufferInstance(const grpc::ByteBuffer& buffer); +}; + +} // namespace Grpc +} // namespace Envoy diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 903f861828657..503a1d4c4e0fc 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -89,6 +89,7 @@ envoy_cc_library( "//include/envoy/stats:stats_interface", "//source/common/common:enum_to_int", "//source/common/common:utility_lib", + "//source/common/stats:symbol_table_lib", "@envoy_api//envoy/type:http_status_cc", ], ) @@ -141,6 +142,7 @@ envoy_cc_library( ":exception_lib", ":header_map_lib", ":headers_lib", + ":path_utility_lib", ":user_agent_lib", ":utility_lib", "//include/envoy/access_log:access_log_interface", @@ -314,3 +316,15 @@ envoy_cc_library( "@envoy_api//envoy/type:range_cc", ], ) + +envoy_cc_library( + name = "path_utility_lib", + srcs = ["path_utility.cc"], + hdrs = ["path_utility.h"], + external_deps = ["abseil_optional"], + deps = [ + "//include/envoy/http:header_map_interface", + "//source/common/chromium_url", + "//source/common/common:logger_lib", + ], +) diff --git a/source/common/http/async_client_impl.cc b/source/common/http/async_client_impl.cc index 676001d5822cb..0ff58f7e69d62 100644 --- a/source/common/http/async_client_impl.cc +++ b/source/common/http/async_client_impl.cc @@ -17,6 +17,7 @@ const std::list AsyncStreamImpl::NullCorsPolicy::allow_origin_regex_ const absl::optional AsyncStreamImpl::NullCorsPolicy::allow_credentials_; const std::vector> AsyncStreamImpl::NullRateLimitPolicy::rate_limit_policy_entry_; +const AsyncStreamImpl::NullHedgePolicy AsyncStreamImpl::RouteEntryImpl::hedge_policy_; const AsyncStreamImpl::NullRateLimitPolicy AsyncStreamImpl::RouteEntryImpl::rate_limit_policy_; const AsyncStreamImpl::NullRetryPolicy AsyncStreamImpl::RouteEntryImpl::retry_policy_; const AsyncStreamImpl::NullShadowPolicy AsyncStreamImpl::RouteEntryImpl::shadow_policy_; @@ -40,7 +41,7 @@ 
AsyncClientImpl::AsyncClientImpl(Upstream::ClusterInfoConstSharedPtr cluster, Http::Context& http_context) : cluster_(cluster), config_("http.async-client.", local_info, stats_store, cm, runtime, random, - std::move(shadow_writer), true, false, false, dispatcher.timeSystem(), http_context), + std::move(shadow_writer), true, false, false, dispatcher.timeSource(), http_context), dispatcher_(dispatcher) {} AsyncClientImpl::~AsyncClientImpl() { @@ -76,7 +77,7 @@ AsyncClient::Stream* AsyncClientImpl::start(AsyncClient::StreamCallbacks& callba AsyncStreamImpl::AsyncStreamImpl(AsyncClientImpl& parent, AsyncClient::StreamCallbacks& callbacks, const AsyncClient::StreamOptions& options) : parent_(parent), stream_callbacks_(callbacks), stream_id_(parent.config_.random_.random()), - router_(parent.config_), stream_info_(Protocol::Http11, parent.dispatcher().timeSystem()), + router_(parent.config_), stream_info_(Protocol::Http11, parent.dispatcher().timeSource()), tracing_config_(Tracing::EgressConfig::get()), route_(std::make_shared(parent_.cluster_->name(), options.timeout)), send_xff_(options.send_xff) { @@ -112,7 +113,7 @@ void AsyncStreamImpl::encodeTrailers(HeaderMapPtr&& trailers) { } void AsyncStreamImpl::sendHeaders(HeaderMap& headers, bool end_stream) { - if (Http::Headers::get().MethodValues.Head == headers.Method()->value().c_str()) { + if (Http::Headers::get().MethodValues.Head == headers.Method()->value().getStringView()) { is_head_request_ = true; } diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h index af81847471514..030c1bd95f1ac 100644 --- a/source/common/http/async_client_impl.h +++ b/source/common/http/async_client_impl.h @@ -109,6 +109,17 @@ class AsyncStreamImpl : public AsyncClient::Stream, static const absl::optional allow_credentials_; }; + struct NullHedgePolicy : public Router::HedgePolicy { + // Router::HedgePolicy + uint32_t initialRequests() const override { return 1; } + const 
envoy::type::FractionalPercent& additionalRequestChance() const override { + return additional_request_chance_; + } + bool hedgeOnPerTryTimeout() const override { return false; } + + const envoy::type::FractionalPercent additional_request_chance_; + }; + struct NullRateLimitPolicy : public Router::RateLimitPolicy { // Router::RateLimitPolicy const std::vector>& @@ -137,8 +148,12 @@ class AsyncStreamImpl : public AsyncClient::Stream, const std::vector& retriableStatusCodes() const override { return retriable_status_codes_; } + absl::optional baseInterval() const override { + return absl::nullopt; + } + absl::optional maxInterval() const override { return absl::nullopt; } - const std::vector retriable_status_codes_; + const std::vector retriable_status_codes_{}; }; struct NullShadowPolicy : public Router::ShadowPolicy { @@ -161,6 +176,7 @@ class AsyncStreamImpl : public AsyncClient::Stream, } const std::string& name() const override { return EMPTY_STRING; } + bool usesVhds() const override { return false; } static const std::list internal_only_headers_; }; @@ -200,6 +216,7 @@ class AsyncStreamImpl : public AsyncClient::Stream, bool) const override {} void finalizeResponseHeaders(Http::HeaderMap&, const StreamInfo::StreamInfo&) const override {} const Router::HashPolicy* hashPolicy() const override { return nullptr; } + const Router::HedgePolicy& hedgePolicy() const override { return hedge_policy_; } const Router::MetadataMatchCriteria* metadataMatchCriteria() const override { return nullptr; } Upstream::ResourcePriority priority() const override { return Upstream::ResourcePriority::Default; @@ -218,6 +235,9 @@ class AsyncStreamImpl : public AsyncClient::Stream, absl::optional maxGrpcTimeout() const override { return absl::nullopt; } + absl::optional grpcTimeoutOffset() const override { + return absl::nullopt; + } const Router::VirtualCluster* virtualCluster(const Http::HeaderMap&) const override { return nullptr; } @@ -243,6 +263,7 @@ class AsyncStreamImpl : public 
AsyncClient::Stream, return Router::InternalRedirectAction::PassThrough; } + static const NullHedgePolicy hedge_policy_; static const NullRateLimitPolicy rate_limit_policy_; static const NullRetryPolicy retry_policy_; static const NullShadowPolicy shadow_policy_; @@ -298,10 +319,18 @@ class AsyncStreamImpl : public AsyncClient::Stream, // filter which uses this function for buffering. ASSERT(buffered_body_ != nullptr); } + void injectDecodedDataToFilterChain(Buffer::Instance&, bool) override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } const Buffer::Instance* decodingBuffer() override { return buffered_body_.get(); } + void modifyDecodingBuffer(std::function) override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } void sendLocalReply(Code code, absl::string_view body, std::function modify_headers, - const absl::optional grpc_status) override { + const absl::optional grpc_status, + absl::string_view details) override { + stream_info_.setResponseCodeDetails(details); Utility::sendLocalReply( is_grpc_request_, [this, modify_headers](HeaderMapPtr&& headers, bool end_stream) -> void { @@ -327,6 +356,8 @@ class AsyncStreamImpl : public AsyncClient::Stream, void setDecoderBufferLimit(uint32_t) override {} uint32_t decoderBufferLimit() override { return 0; } bool recreateStream() override { return false; } + void addUpstreamSocketOptions(const Network::Socket::OptionsSharedPtr&) override {} + Network::Socket::OptionsSharedPtr getUpstreamSocketOptions() const override { return {}; } AsyncClient::StreamCallbacks& stream_callbacks_; const uint64_t stream_id_; @@ -370,6 +401,9 @@ class AsyncRequestImpl final : public AsyncClient::Request, // internal use of the router filter which uses this function for buffering. 
} const Buffer::Instance* decodingBuffer() override { return request_->body().get(); } + void modifyDecodingBuffer(std::function) override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } MessagePtr request_; AsyncClient::Callbacks& callbacks_; diff --git a/source/common/http/codec_client.cc b/source/common/http/codec_client.cc index 3f8defb71ee6d..134f8cad313ea 100644 --- a/source/common/http/codec_client.cc +++ b/source/common/http/codec_client.cc @@ -115,11 +115,11 @@ void CodecClient::onData(Buffer::Instance& data) { try { codec_->dispatch(data); } catch (CodecProtocolException& e) { - ENVOY_CONN_LOG(info, "protocol error: {}", *connection_, e.what()); + ENVOY_CONN_LOG(debug, "protocol error: {}", *connection_, e.what()); close(); protocol_error = true; } catch (PrematureResponseException& e) { - ENVOY_CONN_LOG(info, "premature response", *connection_); + ENVOY_CONN_LOG(debug, "premature response", *connection_); close(); // Don't count 408 responses where we have no active requests as protocol errors diff --git a/source/common/http/codec_client.h b/source/common/http/codec_client.h index 98956b273aa5a..0f4dff8bd8ab8 100644 --- a/source/common/http/codec_client.h +++ b/source/common/http/codec_client.h @@ -78,6 +78,11 @@ class CodecClient : Logger::Loggable, */ uint64_t id() { return connection_->id(); } + /** + * @return the underlying connection error. + */ + absl::string_view connectionFailureReason() { return connection_->transportFailureReason(); } + /** * @return size_t the number of outstanding requests that have not completed or been reset. 
*/ @@ -179,7 +184,9 @@ class CodecClient : Logger::Loggable, : StreamDecoderWrapper(inner), parent_(parent) {} // StreamCallbacks - void onResetStream(StreamResetReason reason) override { parent_.onReset(*this, reason); } + void onResetStream(StreamResetReason reason, absl::string_view) override { + parent_.onReset(*this, reason); + } void onAboveWriteBufferHighWatermark() override {} void onBelowWriteBufferLowWatermark() override {} diff --git a/source/common/http/codec_helper.h b/source/common/http/codec_helper.h index 6f58317218326..4d2b456bbcc96 100644 --- a/source/common/http/codec_helper.h +++ b/source/common/http/codec_helper.h @@ -46,7 +46,7 @@ class StreamCallbackHelper { reset_callbacks_started_ = true; for (StreamCallbacks* callbacks : callbacks_) { if (callbacks) { - callbacks->onResetStream(reason); + callbacks->onResetStream(reason, absl::string_view()); } } } diff --git a/source/common/http/codes.h b/source/common/http/codes.h index f135067c683c2..b7b50ee9fa08d 100644 --- a/source/common/http/codes.h +++ b/source/common/http/codes.h @@ -60,7 +60,6 @@ class CodeStatsImpl : public CodeStats { const absl::string_view internal_upstream_rq_{"internal.upstream_rq_"}; const absl::string_view upstream_rq_completed_{"upstream_rq_completed"}; const absl::string_view upstream_rq_time_{"upstream_rq_time"}; - const absl::string_view upstream_rq_time{"upstream_rq_time"}; const absl::string_view upstream_rq_{"upstream_rq_"}; const absl::string_view vcluster_{"vcluster"}; const absl::string_view vhost_{"vhost"}; diff --git a/source/common/http/conn_manager_config.h b/source/common/http/conn_manager_config.h index 32d7d8b7d2f69..2fb4ee9e9c575 100644 --- a/source/common/http/conn_manager_config.h +++ b/source/common/http/conn_manager_config.h @@ -109,6 +109,7 @@ struct TracingConnectionManagerConfig { uint64_t client_sampling_; uint64_t random_sampling_; uint64_t overall_sampling_; + bool verbose_; }; typedef std::unique_ptr TracingConnectionManagerConfigPtr; @@ 
-211,12 +212,6 @@ class ConnectionManagerConfig { */ virtual FilterChainFactory& filterFactory() PURE; - /** - * @return whether the connection manager will reverse the order of encoder - * filters in the HTTP filter chain. - */ - virtual bool reverseEncodeOrder() PURE; - /** * @return whether the connection manager will generate a fresh x-request-id if the request does * not have one. @@ -345,6 +340,11 @@ class ConnectionManagerConfig { * @return supplies the http1 settings. */ virtual const Http::Http1Settings& http1Settings() const PURE; + + /** + * @return if the HttpConnectionManager should normalize url following RFC3986 + */ + virtual bool shouldNormalizePath() const PURE; }; } // namespace Http } // namespace Envoy diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 8805eacffe33e..c9004f00ad9da 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -29,14 +29,47 @@ #include "common/http/headers.h" #include "common/http/http1/codec_impl.h" #include "common/http/http2/codec_impl.h" +#include "common/http/path_utility.h" #include "common/http/utility.h" #include "common/network/utility.h" +#include "absl/strings/escaping.h" #include "absl/strings/match.h" namespace Envoy { namespace Http { +namespace { + +template using FilterList = std::list>; + +// Shared helper for recording the latest filter used. +template +void recordLatestDataFilter(const typename FilterList::iterator current_filter, + T*& latest_filter, const FilterList& filters) { + // If this is the first time we're calling onData, just record the current filter. + if (latest_filter == nullptr) { + latest_filter = current_filter->get(); + return; + } + + // We want to keep this pointing at the latest filter in the filter list that has received the + // onData callback. To do so, we compare the current latest with the *previous* filter. 
If they + // match, then we must be processing a new filter for the first time. We omit this check if we're + // the first filter, since the above check handles that case. + // + // We compare against the previous filter to avoid multiple filter iterations from reseting the + // pointer: If we just set latest to current, then the first onData filter iteration would + // correctly iterate over the filters and set latest, but on subsequent onData iterations + // we'd start from the beginning again, potentially allowing filter N to modify the buffer even + // though filter M > N was the filter that inserted data into the buffer. + if (current_filter != filters.begin() && latest_filter == std::prev(current_filter)->get()) { + latest_filter = current_filter->get(); + } +} + +} // namespace + ConnectionManagerStats ConnectionManagerImpl::generateStats(const std::string& prefix, Stats::Scope& scope) { return { @@ -63,9 +96,9 @@ ConnectionManagerImpl::ConnectionManagerImpl(ConnectionManagerConfig& config, const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cluster_manager, Server::OverloadManager* overload_manager, - Event::TimeSystem& time_system) + TimeSource& time_source) : config_(config), stats_(config_.stats()), - conn_length_(new Stats::Timespan(stats_.named_.downstream_cx_length_ms_, time_system)), + conn_length_(new Stats::Timespan(stats_.named_.downstream_cx_length_ms_, time_source)), drain_close_(drain_close), random_generator_(random_generator), http_context_(http_context), runtime_(runtime), local_info_(local_info), cluster_manager_(cluster_manager), listener_stats_(config_.listenerStats()), @@ -77,7 +110,7 @@ ConnectionManagerImpl::ConnectionManagerImpl(ConnectionManagerConfig& config, overload_manager ? 
overload_manager->getThreadLocalOverloadState().getState( Server::OverloadActionNames::get().DisableHttpKeepAlive) : Server::OverloadManager::getInactiveState()), - time_system_(time_system) {} + time_source_(time_source) {} const HeaderMapImpl& ConnectionManagerImpl::continueHeader() { CONSTRUCT_ON_FIRST_USE(HeaderMapImpl, @@ -279,7 +312,7 @@ Network::FilterStatus ConnectionManagerImpl::onData(Buffer::Instance& data, bool void ConnectionManagerImpl::resetAllStreams() { while (!streams_.empty()) { // Mimic a downstream reset in this case. - streams_.front()->onResetStream(StreamResetReason::ConnectionTermination); + streams_.front()->onResetStream(StreamResetReason::ConnectionTermination, absl::string_view()); } } @@ -369,8 +402,9 @@ ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connect snapped_route_config_(connection_manager.config_.routeConfigProvider().config()), stream_id_(connection_manager.random_generator_.random()), request_response_timespan_(new Stats::Timespan( - connection_manager_.stats_.named_.downstream_rq_time_, connection_manager_.timeSystem())), - stream_info_(connection_manager_.codec_->protocol(), connection_manager_.timeSystem()) { + connection_manager_.stats_.named_.downstream_rq_time_, connection_manager_.timeSource())), + stream_info_(connection_manager_.codec_->protocol(), connection_manager_.timeSource()), + upstream_options_(std::make_shared()) { connection_manager_.stats_.named_.downstream_rq_total_.inc(); connection_manager_.stats_.named_.downstream_rq_active_.inc(); if (connection_manager_.codec_->protocol() == Protocol::Http2) { @@ -389,6 +423,8 @@ ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connect stream_info_.setDownstreamRemoteAddress( connection_manager_.read_callbacks_->connection().remoteAddress()); + stream_info_.setDownstreamSslConnection(connection_manager_.read_callbacks_->connection().ssl()); + if (connection_manager_.config_.streamIdleTimeout().count()) { 
idle_timeout_ms_ = connection_manager_.config_.streamIdleTimeout(); stream_idle_timer_ = connection_manager_.read_callbacks_->connection().dispatcher().createTimer( @@ -458,9 +494,11 @@ void ConnectionManagerImpl::ActiveStream::onIdleTimeout() { // or gRPC status code, and/or set H2 RST_STREAM error. connection_manager_.doEndStream(*this); } else { - sendLocalReply( - request_headers_ != nullptr && Grpc::Common::hasGrpcContentType(*request_headers_), - Http::Code::RequestTimeout, "stream timeout", nullptr, is_head_request_, absl::nullopt); + stream_info_.setResponseFlag(StreamInfo::ResponseFlag::StreamIdleTimeout); + sendLocalReply(request_headers_ != nullptr && + Grpc::Common::hasGrpcContentType(*request_headers_), + Http::Code::RequestTimeout, "stream timeout", nullptr, is_head_request_, + absl::nullopt, StreamInfo::ResponseCodeDetails::get().StreamIdleTimeout); } } @@ -468,7 +506,7 @@ void ConnectionManagerImpl::ActiveStream::onRequestTimeout() { connection_manager_.stats_.named_.downstream_rq_timeout_.inc(); sendLocalReply(request_headers_ != nullptr && Grpc::Common::hasGrpcContentType(*request_headers_), Http::Code::RequestTimeout, "request timeout", nullptr, is_head_request_, - absl::nullopt); + absl::nullopt, StreamInfo::ResponseCodeDetails::get().RequestOverallTimeout); } void ConnectionManagerImpl::ActiveStream::addStreamDecoderFilterWorker( @@ -482,11 +520,7 @@ void ConnectionManagerImpl::ActiveStream::addStreamEncoderFilterWorker( StreamEncoderFilterSharedPtr filter, bool dual_filter) { ActiveStreamEncoderFilterPtr wrapper(new ActiveStreamEncoderFilter(*this, filter, dual_filter)); filter->setEncoderFilterCallbacks(*wrapper); - if (connection_manager_.config_.reverseEncodeOrder()) { - wrapper->moveIntoList(std::move(wrapper), encoder_filters_); - } else { - wrapper->moveIntoListBack(std::move(wrapper), encoder_filters_); - } + wrapper->moveIntoList(std::move(wrapper), encoder_filters_); } void ConnectionManagerImpl::ActiveStream::addAccessLogHandler( @@ 
-541,7 +575,8 @@ const Network::Connection* ConnectionManagerImpl::ActiveStream::connection() { // e.g. many early returns do not currently handle connection: close properly. void ConnectionManagerImpl::ActiveStream::decodeHeaders(HeaderMapPtr&& headers, bool end_stream) { request_headers_ = std::move(headers); - if (Http::Headers::get().MethodValues.Head == request_headers_->Method()->value().c_str()) { + if (Http::Headers::get().MethodValues.Head == + request_headers_->Method()->value().getStringView()) { is_head_request_ = true; } ENVOY_STREAM_LOG(debug, "request headers complete (end_stream={}):\n{}", *this, end_stream, @@ -561,7 +596,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(HeaderMapPtr&& headers, connection_manager_.stats_.named_.downstream_rq_overload_close_.inc(); sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Http::Code::ServiceUnavailable, "envoy overloaded", nullptr, is_head_request_, - absl::nullopt); + absl::nullopt, StreamInfo::ResponseCodeDetails::get().Overload); return; } @@ -589,7 +624,8 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(HeaderMapPtr&& headers, stream_info_.protocol(protocol); if (!connection_manager_.config_.http1Settings().accept_http_10_) { // Send "Upgrade Required" if HTTP/1.0 support is not explicitly configured on. - sendLocalReply(false, Code::UpgradeRequired, "", nullptr, is_head_request_, absl::nullopt); + sendLocalReply(false, Code::UpgradeRequired, "", nullptr, is_head_request_, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().LowVersion); return; } else { // HTTP/1.0 defaults to single-use connections. Make sure the connection @@ -612,7 +648,8 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(HeaderMapPtr&& headers, } else { // Require host header. For HTTP/1.1 Host has already been translated to :authority. 
sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::BadRequest, "", - nullptr, is_head_request_, absl::nullopt); + nullptr, is_head_request_, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().MissingHost); return; } } @@ -620,7 +657,8 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(HeaderMapPtr&& headers, ASSERT(connection_manager_.config_.maxRequestHeadersKb() > 0); if (request_headers_->byteSize() > (connection_manager_.config_.maxRequestHeadersKb() * 1024)) { sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), - Code::RequestHeaderFieldsTooLarge, "", nullptr, is_head_request_, absl::nullopt); + Code::RequestHeaderFieldsTooLarge, "", nullptr, is_head_request_, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().RequestHeadersTooLarge); return; } @@ -629,10 +667,24 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(HeaderMapPtr&& headers, // when the allow_absolute_url flag is enabled on the HCM. // https://tools.ietf.org/html/rfc7230#section-5.3 We also need to check for the existence of // :path because CONNECT does not have a path, and we don't support that currently. - if (!request_headers_->Path() || request_headers_->Path()->value().c_str()[0] != '/') { + if (!request_headers_->Path() || request_headers_->Path()->value().getStringView().empty() || + request_headers_->Path()->value().getStringView()[0] != '/') { + const bool has_path = + request_headers_->Path() && !request_headers_->Path()->value().getStringView().empty(); connection_manager_.stats_.named_.downstream_rq_non_relative_path_.inc(); sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::NotFound, "", nullptr, - is_head_request_, absl::nullopt); + is_head_request_, absl::nullopt, + has_path ? StreamInfo::ResponseCodeDetails::get().AbsolutePath + : StreamInfo::ResponseCodeDetails::get().MissingPath); + return; + } + + // Path sanitization should happen before any path access other than the above sanity check. 
+ if (!ConnectionManagerUtility::maybeNormalizePath(*request_headers_, + connection_manager_.config_)) { + sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::BadRequest, "", + nullptr, is_head_request_, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().PathNormalizationFailed); return; } @@ -641,6 +693,15 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(HeaderMapPtr&& headers, Http::Headers::get().ConnectionValues.Close)) { state_.saw_connection_close_ = true; } + // Note: Proxy-Connection is not a standard header, but is supported here + // since it is supported by http-parser the underlying parser for http + // requests. + if (protocol != Protocol::Http2 && !state_.saw_connection_close_ && + request_headers_->ProxyConnection() && + absl::EqualsIgnoreCase(request_headers_->ProxyConnection()->value().getStringView(), + Http::Headers::get().ConnectionValues.Close)) { + state_.saw_connection_close_ = true; + } if (!state_.is_internally_created_) { // Only sanitize headers on first pass. // Modify the downstream remote address depending on configuration and headers. @@ -662,7 +723,8 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(HeaderMapPtr&& headers, // Do not allow upgrades if the route does not support it. connection_manager_.stats_.named_.downstream_rq_ws_on_non_ws_route_.inc(); sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::Forbidden, "", - nullptr, is_head_request_, absl::nullopt); + nullptr, is_head_request_, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().UpgradeFailed); return; } // Allow non websocket requests to go through websocket enabled routes. @@ -739,7 +801,7 @@ void ConnectionManagerImpl::ActiveStream::traceRequest() { // should be used to override the active span's operation. 
if (req_operation_override) { if (!req_operation_override->value().empty()) { - active_span_->setOperation(req_operation_override->value().c_str()); + active_span_->setOperation(req_operation_override->value().getStringView()); // Clear the decorated operation so won't be used in the response header, as // it has been overridden by the inbound decorator operation request header. @@ -753,13 +815,10 @@ void ConnectionManagerImpl::ActiveStream::traceRequest() { void ConnectionManagerImpl::ActiveStream::decodeHeaders(ActiveStreamDecoderFilter* filter, HeaderMap& headers, bool end_stream) { - std::list::iterator entry; + // Headers filter iteration should always start with the next filter if available. + std::list::iterator entry = + commonDecodePrefix(filter, FilterIterationStartState::AlwaysStartFromNext); std::list::iterator continue_data_entry = decoder_filters_.end(); - if (!filter) { - entry = decoder_filters_.begin(); - } else { - entry = std::next(filter->entry()); - } for (; entry != decoder_filters_.end(); entry++) { ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeHeaders)); @@ -790,9 +849,11 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(ActiveStreamDecoderFilte if (continue_data_entry != decoder_filters_.end()) { // We use the continueDecoding() code since it will correctly handle not calling - // decodeHeaders() again. Fake setting stopped_ since the continueDecoding() code expects it. + // decodeHeaders() again. Fake setting StopSingleIteration since the continueDecoding() code + // expects it. 
ASSERT(buffered_request_data_); - (*continue_data_entry)->stopped_ = true; + (*continue_data_entry)->iteration_state_ = + ActiveStreamFilterBase::IterationState::StopSingleIteration; (*continue_data_entry)->continueDecoding(); } @@ -805,11 +866,12 @@ void ConnectionManagerImpl::ActiveStream::decodeData(Buffer::Instance& data, boo maybeEndDecode(end_stream); stream_info_.addBytesReceived(data.length()); - decodeData(nullptr, data, end_stream); + decodeData(nullptr, data, end_stream, FilterIterationStartState::CanStartFromCurrent); } -void ConnectionManagerImpl::ActiveStream::decodeData(ActiveStreamDecoderFilter* filter, - Buffer::Instance& data, bool end_stream) { +void ConnectionManagerImpl::ActiveStream::decodeData( + ActiveStreamDecoderFilter* filter, Buffer::Instance& data, bool end_stream, + FilterIterationStartState filter_iteration_start_state) { resetIdleTimer(); // If we previously decided to decode only the headers, do nothing here. @@ -823,16 +885,17 @@ void ConnectionManagerImpl::ActiveStream::decodeData(ActiveStreamDecoderFilter* return; } - std::list::iterator entry; auto trailers_added_entry = decoder_filters_.end(); const bool trailers_exists_at_start = request_trailers_ != nullptr; - if (!filter) { - entry = decoder_filters_.begin(); - } else { - entry = std::next(filter->entry()); - } + // Filter iteration may start at the current filter. + std::list::iterator entry = + commonDecodePrefix(filter, filter_iteration_start_state); for (; entry != decoder_filters_.end(); entry++) { + // If the filter pointed by entry has stopped for all frame types, return now. + if (handleDataIfStopAll(**entry, data, state_.decoder_filters_streaming_)) { + return; + } // If end_stream_ is marked for a filter, the data is not for this filter and filters after. 
// // In following case, ActiveStreamFilterBase::commonContinue() could be called recursively and @@ -874,9 +937,15 @@ void ConnectionManagerImpl::ActiveStream::decodeData(ActiveStreamDecoderFilter* if (end_stream) { state_.filter_call_state_ |= FilterCallState::LastDataFrame; } + + recordLatestDataFilter(entry, state_.latest_data_decoding_filter_, decoder_filters_); + state_.filter_call_state_ |= FilterCallState::DecodeData; (*entry)->end_stream_ = end_stream && !request_trailers_; FilterDataStatus status = (*entry)->handle_->decodeData(data, (*entry)->end_stream_); + if ((*entry)->end_stream_) { + (*entry)->handle_->decodeComplete(); + } state_.filter_call_state_ &= ~FilterCallState::DecodeData; if (end_stream) { state_.filter_call_state_ &= ~FilterCallState::LastDataFrame; @@ -933,7 +1002,7 @@ void ConnectionManagerImpl::ActiveStream::addDecodedData(ActiveStreamDecoderFilt } else if (state_.filter_call_state_ & FilterCallState::DecodeTrailers) { // In this case we need to inline dispatch the data to further filters. If those filters // choose to buffer/stop iteration that's fine. - decodeData(&filter, data, false); + decodeData(&filter, data, false, FilterIterationStartState::AlwaysStartFromNext); } else { // TODO(mattklein123): Formalize error handling for filters and add tests. Should probably // throw an exception here. @@ -960,17 +1029,20 @@ void ConnectionManagerImpl::ActiveStream::decodeTrailers(ActiveStreamDecoderFilt return; } - std::list::iterator entry; - if (!filter) { - entry = decoder_filters_.begin(); - } else { - entry = std::next(filter->entry()); - } + // Filter iteration may start at the current filter. + std::list::iterator entry = + commonDecodePrefix(filter, FilterIterationStartState::CanStartFromCurrent); for (; entry != decoder_filters_.end(); entry++) { + // If the filter pointed by entry has stopped for all frame type, return now. 
+ if ((*entry)->stoppedAll()) { + return; + } + ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeTrailers)); state_.filter_call_state_ |= FilterCallState::DecodeTrailers; FilterTrailersStatus status = (*entry)->handle_->decodeTrailers(trailers); + (*entry)->handle_->decodeComplete(); (*entry)->end_stream_ = true; state_.filter_call_state_ &= ~FilterCallState::DecodeTrailers; ENVOY_STREAM_LOG(trace, "decode trailers called: filter={} status={}", *this, @@ -998,20 +1070,39 @@ void ConnectionManagerImpl::ActiveStream::disarmRequestTimeout() { } std::list::iterator -ConnectionManagerImpl::ActiveStream::commonEncodePrefix(ActiveStreamEncoderFilter* filter, - bool end_stream) { +ConnectionManagerImpl::ActiveStream::commonEncodePrefix( + ActiveStreamEncoderFilter* filter, bool end_stream, + FilterIterationStartState filter_iteration_start_state) { // Only do base state setting on the initial call. Subsequent calls for filtering do not touch // the base state. if (filter == nullptr) { ASSERT(!state_.local_complete_); state_.local_complete_ = end_stream; + return encoder_filters_.begin(); } + if (filter_iteration_start_state == FilterIterationStartState::CanStartFromCurrent && + (*(filter->entry()))->iterate_from_current_filter_) { + // The filter iteration has been stopped for all frame types, and now the iteration continues. + // The current filter's encoding callback has not be called. Call it now. 
+ return filter->entry(); + } + return std::next(filter->entry()); +} + +std::list::iterator +ConnectionManagerImpl::ActiveStream::commonDecodePrefix( + ActiveStreamDecoderFilter* filter, FilterIterationStartState filter_iteration_start_state) { if (!filter) { - return encoder_filters_.begin(); - } else { - return std::next(filter->entry()); + return decoder_filters_.begin(); } + if (filter_iteration_start_state == FilterIterationStartState::CanStartFromCurrent && + (*(filter->entry()))->iterate_from_current_filter_) { + // The filter iteration has been stopped for all frame types, and now the iteration continues. + // The current filter's callback function has not been called. Call it now. + return filter->entry(); + } + return std::next(filter->entry()); } void ConnectionManagerImpl::startDrainSequence() { @@ -1024,7 +1115,10 @@ void ConnectionManagerImpl::startDrainSequence() { } void ConnectionManagerImpl::ActiveStream::refreshCachedRoute() { - Router::RouteConstSharedPtr route = snapped_route_config_->route(*request_headers_, stream_id_); + Router::RouteConstSharedPtr route; + if (request_headers_ != nullptr) { + route = snapped_route_config_->route(*request_headers_, stream_id_); + } stream_info_.route_entry_ = route ? route->routeEntry() : nullptr; cached_route_ = std::move(route); if (nullptr == stream_info_.route_entry_) { @@ -1039,29 +1133,32 @@ void ConnectionManagerImpl::ActiveStream::refreshCachedRoute() { void ConnectionManagerImpl::ActiveStream::sendLocalReply( bool is_grpc_request, Code code, absl::string_view body, const std::function& modify_headers, bool is_head_request, - const absl::optional grpc_status) { + const absl::optional grpc_status, absl::string_view details) { + ENVOY_STREAM_LOG(debug, "Sending local reply with details {}", *this, details); ASSERT(response_headers_ == nullptr); // For early error handling, do a best-effort attempt to create a filter chain // to ensure access logging. 
if (!state_.created_filter_chain_) { createFilterChain(); } - Utility::sendLocalReply(is_grpc_request, - [this, modify_headers](HeaderMapPtr&& headers, bool end_stream) -> void { - if (modify_headers != nullptr) { - modify_headers(*headers); - } - response_headers_ = std::move(headers); - // TODO: Start encoding from the last decoder filter that saw the - // request instead. - encodeHeaders(nullptr, *response_headers_, end_stream); - }, - [this](Buffer::Instance& data, bool end_stream) -> void { - // TODO: Start encoding from the last decoder filter that saw the - // request instead. - encodeData(nullptr, data, end_stream); - }, - state_.destroyed_, code, body, grpc_status, is_head_request); + stream_info_.setResponseCodeDetails(details); + Utility::sendLocalReply( + is_grpc_request, + [this, modify_headers](HeaderMapPtr&& headers, bool end_stream) -> void { + if (modify_headers != nullptr) { + modify_headers(*headers); + } + response_headers_ = std::move(headers); + // TODO: Start encoding from the last decoder filter that saw the + // request instead. + encodeHeaders(nullptr, *response_headers_, end_stream); + }, + [this](Buffer::Instance& data, bool end_stream) -> void { + // TODO: Start encoding from the last decoder filter that saw the + // request instead. + encodeData(nullptr, data, end_stream, FilterIterationStartState::CanStartFromCurrent); + }, + state_.destroyed_, code, body, grpc_status, is_head_request); } void ConnectionManagerImpl::ActiveStream::encode100ContinueHeaders( @@ -1075,7 +1172,9 @@ void ConnectionManagerImpl::ActiveStream::encode100ContinueHeaders( // filter. This is simpler than that case because 100 continue implies no // end-stream, and because there are normal headers coming there's no need for // complex continuation logic. - std::list::iterator entry = commonEncodePrefix(filter, false); + // 100-continue filter iteration should always start with the next filter if available. 
+ std::list::iterator entry = + commonEncodePrefix(filter, false, FilterIterationStartState::AlwaysStartFromNext); for (; entry != encoder_filters_.end(); entry++) { ASSERT(!(state_.filter_call_state_ & FilterCallState::Encode100ContinueHeaders)); state_.filter_call_state_ |= FilterCallState::Encode100ContinueHeaders; @@ -1106,7 +1205,9 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilte resetIdleTimer(); disarmRequestTimeout(); - std::list::iterator entry = commonEncodePrefix(filter, end_stream); + // Headers filter iteration should always start with the next filter if available. + std::list::iterator entry = + commonEncodePrefix(filter, end_stream, FilterIterationStartState::AlwaysStartFromNext); std::list::iterator continue_data_entry = encoder_filters_.end(); for (; entry != encoder_filters_.end(); entry++) { @@ -1115,6 +1216,9 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilte (*entry)->end_stream_ = encoding_headers_only_ || (end_stream && continue_data_entry == encoder_filters_.end()); FilterHeadersStatus status = (*entry)->handle_->encodeHeaders(headers, (*entry)->end_stream_); + if ((*entry)->end_stream_) { + (*entry)->handle_->encodeComplete(); + } state_.filter_call_state_ &= ~FilterCallState::EncodeHeaders; ENVOY_STREAM_LOG(trace, "encode headers called: filter={} status={}", *this, static_cast((*entry).get()), static_cast(status)); @@ -1182,7 +1286,7 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilte connection_manager_.stats_.named_.downstream_rq_response_before_rq_complete_.inc(); } - if (connection_manager_.drain_state_ == DrainState::Closing && + if (connection_manager_.drain_state_ != DrainState::NotDraining && connection_manager_.codec_->protocol() != Protocol::Http2) { // If the connection manager is draining send "Connection: Close" on HTTP/1.1 connections. 
// Do not do this for H2 (which drains via GOAWAY) or Upgrade (as the upgrade @@ -1209,7 +1313,7 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilte // should be used to override the active span's operation. if (resp_operation_override) { if (!resp_operation_override->value().empty() && active_span_) { - active_span_->setOperation(resp_operation_override->value().c_str()); + active_span_->setOperation(resp_operation_override->value().getStringView()); } // Remove header so not propagated to service. headers.removeEnvoyDecoratorOperation(); @@ -1231,9 +1335,11 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilte encoding_headers_only_ || (end_stream && continue_data_entry == encoder_filters_.end())); if (continue_data_entry != encoder_filters_.end()) { // We use the continueEncoding() code since it will correctly handle not calling - // encodeHeaders() again. Fake setting stopped_ since the continueEncoding() code expects it. + // encodeHeaders() again. Fake setting StopSingleIteration since the continueEncoding() code + // expects it. ASSERT(buffered_response_data_); - (*continue_data_entry)->stopped_ = true; + (*continue_data_entry)->iteration_state_ = + ActiveStreamFilterBase::IterationState::StopSingleIteration; (*continue_data_entry)->continueEncoding(); } else { // End encoding if this is a header only response, either due to a filter converting it to one @@ -1289,7 +1395,7 @@ void ConnectionManagerImpl::ActiveStream::addEncodedData(ActiveStreamEncoderFilt } else if (state_.filter_call_state_ & FilterCallState::EncodeTrailers) { // In this case we need to inline dispatch the data to further filters. If those filters // choose to buffer/stop iteration that's fine. - encodeData(&filter, data, false); + encodeData(&filter, data, false, FilterIterationStartState::AlwaysStartFromNext); } else { // TODO(mattklein123): Formalize error handling for filters and add tests. 
Should probably // throw an exception here. @@ -1297,8 +1403,9 @@ void ConnectionManagerImpl::ActiveStream::addEncodedData(ActiveStreamEncoderFilt } } -void ConnectionManagerImpl::ActiveStream::encodeData(ActiveStreamEncoderFilter* filter, - Buffer::Instance& data, bool end_stream) { +void ConnectionManagerImpl::ActiveStream::encodeData( + ActiveStreamEncoderFilter* filter, Buffer::Instance& data, bool end_stream, + FilterIterationStartState filter_iteration_start_state) { resetIdleTimer(); // If we previously decided to encode only the headers, do nothing here. @@ -1306,11 +1413,17 @@ void ConnectionManagerImpl::ActiveStream::encodeData(ActiveStreamEncoderFilter* return; } - std::list::iterator entry = commonEncodePrefix(filter, end_stream); + // Filter iteration may start at the current filter. + std::list::iterator entry = + commonEncodePrefix(filter, end_stream, filter_iteration_start_state); auto trailers_added_entry = encoder_filters_.end(); const bool trailers_exists_at_start = response_trailers_ != nullptr; for (; entry != encoder_filters_.end(); entry++) { + // If the filter pointed by entry has stopped for all frame type, return now. + if (handleDataIfStopAll(**entry, data, state_.encoder_filters_streaming_)) { + return; + } // If end_stream_ is marked for a filter, the data is not for this filter and filters after. // For details, please see the comment in the ActiveStream::decodeData() function. 
if ((*entry)->end_stream_) { @@ -1325,8 +1438,14 @@ void ConnectionManagerImpl::ActiveStream::encodeData(ActiveStreamEncoderFilter* if (end_stream) { state_.filter_call_state_ |= FilterCallState::LastDataFrame; } + + recordLatestDataFilter(entry, state_.latest_data_encoding_filter_, encoder_filters_); + (*entry)->end_stream_ = end_stream && !response_trailers_; FilterDataStatus status = (*entry)->handle_->encodeData(data, (*entry)->end_stream_); + if ((*entry)->end_stream_) { + (*entry)->handle_->encodeComplete(); + } state_.filter_call_state_ &= ~FilterCallState::EncodeData; if (end_stream) { state_.filter_call_state_ &= ~FilterCallState::LastDataFrame; @@ -1369,11 +1488,18 @@ void ConnectionManagerImpl::ActiveStream::encodeTrailers(ActiveStreamEncoderFilt return; } - std::list::iterator entry = commonEncodePrefix(filter, true); + // Filter iteration may start at the current filter. + std::list::iterator entry = + commonEncodePrefix(filter, true, FilterIterationStartState::CanStartFromCurrent); for (; entry != encoder_filters_.end(); entry++) { + // If the filter pointed by entry has stopped for all frame type, return now. 
+ if ((*entry)->stoppedAll()) { + return; + } ASSERT(!(state_.filter_call_state_ & FilterCallState::EncodeTrailers)); state_.filter_call_state_ |= FilterCallState::EncodeTrailers; FilterTrailersStatus status = (*entry)->handle_->encodeTrailers(trailers); + (*entry)->handle_->encodeComplete(); (*entry)->end_stream_ = true; state_.filter_call_state_ &= ~FilterCallState::EncodeTrailers; ENVOY_STREAM_LOG(trace, "encode trailers called: filter={} status={}", *this, @@ -1397,7 +1523,20 @@ void ConnectionManagerImpl::ActiveStream::maybeEndEncode(bool end_stream) { } } -void ConnectionManagerImpl::ActiveStream::onResetStream(StreamResetReason) { +bool ConnectionManagerImpl::ActiveStream::handleDataIfStopAll(ActiveStreamFilterBase& filter, + Buffer::Instance& data, + bool& filter_streaming) { + if (filter.stoppedAll()) { + ASSERT(!filter.canIterate()); + filter_streaming = + filter.iteration_state_ == ActiveStreamFilterBase::IterationState::StopAllWatermark; + filter.commonHandleBufferData(data); + return true; + } + return false; +} + +void ConnectionManagerImpl::ActiveStream::onResetStream(StreamResetReason, absl::string_view) { // NOTE: This function gets called in all of the following cases: // 1) We TX an app level reset // 2) The codec TX a codec level reset @@ -1426,18 +1565,22 @@ ConnectionManagerImpl::ActiveStream::requestHeadersForTags() const { return connection_manager_.config_.tracingConfig()->request_headers_for_tags_; } +bool ConnectionManagerImpl::ActiveStream::verbose() const { + return connection_manager_.config_.tracingConfig()->verbose_; +} + void ConnectionManagerImpl::ActiveStream::callHighWatermarkCallbacks() { ++high_watermark_count_; - if (watermark_callbacks_) { - watermark_callbacks_->onAboveWriteBufferHighWatermark(); + for (auto watermark_callbacks : watermark_callbacks_) { + watermark_callbacks->onAboveWriteBufferHighWatermark(); } } void ConnectionManagerImpl::ActiveStream::callLowWatermarkCallbacks() { ASSERT(high_watermark_count_ > 0); 
--high_watermark_count_; - if (watermark_callbacks_) { - watermark_callbacks_->onBelowWriteBufferLowWatermark(); + for (auto watermark_callbacks : watermark_callbacks_) { + watermark_callbacks->onBelowWriteBufferLowWatermark(); } } @@ -1469,7 +1612,7 @@ bool ConnectionManagerImpl::ActiveStream::createFilterChain() { } if (connection_manager_.config_.filterFactory().createUpgradeFilterChain( - upgrade->value().c_str(), upgrade_map, *this)) { + upgrade->value().getStringView(), upgrade_map, *this)) { state_.successful_upgrade_ = true; connection_manager_.stats_.named_.downstream_cx_upgrades_total_.inc(); connection_manager_.stats_.named_.downstream_cx_upgrades_active_.inc(); @@ -1495,8 +1638,13 @@ void ConnectionManagerImpl::ActiveStreamFilterBase::commonContinue() { ENVOY_STREAM_LOG(trace, "continuing filter chain: filter={}", parent_, static_cast(this)); - ASSERT(stopped_); - stopped_ = false; + ASSERT(!canIterate()); + // If iteration has stopped for all frame types, set iterate_from_current_filter_ to true so the + // filter iteration starts with the current filter instead of the next one. + if (stoppedAll()) { + iterate_from_current_filter_ = true; + } + allowIteration(); // Only resume with do100ContinueHeaders() if we've actually seen a 100-Continue. 
if (parent_.has_continue_headers_ && !continue_headers_continued_) { @@ -1526,16 +1674,18 @@ void ConnectionManagerImpl::ActiveStreamFilterBase::commonContinue() { if (trailers()) { doTrailers(); } + + iterate_from_current_filter_ = false; } bool ConnectionManagerImpl::ActiveStreamFilterBase::commonHandleAfter100ContinueHeadersCallback( FilterHeadersStatus status) { ASSERT(parent_.has_continue_headers_); ASSERT(!continue_headers_continued_); - ASSERT(!stopped_); + ASSERT(canIterate()); if (status == FilterHeadersStatus::StopIteration) { - stopped_ = true; + iteration_state_ = IterationState::StopSingleIteration; return false; } else { ASSERT(status == FilterHeadersStatus::Continue); @@ -1547,10 +1697,16 @@ bool ConnectionManagerImpl::ActiveStreamFilterBase::commonHandleAfter100Continue bool ConnectionManagerImpl::ActiveStreamFilterBase::commonHandleAfterHeadersCallback( FilterHeadersStatus status, bool& headers_only) { ASSERT(!headers_continued_); - ASSERT(!stopped_); + ASSERT(canIterate()); if (status == FilterHeadersStatus::StopIteration) { - stopped_ = true; + iteration_state_ = IterationState::StopSingleIteration; + return false; + } else if (status == FilterHeadersStatus::StopAllIterationAndBuffer) { + iteration_state_ = IterationState::StopAllBuffer; + return false; + } else if (status == FilterHeadersStatus::StopAllIterationAndWatermark) { + iteration_state_ = IterationState::StopAllWatermark; return false; } else if (status == FilterHeadersStatus::ContinueAndEndStream) { // Set headers_only to true so we know to end early if necessary, @@ -1586,7 +1742,7 @@ bool ConnectionManagerImpl::ActiveStreamFilterBase::commonHandleAfterDataCallbac FilterDataStatus status, Buffer::Instance& provided_data, bool& buffer_was_streaming) { if (status == FilterDataStatus::Continue) { - if (stopped_) { + if (iteration_state_ == IterationState::StopSingleIteration) { commonHandleBufferData(provided_data); commonContinue(); return false; @@ -1594,7 +1750,7 @@ bool 
ConnectionManagerImpl::ActiveStreamFilterBase::commonHandleAfterDataCallbac ASSERT(headers_continued_); } } else { - stopped_ = true; + iteration_state_ = IterationState::StopSingleIteration; if (status == FilterDataStatus::StopIterationAndBuffer || status == FilterDataStatus::StopIterationAndWatermark) { buffer_was_streaming = status == FilterDataStatus::StopIterationAndWatermark; @@ -1611,7 +1767,7 @@ bool ConnectionManagerImpl::ActiveStreamFilterBase::commonHandleAfterTrailersCal FilterTrailersStatus status) { if (status == FilterTrailersStatus::Continue) { - if (stopped_) { + if (iteration_state_ == IterationState::StopSingleIteration) { commonContinue(); return false; } else { @@ -1685,6 +1841,12 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::addDecodedData(Buffer::In parent_.addDecodedData(*this, data, streaming); } +void ConnectionManagerImpl::ActiveStreamDecoderFilter::injectDecodedDataToFilterChain( + Buffer::Instance& data, bool end_stream) { + parent_.decodeData(this, data, end_stream, + ActiveStream::FilterIterationStartState::CanStartFromCurrent); +} + void ConnectionManagerImpl::ActiveStreamDecoderFilter::continueDecoding() { commonContinue(); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::encode100ContinueHeaders( @@ -1706,7 +1868,8 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeHeaders(HeaderMapPt void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeData(Buffer::Instance& data, bool end_stream) { - parent_.encodeData(nullptr, data, end_stream); + parent_.encodeData(nullptr, data, end_stream, + ActiveStream::FilterIterationStartState::CanStartFromCurrent); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeTrailers(HeaderMapPtr&& trailers) { @@ -1733,7 +1896,7 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::requestDataTooLarge() { } else { parent_.connection_manager_.stats_.named_.downstream_rq_too_large_.inc(); sendLocalReply(Code::PayloadTooLarge, 
CodeUtility::toString(Code::PayloadTooLarge), nullptr, - absl::nullopt); + absl::nullopt, StreamInfo::ResponseCodeDetails::get().RequestPayloadTooLarge); } } @@ -1752,19 +1915,20 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter:: void ConnectionManagerImpl::ActiveStreamDecoderFilter::addDownstreamWatermarkCallbacks( DownstreamWatermarkCallbacks& watermark_callbacks) { - // This is called exactly once per stream, by the router filter. - // If there's ever a need for another filter to subscribe to watermark callbacks this can be - // turned into a vector. - ASSERT(parent_.watermark_callbacks_ == nullptr); - parent_.watermark_callbacks_ = &watermark_callbacks; + // This is called exactly once per upstream-stream, by the router filter. Therefore, we + // expect the same callbacks to not be registered twice. + ASSERT(std::find(parent_.watermark_callbacks_.begin(), parent_.watermark_callbacks_.end(), + &watermark_callbacks) == parent_.watermark_callbacks_.end()); + parent_.watermark_callbacks_.emplace(parent_.watermark_callbacks_.end(), &watermark_callbacks); for (uint32_t i = 0; i < parent_.high_watermark_count_; ++i) { watermark_callbacks.onAboveWriteBufferHighWatermark(); } } void ConnectionManagerImpl::ActiveStreamDecoderFilter::removeDownstreamWatermarkCallbacks( DownstreamWatermarkCallbacks& watermark_callbacks) { - ASSERT(parent_.watermark_callbacks_ == &watermark_callbacks); - parent_.watermark_callbacks_ = nullptr; + ASSERT(std::find(parent_.watermark_callbacks_.begin(), parent_.watermark_callbacks_.end(), + &watermark_callbacks) != parent_.watermark_callbacks_.end()); + parent_.watermark_callbacks_.remove(&watermark_callbacks); } bool ConnectionManagerImpl::ActiveStreamDecoderFilter::recreateStream() { @@ -1801,6 +1965,12 @@ void ConnectionManagerImpl::ActiveStreamEncoderFilter::addEncodedData(Buffer::In return parent_.addEncodedData(*this, data, streaming); } +void ConnectionManagerImpl::ActiveStreamEncoderFilter::injectEncodedDataToFilterChain( + 
Buffer::Instance& data, bool end_stream) { + parent_.encodeData(this, data, end_stream, + ActiveStream::FilterIterationStartState::CanStartFromCurrent); +} + HeaderMap& ConnectionManagerImpl::ActiveStreamEncoderFilter::addEncodedTrailers() { return parent_.addEncodedTrailers(); } @@ -1829,11 +1999,14 @@ void ConnectionManagerImpl::ActiveStreamEncoderFilter::responseDataTooLarge() { if (!headers_continued_) { // Make sure we won't end up with nested watermark calls from the body buffer. parent_.state_.encoder_filters_streaming_ = true; - stopped_ = false; + allowIteration(); + parent_.stream_info_.setResponseCodeDetails( + StreamInfo::ResponseCodeDetails::get().RequestHeadersTooLarge); Http::Utility::sendLocalReply( Grpc::Common::hasGrpcContentType(*parent_.request_headers_), [&](HeaderMapPtr&& response_headers, bool end_stream) -> void { + parent_.chargeStats(*response_headers); parent_.response_headers_ = std::move(response_headers); parent_.response_encoder_->encodeHeaders(*parent_.response_headers_, end_stream); parent_.state_.local_complete_ = end_stream; diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index 982817691beeb..06a79a9b03578 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -51,7 +51,7 @@ class ConnectionManagerImpl : Logger::Loggable, Runtime::RandomGenerator& random_generator, Http::Context& http_context, Runtime::Loader& runtime, const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cluster_manager, - Server::OverloadManager* overload_manager, Event::TimeSystem& time_system); + Server::OverloadManager* overload_manager, TimeSource& time_system); ~ConnectionManagerImpl(); static ConnectionManagerStats generateStats(const std::string& prefix, Stats::Scope& scope); @@ -85,7 +85,7 @@ class ConnectionManagerImpl : Logger::Loggable, codec_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); } - Event::TimeSystem& timeSystem() { return 
time_system_; } + TimeSource& timeSource() { return time_source_; } private: struct ActiveStream; @@ -95,16 +95,26 @@ class ConnectionManagerImpl : Logger::Loggable, */ struct ActiveStreamFilterBase : public virtual StreamFilterCallbacks { ActiveStreamFilterBase(ActiveStream& parent, bool dual_filter) - : parent_(parent), headers_continued_(false), continue_headers_continued_(false), - stopped_(false), end_stream_(false), dual_filter_(dual_filter) {} + : iteration_state_(IterationState::Continue), iterate_from_current_filter_(false), + parent_(parent), headers_continued_(false), continue_headers_continued_(false), + end_stream_(false), dual_filter_(dual_filter) {} + // Functions in the following block are called after the filter finishes processing + // corresponding data. Those functions handle state updates and data storage (if needed) + // according to the status returned by filter's callback functions. bool commonHandleAfter100ContinueHeadersCallback(FilterHeadersStatus status); bool commonHandleAfterHeadersCallback(FilterHeadersStatus status, bool& headers_only); - void commonHandleBufferData(Buffer::Instance& provided_data); bool commonHandleAfterDataCallback(FilterDataStatus status, Buffer::Instance& provided_data, bool& buffer_was_streaming); bool commonHandleAfterTrailersCallback(FilterTrailersStatus status); + // Buffers provided_data. + void commonHandleBufferData(Buffer::Instance& provided_data); + + // If iteration has stopped for all frame types, calls this function to buffer the data before + // the filter processes data. The function also updates streaming state. 
+ void commonBufferDataIfStopAll(Buffer::Instance& provided_data, bool& buffer_was_streaming); + void commonContinue(); virtual bool canContinue() PURE; virtual Buffer::WatermarkBufferPtr createBuffer() PURE; @@ -128,10 +138,35 @@ class ConnectionManagerImpl : Logger::Loggable, Tracing::Span& activeSpan() override; Tracing::Config& tracingConfig() override; + // Functions to set or get iteration state. + bool canIterate() { return iteration_state_ == IterationState::Continue; } + bool stoppedAll() { + return iteration_state_ == IterationState::StopAllBuffer || + iteration_state_ == IterationState::StopAllWatermark; + } + void allowIteration() { + ASSERT(iteration_state_ != IterationState::Continue); + iteration_state_ = IterationState::Continue; + } + + // The state of iteration. + enum class IterationState { + Continue, // Iteration has not stopped for any frame type. + StopSingleIteration, // Iteration has stopped for headers, 100-continue, or data. + StopAllBuffer, // Iteration has stopped for all frame types, and following data should + // be buffered. + StopAllWatermark, // Iteration has stopped for all frame types, and following data should + // be buffered until high watermark is reached. + }; + IterationState iteration_state_; + // If the filter resumes iteration from a StopAllBuffer/Watermark state, the current filter + // hasn't parsed data and trailers. As a result, the filter iteration should start with the + // current filter instead of the next one. If true, filter iteration starts with the current + // filter. Otherwise, starts with the next filter in the chain. + bool iterate_from_current_filter_; ActiveStream& parent_; bool headers_continued_ : 1; bool continue_headers_continued_ : 1; - bool stopped_ : 1; // If true, end_stream is called for this filter. 
bool end_stream_ : 1; const bool dual_filter_ : 1; @@ -164,23 +199,33 @@ class ConnectionManagerImpl : Logger::Loggable, parent_.decodeHeaders(this, *parent_.request_headers_, end_stream); } void doData(bool end_stream) override { - parent_.decodeData(this, *parent_.buffered_request_data_, end_stream); + parent_.decodeData(this, *parent_.buffered_request_data_, end_stream, + ActiveStream::FilterIterationStartState::CanStartFromCurrent); } void doTrailers() override { parent_.decodeTrailers(this, *parent_.request_trailers_); } const HeaderMapPtr& trailers() override { return parent_.request_trailers_; } // Http::StreamDecoderFilterCallbacks void addDecodedData(Buffer::Instance& data, bool streaming) override; + void injectDecodedDataToFilterChain(Buffer::Instance& data, bool end_stream) override; HeaderMap& addDecodedTrailers() override; void continueDecoding() override; const Buffer::Instance* decodingBuffer() override { return parent_.buffered_request_data_.get(); } + + void modifyDecodingBuffer(std::function callback) override { + ASSERT(parent_.state_.latest_data_decoding_filter_ == this); + callback(*parent_.buffered_request_data_.get()); + } + void sendLocalReply(Code code, absl::string_view body, std::function modify_headers, - const absl::optional grpc_status) override { + const absl::optional grpc_status, + absl::string_view details) override { + parent_.stream_info_.setResponseCodeDetails(details); parent_.sendLocalReply(is_grpc_request_, code, body, modify_headers, parent_.is_head_request_, - grpc_status); + grpc_status, details); } void encode100ContinueHeaders(HeaderMapPtr&& headers) override; void encodeHeaders(HeaderMapPtr&& headers, bool end_stream) override; @@ -197,12 +242,24 @@ class ConnectionManagerImpl : Logger::Loggable, uint32_t decoderBufferLimit() override { return parent_.buffer_limit_; } bool recreateStream() override; + void addUpstreamSocketOptions(const Network::Socket::OptionsSharedPtr& options) override { + 
Network::Socket::appendOptions(parent_.upstream_options_, options); + } + + Network::Socket::OptionsSharedPtr getUpstreamSocketOptions() const override { + return parent_.upstream_options_; + } + // Each decoder filter instance checks if the request passed to the filter is gRPC // so that we can issue gRPC local responses to gRPC requests. Filter's decodeHeaders() // called here may change the content type, so we must check it before the call. FilterHeadersStatus decodeHeaders(HeaderMap& headers, bool end_stream) { is_grpc_request_ = Grpc::Common::hasGrpcContentType(headers); - return handle_->decodeHeaders(headers, end_stream); + FilterHeadersStatus status = handle_->decodeHeaders(headers, end_stream); + if (end_stream) { + handle_->decodeComplete(); + } + return status; } void requestDataTooLarge(); @@ -236,13 +293,15 @@ class ConnectionManagerImpl : Logger::Loggable, parent_.encodeHeaders(this, *parent_.response_headers_, end_stream); } void doData(bool end_stream) override { - parent_.encodeData(this, *parent_.buffered_response_data_, end_stream); + parent_.encodeData(this, *parent_.buffered_response_data_, end_stream, + ActiveStream::FilterIterationStartState::CanStartFromCurrent); } void doTrailers() override { parent_.encodeTrailers(this, *parent_.response_trailers_); } const HeaderMapPtr& trailers() override { return parent_.response_trailers_; } // Http::StreamEncoderFilterCallbacks void addEncodedData(Buffer::Instance& data, bool streaming) override; + void injectEncodedDataToFilterChain(Buffer::Instance& data, bool end_stream) override; HeaderMap& addEncodedTrailers() override; void onEncoderFilterAboveWriteBufferHighWatermark() override; void onEncoderFilterBelowWriteBufferLowWatermark() override; @@ -252,6 +311,10 @@ class ConnectionManagerImpl : Logger::Loggable, const Buffer::Instance* encodingBuffer() override { return parent_.buffered_response_data_.get(); } + void modifyEncodingBuffer(std::function callback) override { + 
ASSERT(parent_.state_.latest_data_encoding_filter_ == this); + callback(*parent_.buffered_response_data_.get()); + } void responseDataTooLarge(); void responseDataDrained(); @@ -274,16 +337,28 @@ class ConnectionManagerImpl : Logger::Loggable, ActiveStream(ConnectionManagerImpl& connection_manager); ~ActiveStream(); + // Indicates which filter to start the iteration with. + enum class FilterIterationStartState { AlwaysStartFromNext, CanStartFromCurrent }; + void addStreamDecoderFilterWorker(StreamDecoderFilterSharedPtr filter, bool dual_filter); void addStreamEncoderFilterWorker(StreamEncoderFilterSharedPtr filter, bool dual_filter); void chargeStats(const HeaderMap& headers); + // Returns the encoder filter to start iteration with. std::list::iterator - commonEncodePrefix(ActiveStreamEncoderFilter* filter, bool end_stream); + commonEncodePrefix(ActiveStreamEncoderFilter* filter, bool end_stream, + FilterIterationStartState filter_iteration_start_state); + // Returns the decoder filter to start iteration with. + std::list::iterator + commonDecodePrefix(ActiveStreamDecoderFilter* filter, + FilterIterationStartState filter_iteration_start_state); const Network::Connection* connection(); void addDecodedData(ActiveStreamDecoderFilter& filter, Buffer::Instance& data, bool streaming); HeaderMap& addDecodedTrailers(); void decodeHeaders(ActiveStreamDecoderFilter* filter, HeaderMap& headers, bool end_stream); - void decodeData(ActiveStreamDecoderFilter* filter, Buffer::Instance& data, bool end_stream); + // Sends data through decoding filter chains. filter_iteration_start_state indicates which + // filter to start the iteration with. 
+ void decodeData(ActiveStreamDecoderFilter* filter, Buffer::Instance& data, bool end_stream, + FilterIterationStartState filter_iteration_start_state); void decodeTrailers(ActiveStreamDecoderFilter* filter, HeaderMap& trailers); void disarmRequestTimeout(); void maybeEndDecode(bool end_stream); @@ -292,17 +367,27 @@ class ConnectionManagerImpl : Logger::Loggable, void sendLocalReply(bool is_grpc_request, Code code, absl::string_view body, const std::function& modify_headers, bool is_head_request, - const absl::optional grpc_status); + const absl::optional grpc_status, + absl::string_view details); void encode100ContinueHeaders(ActiveStreamEncoderFilter* filter, HeaderMap& headers); void encodeHeaders(ActiveStreamEncoderFilter* filter, HeaderMap& headers, bool end_stream); - void encodeData(ActiveStreamEncoderFilter* filter, Buffer::Instance& data, bool end_stream); + // Sends data through encoding filter chains. filter_iteration_start_state indicates which + // filter to start the iteration with. + void encodeData(ActiveStreamEncoderFilter* filter, Buffer::Instance& data, bool end_stream, + FilterIterationStartState filter_iteration_start_state); void encodeTrailers(ActiveStreamEncoderFilter* filter, HeaderMap& trailers); void encodeMetadata(ActiveStreamEncoderFilter* filter, MetadataMapPtr&& metadata_map_ptr); void maybeEndEncode(bool end_stream); uint64_t streamId() { return stream_id_; } + // Returns true if filter has stopped iteration for all frame types. Otherwise, returns false. + // filter_streaming is the variable to indicate if stream is streaming, and its value may be + // changed by the function. 
+ bool handleDataIfStopAll(ActiveStreamFilterBase& filter, Buffer::Instance& data, + bool& filter_streaming); // Http::StreamCallbacks - void onResetStream(StreamResetReason reason) override; + void onResetStream(StreamResetReason reason, + absl::string_view transport_failure_reason) override; void onAboveWriteBufferHighWatermark() override; void onBelowWriteBufferLowWatermark() override; @@ -327,8 +412,9 @@ class ConnectionManagerImpl : Logger::Loggable, void addAccessLogHandler(AccessLog::InstanceSharedPtr handler) override; // Tracing::TracingConfig - virtual Tracing::OperationName operationName() const override; - virtual const std::vector& requestHeadersForTags() const override; + Tracing::OperationName operationName() const override; + const std::vector& requestHeadersForTags() const override; + bool verbose() const override; void traceRequest(); @@ -384,6 +470,10 @@ class ConnectionManagerImpl : Logger::Loggable, // True if this stream is internally created. Currently only used for // internal redirects or other streams created via recreateStream(). bool is_internally_created_ : 1; + + // Used to track which filter is the latest filter that has received data. + ActiveStreamEncoderFilter* latest_data_encoding_filter_{}; + ActiveStreamDecoderFilter* latest_data_decoding_filter_{}; }; // Possibly increases buffer_limit_ to the value of limit. @@ -422,7 +512,7 @@ class ConnectionManagerImpl : Logger::Loggable, StreamInfo::StreamInfoImpl stream_info_; absl::optional cached_route_; absl::optional cached_cluster_info_; - DownstreamWatermarkCallbacks* watermark_callbacks_{nullptr}; + std::list watermark_callbacks_{}; uint32_t buffer_limit_{0}; uint32_t high_watermark_count_{0}; const std::string* decorated_operation_{nullptr}; @@ -435,6 +525,7 @@ class ConnectionManagerImpl : Logger::Loggable, // Whether a filter has indicated that the response should be treated as a headers only // response. 
bool encoding_headers_only_{}; + Network::Socket::OptionsSharedPtr upstream_options_; }; typedef std::unique_ptr ActiveStreamPtr; @@ -489,7 +580,7 @@ class ConnectionManagerImpl : Logger::Loggable, // lookup in the hot path of processing each request. const Server::OverloadActionState& overload_stop_accepting_requests_ref_; const Server::OverloadActionState& overload_disable_keepalive_ref_; - Event::TimeSystem& time_system_; + TimeSource& time_source_; }; } // namespace Http diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index 2ebfe881d4d79..f061d33c99d23 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -10,6 +10,7 @@ #include "common/http/headers.h" #include "common/http/http1/codec_impl.h" #include "common/http/http2/codec_impl.h" +#include "common/http/path_utility.h" #include "common/http/utility.h" #include "common/network/utility.h" #include "common/runtime/uuid_util.h" @@ -41,11 +42,11 @@ ServerConnectionPtr ConnectionManagerUtility::autoCreateCodec( ServerConnectionCallbacks& callbacks, Stats::Scope& scope, const Http1Settings& http1_settings, const Http2Settings& http2_settings, const uint32_t max_request_headers_kb) { if (determineNextProtocol(connection, data) == Http2::ALPN_STRING) { - return ServerConnectionPtr{new Http2::ServerConnectionImpl( - connection, callbacks, scope, http2_settings, max_request_headers_kb)}; + return std::make_unique(connection, callbacks, scope, + http2_settings, max_request_headers_kb); } else { - return ServerConnectionPtr{ - new Http1::ServerConnectionImpl(connection, callbacks, http1_settings)}; + return std::make_unique(connection, callbacks, http1_settings, + max_request_headers_kb); } } @@ -186,7 +187,9 @@ Network::Address::InstanceConstSharedPtr ConnectionManagerUtility::mutateRequest // TODO(htuch): should this be under the config.userAgent() condition or in the outer scope? 
if (!local_info.nodeName().empty()) { - request_headers.insertEnvoyDownstreamServiceNode().value(local_info.nodeName()); + // Following setReference() is safe because local info is constant for the life of the server. + request_headers.insertEnvoyDownstreamServiceNode().value().setReference( + local_info.nodeName()); } } @@ -222,7 +225,8 @@ void ConnectionManagerUtility::mutateTracingRequestHeader(HeaderMap& request_hea return; } - std::string x_request_id = request_headers.RequestId()->value().c_str(); + // TODO(dnoe): Migrate uuidModBy and others below to take string_view (#6580) + std::string x_request_id(request_headers.RequestId()->value().getStringView()); uint64_t result; // Skip if x-request-id is corrupted. if (!UuidUtils::uuidModBy(x_request_id, result, 10000)) { @@ -279,9 +283,9 @@ void ConnectionManagerUtility::mutateXfccRequestHeader(HeaderMap& request_header // the XFCC header. if (config.forwardClientCert() == ForwardClientCertType::AppendForward || config.forwardClientCert() == ForwardClientCertType::SanitizeSet) { - const std::string uri_san_local_cert = connection.ssl()->uriSanLocalCertificate(); - if (!uri_san_local_cert.empty()) { - client_cert_details.push_back("By=" + uri_san_local_cert); + const auto uri_sans_local_cert = connection.ssl()->uriSanLocalCertificate(); + if (!uri_sans_local_cert.empty()) { + client_cert_details.push_back("By=" + uri_sans_local_cert[0]); } const std::string cert_digest = connection.ssl()->sha256PeerCertificateDigest(); if (!cert_digest.empty()) { @@ -301,10 +305,13 @@ void ConnectionManagerUtility::mutateXfccRequestHeader(HeaderMap& request_header client_cert_details.push_back("Subject=\"" + connection.ssl()->subjectPeerCertificate() + "\""); break; - case ClientCertDetailsType::URI: + case ClientCertDetailsType::URI: { // The "URI" key still exists even if the URI is empty. 
- client_cert_details.push_back("URI=" + connection.ssl()->uriSanPeerCertificate()); + const auto sans = connection.ssl()->uriSanPeerCertificate(); + const auto& uri_san = sans.empty() ? "" : sans[0]; + client_cert_details.push_back("URI=" + uri_san); break; + } case ClientCertDetailsType::DNS: { const std::vector dns_sans = connection.ssl()->dnsSansPeerCertificate(); if (!dns_sans.empty()) { @@ -361,5 +368,15 @@ void ConnectionManagerUtility::mutateResponseHeaders(HeaderMap& response_headers } } +/* static */ +bool ConnectionManagerUtility::maybeNormalizePath(HeaderMap& request_headers, + const ConnectionManagerConfig& config) { + ASSERT(request_headers.Path()); + if (config.shouldNormalizePath()) { + return PathUtil::canonicalPath(*request_headers.Path()); + } + return true; +} + } // namespace Http } // namespace Envoy diff --git a/source/common/http/conn_manager_utility.h b/source/common/http/conn_manager_utility.h index 0d313f185e649..126982df77533 100644 --- a/source/common/http/conn_manager_utility.h +++ b/source/common/http/conn_manager_utility.h @@ -59,6 +59,11 @@ class ConnectionManagerUtility { static void mutateResponseHeaders(HeaderMap& response_headers, const HeaderMap* request_headers, const std::string& via); + // Sanitize the path in the header map if forced by config. + // Side affect: the string view of Path header is invalidated. + // Return false if error happens during the sanitization. + static bool maybeNormalizePath(HeaderMap& request_headers, const ConnectionManagerConfig& config); + private: /** * Mutate request headers if request needs to be traced. 
diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index 93a61f46d0e83..fbc930ff4083e 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -24,7 +24,8 @@ ConnPoolImplBase::newPendingRequest(StreamDecoder& decoder, ConnectionPool::Call } void ConnPoolImplBase::purgePendingRequests( - const Upstream::HostDescriptionConstSharedPtr& host_description) { + const Upstream::HostDescriptionConstSharedPtr& host_description, + absl::string_view failure_reason) { // NOTE: We move the existing pending requests to a temporary list. This is done so that // if retry logic submits a new request to the pool, we don't fail it inline. std::list pending_requests_to_purge(std::move(pending_requests_)); @@ -33,7 +34,7 @@ void ConnPoolImplBase::purgePendingRequests( pending_requests_to_purge.front()->removeFromList(pending_requests_to_purge); host_->cluster().stats().upstream_rq_pending_failure_eject_.inc(); request->callbacks_.onPoolFailure(ConnectionPool::PoolFailureReason::ConnectionFailure, - host_description); + failure_reason, host_description); } } diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index dcdfe32cd20bf..2c08842281947 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -4,6 +4,8 @@ #include "common/common/linked_object.h" +#include "absl/strings/string_view.h" + namespace Envoy { namespace Http { @@ -37,7 +39,8 @@ class ConnPoolImplBase : protected Logger::Loggable { void onPendingRequestCancel(PendingRequest& request); // Fails all pending requests, calling onPoolFailure on the associated callbacks. - void purgePendingRequests(const Upstream::HostDescriptionConstSharedPtr& host_description); + void purgePendingRequests(const Upstream::HostDescriptionConstSharedPtr& host_description, + absl::string_view failure_reason); // Must be implemented by sub class. Attempts to drain inactive clients. 
virtual void checkForDrained() PURE; diff --git a/source/common/http/date_provider_impl.cc b/source/common/http/date_provider_impl.cc index 8f218d867a5a7..3f6158cb90aaf 100644 --- a/source/common/http/date_provider_impl.cc +++ b/source/common/http/date_provider_impl.cc @@ -10,7 +10,7 @@ DateFormatter DateProviderImplBase::date_formatter_("%a, %d %b %Y %H:%M:%S GMT") TlsCachingDateProviderImpl::TlsCachingDateProviderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator& tls) - : DateProviderImplBase(dispatcher.timeSystem()), tls_(tls.allocateSlot()), + : DateProviderImplBase(dispatcher.timeSource()), tls_(tls.allocateSlot()), refresh_timer_(dispatcher.createTimer([this]() -> void { onRefreshDate(); })) { onRefreshDate(); diff --git a/source/common/http/header_map_impl.cc b/source/common/http/header_map_impl.cc index e5c642349176a..20747d7e6efe6 100644 --- a/source/common/http/header_map_impl.cc +++ b/source/common/http/header_map_impl.cc @@ -39,16 +39,19 @@ HeaderString::HeaderString() : type_(Type::Inline) { clear(); static_assert(sizeof(inline_buffer_) >= MaxIntegerLength, ""); static_assert(MinDynamicCapacity >= MaxIntegerLength, ""); + ASSERT(valid()); } HeaderString::HeaderString(const LowerCaseString& ref_value) : type_(Type::Reference) { buffer_.ref_ = ref_value.get().c_str(); string_length_ = ref_value.get().size(); + ASSERT(valid()); } HeaderString::HeaderString(const std::string& ref_value) : type_(Type::Reference) { buffer_.ref_ = ref_value.c_str(); string_length_ = ref_value.size(); + ASSERT(valid()); } HeaderString::HeaderString(HeaderString&& move_value) { @@ -76,6 +79,7 @@ HeaderString::HeaderString(HeaderString&& move_value) { break; } } + ASSERT(valid()); } HeaderString::~HeaderString() { freeDynamic(); } @@ -86,6 +90,8 @@ void HeaderString::freeDynamic() { } } +bool HeaderString::valid() const { return validHeaderString(getStringView()); } + void HeaderString::append(const char* data, uint32_t size) { switch (type_) { case Type::Reference: 
{ @@ -143,6 +149,7 @@ void HeaderString::append(const char* data, uint32_t size) { memcpy(buffer_.dynamic_ + string_length_, data, size); string_length_ += size; buffer_.dynamic_[string_length_] = 0; + ASSERT(valid()); } void HeaderString::clear() { @@ -203,6 +210,11 @@ void HeaderString::setCopy(const char* data, uint32_t size) { memcpy(buffer_.dynamic_, data, size); buffer_.dynamic_[size] = 0; string_length_ = size; + ASSERT(valid()); +} + +void HeaderString::setCopy(absl::string_view view) { + this->setCopy(view.data(), static_cast(view.size())); } void HeaderString::setInteger(uint64_t value) { @@ -235,6 +247,7 @@ void HeaderString::setReference(const std::string& ref_value) { type_ = Type::Reference; buffer_.ref_ = ref_value.c_str(); string_length_ = ref_value.size(); + ASSERT(valid()); } // Specialization needed for HeaderMapImpl::HeaderList::insert() when key is LowerCaseString. @@ -263,7 +276,7 @@ void HeaderMapImpl::HeaderEntryImpl::value(absl::string_view value) { void HeaderMapImpl::HeaderEntryImpl::value(uint64_t value) { value_.setInteger(value); } void HeaderMapImpl::HeaderEntryImpl::value(const HeaderEntry& header) { - value(header.value().c_str(), header.value().size()); + value(header.value().getStringView()); } #define INLINE_HEADER_STATIC_MAP_ENTRY(name) \ @@ -315,9 +328,9 @@ void HeaderMapImpl::copyFrom(const HeaderMap& header_map) { [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { // TODO(mattklein123) PERF: Avoid copying here if not necessary. 
HeaderString key_string; - key_string.setCopy(header.key().c_str(), header.key().size()); + key_string.setCopy(header.key().getStringView()); HeaderString value_string; - value_string.setCopy(header.value().c_str(), header.value().size()); + value_string.setCopy(header.value().getStringView()); static_cast(context)->addViaMove(std::move(key_string), std::move(value_string)); @@ -332,7 +345,7 @@ bool HeaderMapImpl::operator==(const HeaderMapImpl& rhs) const { } for (auto i = headers_.begin(), j = rhs.headers_.begin(); i != headers_.end(); ++i, ++j) { - if (i->key() != j->key().c_str() || i->value() != j->value().c_str()) { + if (i->key() != j->key().getStringView() || i->value() != j->value().getStringView()) { return false; } } @@ -340,15 +353,17 @@ bool HeaderMapImpl::operator==(const HeaderMapImpl& rhs) const { return true; } +bool HeaderMapImpl::operator!=(const HeaderMapImpl& rhs) const { return !operator==(rhs); } + void HeaderMapImpl::insertByKey(HeaderString&& key, HeaderString&& value) { - EntryCb cb = ConstSingleton::get().find(key.c_str()); + EntryCb cb = ConstSingleton::get().find(key.getStringView()); if (cb) { key.clear(); StaticLookupResponse ref_lookup_response = cb(*this); if (*ref_lookup_response.entry_ == nullptr) { maybeCreateInline(ref_lookup_response.entry_, *ref_lookup_response.key_, std::move(value)); } else { - appendToHeader((*ref_lookup_response.entry_)->value(), value.c_str()); + appendToHeader((*ref_lookup_response.entry_)->value(), value.getStringView()); value.clear(); } } else { @@ -360,9 +375,9 @@ void HeaderMapImpl::insertByKey(HeaderString&& key, HeaderString&& value) { void HeaderMapImpl::addViaMove(HeaderString&& key, HeaderString&& value) { // If this is an inline header, we can't addViaMove, because we'll overwrite // the existing value. 
- auto* entry = getExistingInline(key.c_str()); + auto* entry = getExistingInline(key.getStringView()); if (entry != nullptr) { - appendToHeader(entry->value(), value.c_str()); + appendToHeader(entry->value(), value.getStringView()); key.clear(); value.clear(); } else { @@ -393,7 +408,7 @@ void HeaderMapImpl::addReferenceKey(const LowerCaseString& key, const std::strin } void HeaderMapImpl::addCopy(const LowerCaseString& key, uint64_t value) { - auto* entry = getExistingInline(key.get().c_str()); + auto* entry = getExistingInline(key.get()); if (entry != nullptr) { char buf[32]; StringUtil::itoa(buf, sizeof(buf), value); @@ -410,7 +425,7 @@ void HeaderMapImpl::addCopy(const LowerCaseString& key, uint64_t value) { } void HeaderMapImpl::addCopy(const LowerCaseString& key, const std::string& value) { - auto* entry = getExistingInline(key.get().c_str()); + auto* entry = getExistingInline(key.get()); if (entry != nullptr) { appendToHeader(entry->value(), value); return; @@ -488,7 +503,7 @@ void HeaderMapImpl::iterateReverse(ConstIterateCb cb, void* context) const { HeaderMap::Lookup HeaderMapImpl::lookup(const LowerCaseString& key, const HeaderEntry** entry) const { - EntryCb cb = ConstSingleton::get().find(key.get().c_str()); + EntryCb cb = ConstSingleton::get().find(key.get()); if (cb) { // The accessor callbacks for predefined inline headers take a HeaderMapImpl& as an argument; // even though we don't make any modifications, we need to cast_cast in order to use the @@ -510,7 +525,7 @@ HeaderMap::Lookup HeaderMapImpl::lookup(const LowerCaseString& key, } void HeaderMapImpl::remove(const LowerCaseString& key) { - EntryCb cb = ConstSingleton::get().find(key.get().c_str()); + EntryCb cb = ConstSingleton::get().find(key.get()); if (cb) { StaticLookupResponse ref_lookup_response = cb(*this); removeInline(ref_lookup_response.entry_); @@ -531,7 +546,7 @@ void HeaderMapImpl::removePrefix(const LowerCaseString& prefix) { if (to_remove) { // If this header should be removed, 
make sure any references in the // static lookup table are cleared as well. - EntryCb cb = ConstSingleton::get().find(entry.key().c_str()); + EntryCb cb = ConstSingleton::get().find(entry.key().getStringView()); if (cb) { StaticLookupResponse ref_lookup_response = cb(*this); if (ref_lookup_response.entry_) { @@ -569,7 +584,7 @@ HeaderMapImpl::HeaderEntryImpl& HeaderMapImpl::maybeCreateInline(HeaderEntryImpl return **entry; } -HeaderMapImpl::HeaderEntryImpl* HeaderMapImpl::getExistingInline(const char* key) { +HeaderMapImpl::HeaderEntryImpl* HeaderMapImpl::getExistingInline(absl::string_view key) { EntryCb cb = ConstSingleton::get().find(key); if (cb) { StaticLookupResponse ref_lookup_response = cb(*this); diff --git a/source/common/http/header_map_impl.h b/source/common/http/header_map_impl.h index 564fbd0c7495e..ffa2e069f33d6 100644 --- a/source/common/http/header_map_impl.h +++ b/source/common/http/header_map_impl.h @@ -61,6 +61,7 @@ class HeaderMapImpl : public HeaderMap, NonCopyable { * comparison (order matters). */ bool operator==(const HeaderMapImpl& rhs) const; + bool operator!=(const HeaderMapImpl& rhs) const; // Http::HeaderMap void addReference(const LowerCaseString& key, const std::string& value) override; @@ -79,6 +80,7 @@ class HeaderMapImpl : public HeaderMap, NonCopyable { void remove(const LowerCaseString& key) override; void removePrefix(const LowerCaseString& key) override; size_t size() const override { return headers_.size(); } + bool empty() const override { return headers_.empty(); } protected: // For tests only, unoptimized, they aren't intended for regular HeaderMapImpl users. 
@@ -136,7 +138,9 @@ class HeaderMapImpl : public HeaderMap, NonCopyable { public: HeaderList() : pseudo_headers_end_(headers_.end()) {} - template bool isPseudoHeader(const Key& key) { return key.c_str()[0] == ':'; } + template bool isPseudoHeader(const Key& key) { + return !key.getStringView().empty() && key.getStringView()[0] == ':'; + } template std::list::iterator insert(Key&& key, Value&&... value) { @@ -176,6 +180,7 @@ class HeaderMapImpl : public HeaderMap, NonCopyable { std::list::const_reverse_iterator rbegin() const { return headers_.rbegin(); } std::list::const_reverse_iterator rend() const { return headers_.rend(); } size_t size() const { return headers_.size(); } + bool empty() const { return headers_.empty(); } private: std::list headers_; @@ -186,7 +191,7 @@ class HeaderMapImpl : public HeaderMap, NonCopyable { HeaderEntryImpl& maybeCreateInline(HeaderEntryImpl** entry, const LowerCaseString& key); HeaderEntryImpl& maybeCreateInline(HeaderEntryImpl** entry, const LowerCaseString& key, HeaderString&& value); - HeaderEntryImpl* getExistingInline(const char* key); + HeaderEntryImpl* getExistingInline(absl::string_view key); void removeInline(HeaderEntryImpl** entry); diff --git a/source/common/http/header_utility.cc b/source/common/http/header_utility.cc index 1bb3d4d77c4b1..396fdb624f171 100644 --- a/source/common/http/header_utility.cc +++ b/source/common/http/header_utility.cc @@ -87,16 +87,17 @@ bool HeaderUtility::matchHeaders(const Http::HeaderMap& request_headers, } bool match; + const absl::string_view header_view = header->value().getStringView(); switch (header_data.header_match_type_) { case HeaderMatchType::Value: - match = header_data.value_.empty() || header->value() == header_data.value_.c_str(); + match = header_data.value_.empty() || header_view == header_data.value_; break; case HeaderMatchType::Regex: - match = std::regex_match(header->value().c_str(), header_data.regex_pattern_); + match = std::regex_match(header_view.begin(), 
header_view.end(), header_data.regex_pattern_); break; case HeaderMatchType::Range: { int64_t header_value = 0; - match = StringUtil::atol(header->value().c_str(), header_value, 10) && + match = absl::SimpleAtoi(header_view, &header_value) && header_value >= header_data.range_.start() && header_value < header_data.range_.end(); break; } @@ -104,10 +105,10 @@ bool HeaderUtility::matchHeaders(const Http::HeaderMap& request_headers, match = true; break; case HeaderMatchType::Prefix: - match = absl::StartsWith(header->value().getStringView(), header_data.value_); + match = absl::StartsWith(header_view, header_data.value_); break; case HeaderMatchType::Suffix: - match = absl::EndsWith(header->value().getStringView(), header_data.value_); + match = absl::EndsWith(header_view, header_data.value_); break; default: NOT_REACHED_GCOVR_EXCL_LINE; @@ -120,9 +121,9 @@ void HeaderUtility::addHeaders(Http::HeaderMap& headers, const Http::HeaderMap& headers_to_add.iterate( [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { Http::HeaderString k; - k.setCopy(header.key().c_str(), header.key().size()); + k.setCopy(header.key().getStringView()); Http::HeaderString v; - v.setCopy(header.value().c_str(), header.value().size()); + v.setCopy(header.value().getStringView()); static_cast(context)->addViaMove(std::move(k), std::move(v)); return Http::HeaderMap::Iterate::Continue; }, diff --git a/source/common/http/headers.h b/source/common/http/headers.h index 0f90238da3d70..38403792c545e 100644 --- a/source/common/http/headers.h +++ b/source/common/http/headers.h @@ -36,6 +36,7 @@ class HeaderValues { const LowerCaseString Cookie{"cookie"}; const LowerCaseString Date{"date"}; const LowerCaseString EnvoyAttemptCount{"x-envoy-attempt-count"}; + const LowerCaseString EnvoyAuthPartialBody{"x-envoy-auth-partial-body"}; const LowerCaseString EnvoyDegraded{"x-envoy-degraded"}; const LowerCaseString EnvoyDownstreamServiceCluster{"x-envoy-downstream-service-cluster"}; 
const LowerCaseString EnvoyDownstreamServiceNode{"x-envoy-downstream-service-node"}; @@ -170,10 +171,13 @@ class HeaderValues { struct { const std::string Connect{"CONNECT"}; + const std::string Delete{"DELETE"}; const std::string Get{"GET"}; const std::string Head{"HEAD"}; const std::string Post{"POST"}; + const std::string Put{"PUT"}; const std::string Options{"OPTIONS"}; + const std::string Trace{"TRACE"}; } MethodValues; struct { diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index fcb9bf0b5ebc2..ded79fb00a93c 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -42,6 +42,9 @@ void StreamEncoderImpl::encodeHeader(const char* key, uint32_t key_size, const c connection_.addCharToBuffer('\r'); connection_.addCharToBuffer('\n'); } +void StreamEncoderImpl::encodeHeader(absl::string_view key, absl::string_view value) { + this->encodeHeader(key.data(), key.size(), value.data(), value.size()); +} void StreamEncoderImpl::encode100ContinueHeaders(const HeaderMap& headers) { ASSERT(headers.Status()->value() == "100"); @@ -54,11 +57,11 @@ void StreamEncoderImpl::encodeHeaders(const HeaderMap& headers, bool end_stream) bool saw_content_length = false; headers.iterate( [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - const char* key_to_use = header.key().c_str(); + absl::string_view key_to_use = header.key().getStringView(); uint32_t key_size_to_use = header.key().size(); // Translate :authority -> host so that upper layers do not need to deal with this. 
if (key_size_to_use > 1 && key_to_use[0] == ':' && key_to_use[1] == 'a') { - key_to_use = Headers::get().HostLegacy.get().c_str(); + key_to_use = absl::string_view(Headers::get().HostLegacy.get()); key_size_to_use = Headers::get().HostLegacy.get().size(); } @@ -67,8 +70,8 @@ void StreamEncoderImpl::encodeHeaders(const HeaderMap& headers, bool end_stream) return HeaderMap::Iterate::Continue; } - static_cast(context)->encodeHeader( - key_to_use, key_size_to_use, header.value().c_str(), header.value().size()); + static_cast(context)->encodeHeader(key_to_use, + header.value().getStringView()); return HeaderMap::Iterate::Continue; }, this); @@ -265,14 +268,14 @@ void RequestStreamEncoderImpl::encodeHeaders(const HeaderMap& headers, bool end_ if (!method || !path) { throw CodecClientException(":method and :path must be specified"); } - if (method->value() == Headers::get().MethodValues.Head.c_str()) { + if (method->value() == Headers::get().MethodValues.Head) { head_request_ = true; } connection_.onEncodeHeaders(headers); connection_.reserveBuffer(std::max(4096U, path->value().size() + 4096)); - connection_.copyToBuffer(method->value().c_str(), method->value().size()); + connection_.copyToBuffer(method->value().getStringView().data(), method->value().size()); connection_.addCharToBuffer(' '); - connection_.copyToBuffer(path->value().c_str(), path->value().size()); + connection_.copyToBuffer(path->value().getStringView().data(), path->value().size()); connection_.copyToBuffer(REQUEST_POSTFIX, sizeof(REQUEST_POSTFIX) - 1); StreamEncoderImpl::encodeHeaders(headers, end_stream); @@ -316,9 +319,11 @@ const ToLowerTable& ConnectionImpl::toLowerTable() { return *table; } -ConnectionImpl::ConnectionImpl(Network::Connection& connection, http_parser_type type) +ConnectionImpl::ConnectionImpl(Network::Connection& connection, http_parser_type type, + uint32_t max_headers_kb) : connection_(connection), output_buffer_([&]() -> void { this->onBelowLowWatermark(); }, - [&]() -> void { 
this->onAboveHighWatermark(); }) { + [&]() -> void { this->onAboveHighWatermark(); }), + max_headers_kb_(max_headers_kb) { output_buffer_.setWatermarks(connection.bufferLimit()); http_parser_init(&parser_, type); parser_.data = this; @@ -326,7 +331,7 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, http_parser_type void ConnectionImpl::completeLastHeader() { ENVOY_CONN_LOG(trace, "completed header: key={} value={}", connection_, - current_header_field_.c_str(), current_header_value_.c_str()); + current_header_field_.getStringView(), current_header_value_.getStringView()); if (!current_header_field_.empty()) { toLowerTable().toLowerCase(current_header_field_.buffer(), current_header_field_.size()); current_header_map_->addViaMove(std::move(current_header_field_), @@ -416,9 +421,24 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { // Ignore trailers. return; } + // http-parser should filter for this + // (https://tools.ietf.org/html/rfc7230#section-3.2.6), but it doesn't today. HeaderStrings + // have an invariant that they must not contain embedded zero characters + // (NUL, ASCII 0x0). 
+ if (absl::string_view(data, length).find('\0') != absl::string_view::npos) { + throw CodecProtocolException("http/1.1 protocol error: header value contains NUL"); + } header_parsing_state_ = HeaderParsingState::Value; current_header_value_.append(data, length); + + const uint32_t total = + current_header_field_.size() + current_header_value_.size() + current_header_map_->byteSize(); + if (total > (max_headers_kb_ * 1024)) { + error_code_ = Http::Code::RequestHeaderFieldsTooLarge; + sendProtocolError(); + throw CodecProtocolException("headers size exceeds limit"); + } } int ConnectionImpl::onHeadersCompleteBase() { @@ -471,8 +491,9 @@ void ConnectionImpl::onResetStreamBase(StreamResetReason reason) { ServerConnectionImpl::ServerConnectionImpl(Network::Connection& connection, ServerConnectionCallbacks& callbacks, - Http1Settings settings) - : ConnectionImpl(connection, HTTP_REQUEST), callbacks_(callbacks), codec_settings_(settings) {} + Http1Settings settings, uint32_t max_request_headers_kb) + : ConnectionImpl(connection, HTTP_REQUEST, max_request_headers_kb), callbacks_(callbacks), + codec_settings_(settings) {} void ServerConnectionImpl::onEncodeComplete() { ASSERT(active_request_); @@ -490,8 +511,9 @@ void ServerConnectionImpl::handlePath(HeaderMapImpl& headers, unsigned int metho bool is_connect = (method == HTTP_CONNECT); // The url is relative or a wildcard when the method is OPTIONS. Nothing to do here. 
- if (active_request_->request_url_.c_str()[0] == '/' || - ((method == HTTP_OPTIONS) && active_request_->request_url_.c_str()[0] == '*')) { + if (!active_request_->request_url_.getStringView().empty() && + (active_request_->request_url_.getStringView()[0] == '/' || + ((method == HTTP_OPTIONS) && active_request_->request_url_.getStringView()[0] == '*'))) { headers.addViaMove(std::move(path), std::move(active_request_->request_url_)); return; } @@ -522,7 +544,7 @@ void ServerConnectionImpl::handlePath(HeaderMapImpl& headers, unsigned int metho // forward the received Host field-value. headers.insertHost().value(std::string(absolute_url.host_and_port())); - headers.insertPath().value(std::string(absolute_url.path())); + headers.insertPath().value(std::string(absolute_url.path_and_query_params())); active_request_->request_url_.clear(); } @@ -643,7 +665,7 @@ void ServerConnectionImpl::onBelowLowWatermark() { } ClientConnectionImpl::ClientConnectionImpl(Network::Connection& connection, ConnectionCallbacks&) - : ConnectionImpl(connection, HTTP_RESPONSE) {} + : ConnectionImpl(connection, HTTP_RESPONSE, MAX_RESPONSE_HEADERS_KB) {} bool ClientConnectionImpl::cannotHaveBody() { if ((!pending_responses_.empty() && pending_responses_.front().head_request_) || @@ -659,13 +681,11 @@ StreamEncoder& ClientConnectionImpl::newStream(StreamDecoder& response_decoder) if (resetStreamCalled()) { throw CodecClientException("cannot create new streams after calling reset"); } - // Streams are responsible for unwinding any outstanding readDisable(true) - // calls done on the underlying connection as they are destroyed. As this is - // the only place a HTTP/1 stream is destroyed where the Network::Connection is - // reused, unwind any outstanding readDisable() calls here. - while (!connection_.readEnabled()) { - connection_.readDisable(false); - } + + // If reads were disabled due to flow control, we expect reads to always be enabled again before + // reusing this connection. 
This is done when the final pipeline response is received. + ASSERT(connection_.readEnabled()); + request_encoder_ = std::make_unique(*this); pending_responses_.emplace_back(&response_decoder); return *request_encoder_; @@ -723,6 +743,18 @@ void ClientConnectionImpl::onMessageComplete() { PendingResponse response = pending_responses_.front(); pending_responses_.pop_front(); + // Streams are responsible for unwinding any outstanding readDisable(true) + // calls done on the underlying connection as they are destroyed. As this is + // the only place a HTTP/1 stream is destroyed where the Network::Connection is + // reused, unwind any outstanding readDisable() calls here. Only do this if there are no + // pipelined responses remaining. Also do this before we dispatch end_stream in case the caller + // immediately reuses the connection. + if (pending_responses_.empty()) { + while (!connection_.readEnabled()) { + connection_.readDisable(false); + } + } + if (deferred_end_stream_headers_) { response.decoder_->decodeHeaders(std::move(deferred_end_stream_headers_), true); deferred_end_stream_headers_.reset(); diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index 772807c9d46e6..27b80e5e5b81b 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -68,6 +68,13 @@ class StreamEncoderImpl : public StreamEncoder, */ void encodeHeader(const char* key, uint32_t key_size, const char* value, uint32_t value_size); + /** + * Called to encode an individual header. + * @param key supplies the header to encode as a string_view. + * @param value supplies the value to encode as a string_view. + */ + void encodeHeader(absl::string_view key, absl::string_view value); + /** * Called to finalize a stream encode. 
*/ @@ -165,7 +172,8 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable pending_responses_; // Set true between receiving 100-Continue headers and receiving the spurious onMessageComplete. bool ignore_message_complete_for_100_continue_{}; + + // The default limit of 80 KiB is the vanilla http_parser behaviour. + static constexpr uint32_t MAX_RESPONSE_HEADERS_KB = 80; }; } // namespace Http1 diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index 2b25ede2bc869..d8ad735f9d249 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -59,9 +59,15 @@ void ConnPoolImpl::addDrainedCallback(DrainedCb cb) { checkForDrained(); } +bool ConnPoolImpl::hasActiveConnections() const { + return !pending_requests_.empty() || !busy_clients_.empty(); +} + void ConnPoolImpl::attachRequestToClient(ActiveClient& client, StreamDecoder& response_decoder, ConnectionPool::Callbacks& callbacks) { ASSERT(!client.stream_wrapper_); + host_->cluster().stats().upstream_rq_total_.inc(); + host_->stats().rq_total_.inc(); client.stream_wrapper_ = std::make_unique(response_decoder, client); callbacks.onPoolReady(*client.stream_wrapper_, client.real_host_description_); } @@ -86,8 +92,6 @@ void ConnPoolImpl::createNewConnection() { ConnectionPool::Cancellable* ConnPoolImpl::newStream(StreamDecoder& response_decoder, ConnectionPool::Callbacks& callbacks) { - host_->cluster().stats().upstream_rq_total_.inc(); - host_->stats().rq_total_.inc(); if (!ready_clients_.empty()) { ready_clients_.front()->moveBetweenLists(ready_clients_, busy_clients_); ENVOY_CONN_LOG(debug, "using existing connection", *busy_clients_.front()->codec_client_); @@ -103,14 +107,15 @@ ConnectionPool::Cancellable* ConnPoolImpl::newStream(StreamDecoder& response_dec } // If we have no connections at all, make one no matter what so we don't starve. 
- if ((ready_clients_.size() == 0 && busy_clients_.size() == 0) || can_create_connection) { + if ((ready_clients_.empty() && busy_clients_.empty()) || can_create_connection) { createNewConnection(); } return newPendingRequest(response_decoder, callbacks); } else { ENVOY_LOG(debug, "max pending requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, nullptr); + callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), + nullptr); host_->cluster().stats().upstream_rq_pending_overflow_.inc(); return nullptr; } @@ -120,7 +125,8 @@ void ConnPoolImpl::onConnectionEvent(ActiveClient& client, Network::ConnectionEv if (event == Network::ConnectionEvent::RemoteClose || event == Network::ConnectionEvent::LocalClose) { // The client died. - ENVOY_CONN_LOG(debug, "client disconnected", *client.codec_client_); + ENVOY_CONN_LOG(debug, "client disconnected, failure reason: {}", *client.codec_client_, + client.codec_client_->connectionFailureReason()); ActiveClientPtr removed; bool check_for_drained = true; if (client.stream_wrapper_) { @@ -154,7 +160,10 @@ void ConnPoolImpl::onConnectionEvent(ActiveClient& client, Network::ConnectionEv // that is behaving badly, requests can get stuck here in the pending state. If we see a // connect failure, we purge all pending requests so that calling code can determine what to // do with the request. 
- purgePendingRequests(client.real_host_description_); + ENVOY_CONN_LOG(debug, "purge pending, failure reason: {}", *client.codec_client_, + client.codec_client_->connectionFailureReason()); + purgePendingRequests(client.real_host_description_, + client.codec_client_->connectionFailureReason()); } dispatcher_.deferredDelete(std::move(removed)); @@ -270,6 +279,12 @@ void ConnPoolImpl::StreamWrapper::decodeHeaders(HeaderMapPtr&& headers, bool end saw_close_header_ = true; parent_.parent_.host_->cluster().stats().upstream_cx_close_notify_.inc(); } + if (!saw_close_header_ && headers->ProxyConnection() && + absl::EqualsIgnoreCase(headers->ProxyConnection()->value().getStringView(), + Headers::get().ConnectionValues.Close)) { + saw_close_header_ = true; + parent_.parent_.host_->cluster().stats().upstream_cx_close_notify_.inc(); + } StreamDecoderWrapper::decodeHeaders(std::move(headers), end_stream); } @@ -285,7 +300,7 @@ ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) remaining_requests_(parent_.host_->cluster().maxRequestsPerConnection()) { parent_.conn_connect_ms_ = std::make_unique( - parent_.host_->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher_.timeSystem()); + parent_.host_->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher_.timeSource()); Upstream::Host::CreateConnectionData data = parent_.host_->createConnection(parent_.dispatcher_, parent_.socket_options_, nullptr); real_host_description_ = data.host_description_; @@ -298,7 +313,7 @@ ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) parent_.host_->stats().cx_total_.inc(); parent_.host_->stats().cx_active_.inc(); conn_length_ = std::make_unique( - parent_.host_->cluster().stats().upstream_cx_length_ms_, parent_.dispatcher_.timeSystem()); + parent_.host_->cluster().stats().upstream_cx_length_ms_, parent_.dispatcher_.timeSource()); connect_timer_->enableTimer(parent_.host_->cluster().connectTimeout()); 
parent_.host_->cluster().resourceManager(parent_.priority_).connections().inc(); @@ -325,7 +340,7 @@ void ConnPoolImpl::ActiveClient::onConnectTimeout() { codec_client_->close(); } -CodecClientPtr ConnPoolImplProd::createCodecClient(Upstream::Host::CreateConnectionData& data) { +CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) { CodecClientPtr codec{new CodecClientProd(CodecClient::Type::HTTP1, std::move(data.connection_), data.host_description_, dispatcher_)}; return codec; diff --git a/source/common/http/http1/conn_pool.h b/source/common/http/http1/conn_pool.h index 8d1d6ef15278b..9f28b0ac8c587 100644 --- a/source/common/http/http1/conn_pool.h +++ b/source/common/http/http1/conn_pool.h @@ -40,6 +40,7 @@ class ConnPoolImpl : public ConnectionPool::Instance, public ConnPoolImplBase { Http::Protocol protocol() const override { return Http::Protocol::Http11; } void addDrainedCallback(DrainedCb cb) override; void drainConnections() override; + bool hasActiveConnections() const override; ConnectionPool::Cancellable* newStream(StreamDecoder& response_decoder, ConnectionPool::Callbacks& callbacks) override; @@ -64,7 +65,9 @@ class ConnPoolImpl : public ConnectionPool::Instance, public ConnPoolImplBase { void onDecodeComplete() override; // Http::StreamCallbacks - void onResetStream(StreamResetReason) override { parent_.parent_.onDownstreamReset(parent_); } + void onResetStream(StreamResetReason, absl::string_view) override { + parent_.parent_.onDownstreamReset(parent_); + } void onAboveWriteBufferHighWatermark() override {} void onBelowWriteBufferLowWatermark() override {} @@ -125,9 +128,9 @@ class ConnPoolImpl : public ConnectionPool::Instance, public ConnPoolImplBase { /** * Production implementation of the ConnPoolImpl. 
*/ -class ConnPoolImplProd : public ConnPoolImpl { +class ProdConnPoolImpl : public ConnPoolImpl { public: - ConnPoolImplProd(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + ProdConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options) : ConnPoolImpl(dispatcher, host, priority, options) {} diff --git a/source/common/http/http2/BUILD b/source/common/http/http2/BUILD index 5a9b6979eaa6b..7d51a2208dff7 100644 --- a/source/common/http/http2/BUILD +++ b/source/common/http/http2/BUILD @@ -43,6 +43,18 @@ envoy_cc_library( ], ) +# Separate library for some nghttp2 setup stuff to avoid having tests take a +# dependency on everything in codec_lib. +envoy_cc_library( + name = "nghttp2_lib", + srcs = ["nghttp2.cc"], + hdrs = ["nghttp2.h"], + external_deps = ["nghttp2"], + deps = [ + "//source/common/common:minimal_logger_lib", + ], +) + envoy_cc_library( name = "conn_pool_lib", srcs = ["conn_pool.cc"], diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index c7dd029d8ce0c..5fe699fc1a989 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -33,7 +33,8 @@ bool Utility::reconstituteCrumbledCookies(const HeaderString& key, const HeaderS cookies.append("; ", 2); } - cookies.append(value.c_str(), value.size()); + const absl::string_view value_view = value.getStringView(); + cookies.append(value_view.data(), value_view.size()); return true; } @@ -66,9 +67,11 @@ static void insertHeader(std::vector& headers, const HeaderEntry& he if (header.value().type() == HeaderString::Type::Reference) { flags |= NGHTTP2_NV_FLAG_NO_COPY_VALUE; } - headers.push_back({remove_const(header.key().c_str()), - remove_const(header.value().c_str()), header.key().size(), - header.value().size(), flags}); + const absl::string_view header_key = header.key().getStringView(); + const 
absl::string_view header_value = header.value().getStringView(); + headers.push_back({remove_const(header_key.data()), + remove_const(header_value.data()), header_key.size(), + header_value.size(), flags}); } void ConnectionImpl::StreamImpl::buildHeaders(std::vector& final_headers, @@ -532,7 +535,8 @@ int ConnectionImpl::onFrameSend(const nghttp2_frame* frame) { } int ConnectionImpl::onInvalidFrame(int32_t stream_id, int error_code) { - ENVOY_CONN_LOG(debug, "invalid frame: {}", connection_, nghttp2_strerror(error_code)); + ENVOY_CONN_LOG(debug, "invalid frame: {} on stream {}", connection_, nghttp2_strerror(error_code), + stream_id); // The stream is about to be closed due to an invalid header or messaging. Don't kill the // entire connection if one stream has bad headers or messaging. @@ -854,6 +858,10 @@ ConnectionImpl::Http2Options::Http2Options(const Http2Settings& http2_settings) nghttp2_option_set_no_closed_streams(options_, 1); nghttp2_option_set_no_auto_window_update(options_, 1); + // The max send header block length is configured to an arbitrarily high number so as to never + // trigger the check within nghttp2, as we check request headers length in codec_impl::saveHeader. 
+ nghttp2_option_set_max_send_header_block_length(options_, 0x2000000); + if (http2_settings.hpack_table_size_ != NGHTTP2_DEFAULT_HEADER_TABLE_SIZE) { nghttp2_option_set_max_deflate_dynamic_table_size(options_, http2_settings.hpack_table_size_); } diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index 713abb886aefc..1277e217e8919 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -239,7 +239,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable& final_headers, nghttp2_data_provider* provider) override; void transformUpgradeFromH1toH2(HeaderMap& headers) override { - upgrade_type_ = headers.Upgrade()->value().c_str(); + upgrade_type_ = std::string(headers.Upgrade()->value().getStringView()); Http::Utility::transformUpgradeRequestFromH1toH2(headers); } void maybeTransformUpgradeFromH2ToH1() override { diff --git a/source/common/http/http2/conn_pool.cc b/source/common/http/http2/conn_pool.cc index df2d8ce4c42c8..cc65db21146d6 100644 --- a/source/common/http/http2/conn_pool.cc +++ b/source/common/http/http2/conn_pool.cc @@ -45,6 +45,18 @@ void ConnPoolImpl::addDrainedCallback(DrainedCb cb) { checkForDrained(); } +bool ConnPoolImpl::hasActiveConnections() const { + if (primary_client_ && primary_client_->client_->numActiveRequests() > 0) { + return true; + } + + if (draining_client_ && draining_client_->client_->numActiveRequests() > 0) { + return true; + } + + return !pending_requests_.empty(); +} + void ConnPoolImpl::checkForDrained() { if (drained_callbacks_.empty()) { return; @@ -77,7 +89,8 @@ void ConnPoolImpl::newClientStream(Http::StreamDecoder& response_decoder, ConnectionPool::Callbacks& callbacks) { if (!host_->cluster().resourceManager(priority_).requests().canCreate()) { ENVOY_LOG(debug, "max requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, nullptr); + 
callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), + nullptr); host_->cluster().stats().upstream_rq_pending_overflow_.inc(); } else { ENVOY_CONN_LOG(debug, "creating stream", *primary_client_->client_); @@ -115,7 +128,8 @@ ConnectionPool::Cancellable* ConnPoolImpl::newStream(Http::StreamDecoder& respon // If we're not allowed to enqueue more requests, fail fast. if (!host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { ENVOY_LOG(debug, "max pending requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, nullptr); + callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), + nullptr); host_->cluster().stats().upstream_rq_pending_overflow_.inc(); return nullptr; } @@ -153,7 +167,8 @@ void ConnPoolImpl::onConnectionEvent(ActiveClient& client, Network::ConnectionEv // do with the request. // NOTE: We move the existing pending requests to a temporary list. This is done so that // if retry logic submits a new request to the pool, we don't fail it inline. 
- purgePendingRequests(client.real_host_description_); + purgePendingRequests(client.real_host_description_, + client.client_->connectionFailureReason()); } if (&client == primary_client_.get()) { @@ -260,7 +275,7 @@ ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) : parent_(parent), connect_timer_(parent_.dispatcher_.createTimer([this]() -> void { onConnectTimeout(); })) { parent_.conn_connect_ms_ = std::make_unique( - parent_.host_->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher_.timeSystem()); + parent_.host_->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher_.timeSource()); Upstream::Host::CreateConnectionData data = parent_.host_->createConnection(parent_.dispatcher_, parent_.socket_options_, nullptr); real_host_description_ = data.host_description_; @@ -276,7 +291,7 @@ ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) parent_.host_->cluster().stats().upstream_cx_active_.inc(); parent_.host_->cluster().stats().upstream_cx_http2_total_.inc(); conn_length_ = std::make_unique( - parent_.host_->cluster().stats().upstream_cx_length_ms_, parent_.dispatcher_.timeSystem()); + parent_.host_->cluster().stats().upstream_cx_length_ms_, parent_.dispatcher_.timeSource()); client_->setConnectionStats({parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, diff --git a/source/common/http/http2/conn_pool.h b/source/common/http/http2/conn_pool.h index d831a3c7dbbe7..ae24573fbd0a8 100644 --- a/source/common/http/http2/conn_pool.h +++ b/source/common/http/http2/conn_pool.h @@ -33,6 +33,7 @@ class ConnPoolImpl : public ConnectionPool::Instance, public ConnPoolImplBase { Http::Protocol protocol() const override { return Http::Protocol::Http2; } void addDrainedCallback(DrainedCb cb) override; void drainConnections() override; + bool hasActiveConnections() const override; ConnectionPool::Cancellable* newStream(Http::StreamDecoder& response_decoder, 
ConnectionPool::Callbacks& callbacks) override; diff --git a/source/common/http/http2/metadata_encoder.h b/source/common/http/http2/metadata_encoder.h index af292f98a633a..7e27444204463 100644 --- a/source/common/http/http2/metadata_encoder.h +++ b/source/common/http/http2/metadata_encoder.h @@ -7,6 +7,7 @@ #include "envoy/http/codec.h" #include "common/buffer/buffer_impl.h" +#include "common/common/c_smart_ptr.h" #include "common/common/logger.h" #include "nghttp2/nghttp2.h" diff --git a/source/common/http/http2/nghttp2.cc b/source/common/http/http2/nghttp2.cc new file mode 100644 index 0000000000000..5781cfa262ad3 --- /dev/null +++ b/source/common/http/http2/nghttp2.cc @@ -0,0 +1,26 @@ +#include "common/http/http2/nghttp2.h" + +#include "common/common/logger.h" + +#include "nghttp2/nghttp2.h" + +namespace Envoy { +namespace Http { +namespace Http2 { + +void initializeNghttp2Logging() { + nghttp2_set_debug_vprintf_callback([](const char* format, va_list args) { + char buf[2048]; + const int n = ::vsnprintf(buf, sizeof(buf), format, args); + // nghttp2 inserts new lines, but we also insert a new line in the ENVOY_LOG + // below, so avoid double \n. + if (n >= 1 && static_cast<size_t>(n) < sizeof(buf) && buf[n - 1] == '\n') { + buf[n - 1] = '\0'; + } + ENVOY_LOG_TO_LOGGER(Logger::Registry::getLog(Logger::Id::http2), trace, "nghttp2: {}", buf); + }); +} + +} // namespace Http2 +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http2/nghttp2.h b/source/common/http/http2/nghttp2.h new file mode 100644 index 0000000000000..3a19e6ba3c570 --- /dev/null +++ b/source/common/http/http2/nghttp2.h @@ -0,0 +1,14 @@ +#pragma once + +namespace Envoy { +namespace Http { +namespace Http2 { + +/** + * Setup nghttp2 trace-level logging for when debugging.
+ */ +void initializeNghttp2Logging(); + +} // namespace Http2 +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/path_utility.cc b/source/common/http/path_utility.cc new file mode 100644 index 0000000000000..56ce3204a4689 --- /dev/null +++ b/source/common/http/path_utility.cc @@ -0,0 +1,56 @@ +#include "common/http/path_utility.h" + +#include "common/chromium_url/url_canon.h" +#include "common/chromium_url/url_canon_stdstring.h" +#include "common/common/logger.h" + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" + +namespace Envoy { +namespace Http { + +namespace { +absl::optional canonicalizePath(absl::string_view original_path) { + std::string canonical_path; + chromium_url::Component in_component(0, original_path.size()); + chromium_url::Component out_component; + chromium_url::StdStringCanonOutput output(&canonical_path); + if (!chromium_url::CanonicalizePath(original_path.data(), in_component, &output, + &out_component)) { + return absl::nullopt; + } else { + output.Complete(); + return absl::make_optional(std::move(canonical_path)); + } +} +} // namespace + +/* static */ +bool PathUtil::canonicalPath(HeaderEntry& path_header) { + const auto original_path = path_header.value().getStringView(); + // canonicalPath is supposed to apply on path component in URL instead of :path header + const auto query_pos = original_path.find('?'); + auto normalized_path_opt = canonicalizePath( + query_pos == original_path.npos + ? original_path + : absl::string_view(original_path.data(), query_pos) // '?' is not included + ); + + if (!normalized_path_opt.has_value()) { + return false; + } + auto& normalized_path = normalized_path_opt.value(); + const absl::string_view query_suffix = + query_pos == original_path.npos + ? 
absl::string_view{} + : absl::string_view{original_path.data() + query_pos, original_path.size() - query_pos}; + if (!query_suffix.empty()) { + normalized_path.insert(normalized_path.end(), query_suffix.begin(), query_suffix.end()); + } + path_header.value(normalized_path); + return true; +} + +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/path_utility.h b/source/common/http/path_utility.h new file mode 100644 index 0000000000000..ad0d32c3ff7d6 --- /dev/null +++ b/source/common/http/path_utility.h @@ -0,0 +1,19 @@ +#pragma once + +#include "envoy/http/header_map.h" + +namespace Envoy { +namespace Http { + +/** + * Path helper extracted from chromium project. + */ +class PathUtil { +public: + // Returns if the normalization succeeds. + // If it is successful, the param will be updated with the normalized path. + static bool canonicalPath(HeaderEntry& path_header); +}; + +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/user_agent.cc b/source/common/http/user_agent.cc index 57cb82e571034..fb6725c4e13bd 100644 --- a/source/common/http/user_agent.cc +++ b/source/common/http/user_agent.cc @@ -36,10 +36,10 @@ void UserAgent::initializeFromHeaders(const HeaderMap& headers, const std::strin const HeaderEntry* user_agent = headers.UserAgent(); if (user_agent) { prefix_ = prefix; - if (user_agent->value().find("iOS")) { + if (user_agent->value().getStringView().find("iOS") != absl::string_view::npos) { type_ = Type::iOS; prefix_ += "user_agent.ios."; - } else if (user_agent->value().find("android")) { + } else if (user_agent->value().getStringView().find("android") != absl::string_view::npos) { type_ = Type::Android; prefix_ += "user_agent.android."; } diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index b6a6b777835e9..1f82cbc5ba976 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -56,11 +56,13 @@ bool Utility::Url::initialize(absl::string_view absolute_url) { // 
RFC allows the absolute-uri to not end in /, but the absolute path form // must start with - if ((u.field_set & (1 << UF_PATH)) == (1 << UF_PATH) && u.field_data[UF_PATH].len > 0) { - path_ = absl::string_view(absolute_url.data() + u.field_data[UF_PATH].off, - u.field_data[UF_PATH].len); + uint64_t path_len = + absolute_url.length() - (u.field_data[UF_HOST].off + host_and_port().length()); + if (path_len > 0) { + uint64_t path_beginning = u.field_data[UF_HOST].off + host_and_port().length(); + path_and_query_params_ = absl::string_view(absolute_url.data() + path_beginning, path_len); } else { - path_ = absl::string_view(kDefaultPath, 1); + path_and_query_params_ = absl::string_view(kDefaultPath, 1); } return true; } @@ -86,8 +88,8 @@ void Utility::appendVia(HeaderMap& headers, const std::string& via) { std::string Utility::createSslRedirectPath(const HeaderMap& headers) { ASSERT(headers.Host()); ASSERT(headers.Path()); - return fmt::format("https://{}{}", headers.Host()->value().c_str(), - headers.Path()->value().c_str()); + return fmt::format("https://{}{}", headers.Host()->value().getStringView(), + headers.Path()->value().getStringView()); } Utility::QueryParams Utility::parseQueryString(absl::string_view url) { @@ -119,8 +121,14 @@ Utility::QueryParams Utility::parseQueryString(absl::string_view url) { return params; } -const char* Utility::findQueryStringStart(const HeaderString& path) { - return std::find(path.c_str(), path.c_str() + path.size(), '?'); +absl::string_view Utility::findQueryStringStart(const HeaderString& path) { + absl::string_view path_str = path.getStringView(); + size_t query_offset = path_str.find('?'); + if (query_offset == absl::string_view::npos) { + query_offset = path_str.length(); + } + path_str.remove_prefix(query_offset); + return path_str; } std::string Utility::parseCookieValue(const HeaderMap& headers, const std::string& key) { @@ -136,13 +144,14 @@ std::string Utility::parseCookieValue(const HeaderMap& headers, const std::strin 
headers.iterateReverse( [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { // Find the cookie headers in the request (typically, there's only one). - if (header.key() == Http::Headers::get().Cookie.get().c_str()) { + if (header.key() == Http::Headers::get().Cookie.get()) { + // Split the cookie header into individual cookies. - for (const auto s : StringUtil::splitToken(header.value().c_str(), ";")) { + for (const auto s : StringUtil::splitToken(header.value().getStringView(), ";")) { // Find the key part of the cookie (i.e. the name of the cookie). size_t first_non_space = s.find_first_not_of(" "); size_t equals_index = s.find('='); - if (equals_index == std::string::npos) { + if (equals_index == absl::string_view::npos) { // The cookie is malformed if it does not have an `=`. Continue // checking other cookies in this header. continue; @@ -204,15 +213,15 @@ bool Utility::hasSetCookie(const HeaderMap& headers, const std::string& key) { headers.iterate( [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { // Find the set-cookie headers in the request - if (header.key() == Http::Headers::get().SetCookie.get().c_str()) { - const std::string value{header.value().c_str()}; + if (header.key() == Http::Headers::get().SetCookie.get()) { + const absl::string_view value{header.value().getStringView()}; const size_t equals_index = value.find('='); - if (equals_index == std::string::npos) { + if (equals_index == absl::string_view::npos) { // The cookie is malformed if it does not have an `=`. 
return HeaderMap::Iterate::Continue; } - std::string k = value.substr(0, equals_index); + absl::string_view k = value.substr(0, equals_index); State* state = static_cast<State*>(context); if (k == state->key_) { state->ret_ = true; @@ -229,7 +238,7 @@ bool Utility::hasSetCookie(const HeaderMap& headers, const std::string& key) { uint64_t Utility::getResponseStatus(const HeaderMap& headers) { const HeaderEntry* header = headers.Status(); uint64_t response_code; - if (!header || !StringUtil::atoul(headers.Status()->value().c_str(), response_code)) { + if (!header || !absl::SimpleAtoi(headers.Status()->value().getStringView(), &response_code)) { throw CodecClientException(":status must be specified and a valid unsigned long"); } return response_code; @@ -245,7 +254,7 @@ bool Utility::isUpgrade(const HeaderMap& headers) { bool Utility::isH2UpgradeRequest(const HeaderMap& headers) { return headers.Method() && - headers.Method()->value().c_str() == Http::Headers::get().MethodValues.Connect && + headers.Method()->value().getStringView() == Http::Headers::get().MethodValues.Connect && headers.Protocol() && !headers.Protocol()->value().empty(); } @@ -285,14 +294,15 @@ void Utility::sendLocalReply(bool is_grpc, StreamDecoderFilterCallbacks& callbacks, const bool& is_reset, Code response_code, absl::string_view body_text, const absl::optional grpc_status, bool is_head_request) { - sendLocalReply(is_grpc, - [&](HeaderMapPtr&& headers, bool end_stream) -> void { - callbacks.encodeHeaders(std::move(headers), end_stream); - }, - [&](Buffer::Instance& data, bool end_stream) -> void { - callbacks.encodeData(data, end_stream); - }, - is_reset, response_code, body_text, grpc_status, is_head_request); + sendLocalReply( + is_grpc, + [&](HeaderMapPtr&& headers, bool end_stream) -> void { + callbacks.encodeHeaders(std::move(headers), end_stream); + }, + [&](Buffer::Instance& data, bool end_stream) -> void { + callbacks.encodeData(data, end_stream); + }, + is_reset, response_code, body_text,
grpc_status, is_head_request); } void Utility::sendLocalReply( @@ -346,7 +356,7 @@ Utility::getLastAddressFromXFF(const Http::HeaderMap& request_headers, uint32_t return {nullptr, false}; } - absl::string_view xff_string(xff_header->value().c_str(), xff_header->value().size()); + absl::string_view xff_string(xff_header->value().getStringView()); static const std::string separator(","); // Ignore the last num_to_skip addresses at the end of XFF. for (uint32_t i = 0; i < num_to_skip; i++) { @@ -444,18 +454,39 @@ std::string Utility::queryParamsToString(const QueryParams& params) { return out; } +const std::string Utility::resetReasonToString(const Http::StreamResetReason reset_reason) { + switch (reset_reason) { + case Http::StreamResetReason::ConnectionFailure: + return "connection failure"; + case Http::StreamResetReason::ConnectionTermination: + return "connection termination"; + case Http::StreamResetReason::LocalReset: + return "local reset"; + case Http::StreamResetReason::LocalRefusedStreamReset: + return "local refused stream reset"; + case Http::StreamResetReason::Overflow: + return "overflow"; + case Http::StreamResetReason::RemoteReset: + return "remote reset"; + case Http::StreamResetReason::RemoteRefusedStreamReset: + return "remote refused stream reset"; + } + + NOT_REACHED_GCOVR_EXCL_LINE; +} + void Utility::transformUpgradeRequestFromH1toH2(HeaderMap& headers) { ASSERT(Utility::isUpgrade(headers)); const HeaderString& upgrade = headers.Upgrade()->value(); headers.insertMethod().value().setReference(Http::Headers::get().MethodValues.Connect); - headers.insertProtocol().value().setCopy(upgrade.c_str(), upgrade.size()); + headers.insertProtocol().value().setCopy(upgrade.getStringView()); headers.removeUpgrade(); headers.removeConnection(); // nghttp2 rejects upgrade requests/responses with content length, so strip // any unnecessary content length header. 
if (headers.ContentLength() != nullptr && - absl::string_view("0") == headers.ContentLength()->value().c_str()) { + headers.ContentLength()->value().getStringView() == "0") { headers.removeContentLength(); } } @@ -467,7 +498,7 @@ void Utility::transformUpgradeResponseFromH1toH2(HeaderMap& headers) { headers.removeUpgrade(); headers.removeConnection(); if (headers.ContentLength() != nullptr && - absl::string_view("0") == headers.ContentLength()->value().c_str()) { + headers.ContentLength()->value().getStringView() == "0") { headers.removeContentLength(); } } @@ -477,14 +508,14 @@ void Utility::transformUpgradeRequestFromH2toH1(HeaderMap& headers) { const HeaderString& protocol = headers.Protocol()->value(); headers.insertMethod().value().setReference(Http::Headers::get().MethodValues.Get); - headers.insertUpgrade().value().setCopy(protocol.c_str(), protocol.size()); + headers.insertUpgrade().value().setCopy(protocol.getStringView()); headers.insertConnection().value().setReference(Http::Headers::get().ConnectionValues.Upgrade); headers.removeProtocol(); } void Utility::transformUpgradeResponseFromH2toH1(HeaderMap& headers, absl::string_view upgrade) { if (getResponseStatus(headers) == 200) { - headers.insertUpgrade().value().setCopy(upgrade.data(), upgrade.size()); + headers.insertUpgrade().value().setCopy(upgrade); headers.insertConnection().value().setReference(Http::Headers::get().ConnectionValues.Upgrade); headers.insertStatus().value().setInteger(101); } diff --git a/source/common/http/utility.h b/source/common/http/utility.h index f57c86a68abfa..24cb394c6fa32 100644 --- a/source/common/http/utility.h +++ b/source/common/http/utility.h @@ -24,19 +24,19 @@ namespace Utility { /** * Given a fully qualified URL, splits the string_view provided into scheme, - * host and path components. + * host and path with query parameters components. 
*/ class Url { public: bool initialize(absl::string_view absolute_url); absl::string_view scheme() { return scheme_; } absl::string_view host_and_port() { return host_and_port_; } - absl::string_view path() { return path_; } + absl::string_view path_and_query_params() { return path_and_query_params_; } private: absl::string_view scheme_; absl::string_view host_and_port_; - absl::string_view path_; + absl::string_view path_and_query_params_; }; /** @@ -70,10 +70,11 @@ QueryParams parseQueryString(absl::string_view url); /** * Finds the start of the query string in a path * @param path supplies a HeaderString& to search for the query string - * @return const char* a pointer to the beginning of the query string, or the end of the - * path if there is no query + * @return absl::string_view starting at the beginning of the query string, + * or a string_view starting at the end of the path if there was + * no query string. */ -const char* findQueryStringStart(const HeaderString& path); +absl::string_view findQueryStringStart(const HeaderString& path); /** * Parse a particular value out of a cookie @@ -229,6 +230,11 @@ MessagePtr prepareHeaders(const ::envoy::api::v2::core::HttpUri& http_uri); */ std::string queryParamsToString(const QueryParams& query_params); +/** + * Returns string representation of StreamResetReason. + */ +const std::string resetReasonToString(const Http::StreamResetReason reset_reason); + /** * Transforms the supplied headers from an HTTP/1 Upgrade request to an H2 style upgrade. 
* Changes the method to connection, moves the Upgrade to a :protocol header, diff --git a/source/common/init/BUILD b/source/common/init/BUILD new file mode 100644 index 0000000000000..6fef3006865be --- /dev/null +++ b/source/common/init/BUILD @@ -0,0 +1,40 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "watcher_lib", + srcs = ["watcher_impl.cc"], + hdrs = ["watcher_impl.h"], + deps = [ + "//include/envoy/init:watcher_interface", + "//source/common/common:logger_lib", + ], +) + +envoy_cc_library( + name = "target_lib", + srcs = ["target_impl.cc"], + hdrs = ["target_impl.h"], + deps = [ + "//include/envoy/init:target_interface", + "//source/common/common:logger_lib", + ], +) + +envoy_cc_library( + name = "manager_lib", + srcs = ["manager_impl.cc"], + hdrs = ["manager_impl.h"], + deps = [ + ":watcher_lib", + "//include/envoy/init:manager_interface", + "//source/common/common:logger_lib", + ], +) diff --git a/source/common/init/manager_impl.cc b/source/common/init/manager_impl.cc new file mode 100644 index 0000000000000..f60ddc64a9e90 --- /dev/null +++ b/source/common/init/manager_impl.cc @@ -0,0 +1,79 @@ +#include "common/init/manager_impl.h" + +#include "common/common/assert.h" + +namespace Envoy { +namespace Init { + +ManagerImpl::ManagerImpl(absl::string_view name) + : name_(fmt::format("init manager {}", name)), state_(State::Uninitialized), count_(0), + watcher_(name_, [this]() { onTargetReady(); }) {} + +Manager::State ManagerImpl::state() const { return state_; } + +void ManagerImpl::add(const Target& target) { + ++count_; + TargetHandlePtr target_handle(target.createHandle(name_)); + switch (state_) { + case State::Uninitialized: + // If the manager isn't initialized yet, save the target handle to be initialized later. 
+ ENVOY_LOG(debug, "added {} to {}", target.name(), name_); + target_handles_.push_back(std::move(target_handle)); + return; + case State::Initializing: + // If the manager is already initializing, initialize the new target immediately. Note that + // it's important in this case that count_ was incremented above before calling the target, + // because if the target calls the init manager back immediately, count_ will be decremented + // here (see the definition of watcher_ above). + target_handle->initialize(watcher_); + return; + case State::Initialized: + // If the manager has already completed initialization, consider this a programming error. + ASSERT(false, fmt::format("attempted to add {} to initialized {}", target.name(), name_)); + } +} + +void ManagerImpl::initialize(const Watcher& watcher) { + // If the manager is already initializing or initialized, consider this a programming error. + ASSERT(state_ == State::Uninitialized, fmt::format("attempted to initialize {} twice", name_)); + + // Create a handle to notify when initialization is complete. + watcher_handle_ = watcher.createHandle(name_); + + if (count_ == 0) { + // If we have no targets, initialization trivially completes. This can happen, and is fine. + ENVOY_LOG(debug, "{} contains no targets", name_); + ready(); + } else { + // If we have some targets, start initialization... + ENVOY_LOG(debug, "{} initializing", name_); + state_ = State::Initializing; + + // Attempt to initialize each target. If a target is unavailable, treat it as though it + // completed immediately. + for (const auto& target_handle : target_handles_) { + if (!target_handle->initialize(watcher_)) { + onTargetReady(); + } + } + } +} + +void ManagerImpl::onTargetReady() { + // If there are no remaining targets and one mysteriously calls us back, this manager is haunted. 
+ ASSERT(count_ != 0, fmt::format("{} called back by target after initialization complete", name_)); + + // If there are no uninitialized targets remaining when called back by a target, that means it was + // the last. Signal `ready` to the handle we saved in `initialize`. + if (--count_ == 0) { + ready(); + } +} + +void ManagerImpl::ready() { + state_ = State::Initialized; + watcher_handle_->ready(); +} + +} // namespace Init +} // namespace Envoy diff --git a/source/common/init/manager_impl.h b/source/common/init/manager_impl.h new file mode 100644 index 0000000000000..b92ac102fd729 --- /dev/null +++ b/source/common/init/manager_impl.h @@ -0,0 +1,62 @@ +#pragma once + +#include <list> + +#include "envoy/init/manager.h" + +#include "common/common/logger.h" +#include "common/init/watcher_impl.h" + +namespace Envoy { +namespace Init { + +/** + * Init::ManagerImpl coordinates initialization of one or more "targets." See comments in + * include/envoy/init/manager.h for an overview. + * + * When the logging level is set to "debug" or "trace," the log will contain entries for all + * significant events in the initialization flow: + * + * - Targets added to the manager + * - Initialization started for the manager and for each target + * - Initialization completed for each target and for the manager + * - Destruction of targets and watchers + * - Callbacks to "unavailable" (deleted) targets, manager, or watchers + */ +class ManagerImpl : public Manager, Logger::Loggable<Logger::Id::init> { +public: + /** + * @param name a human-readable manager name, for logging / debugging. 
+ */ + ManagerImpl(absl::string_view name); + + // Init::Manager + State state() const override; + void add(const Target& target) override; + void initialize(const Watcher& watcher) override; + +private: + void onTargetReady(); + void ready(); + + // Human-readable name for logging + const std::string name_; + + // Current state + State state_; + + // Current number of registered targets that have not yet initialized + uint32_t count_; + + // Handle to the watcher passed in `initialize`, to be called when initialization completes + WatcherHandlePtr watcher_handle_; + + // Watcher to receive ready notifications from each target + const WatcherImpl watcher_; + + // All registered targets + std::list<TargetHandlePtr> target_handles_; +}; + +} // namespace Init +} // namespace Envoy diff --git a/source/common/init/target_impl.cc b/source/common/init/target_impl.cc new file mode 100644 index 0000000000000..5bf0288b82980 --- /dev/null +++ b/source/common/init/target_impl.cc @@ -0,0 +1,54 @@ +#include "common/init/target_impl.h" + +namespace Envoy { +namespace Init { + +TargetHandleImpl::TargetHandleImpl(absl::string_view handle_name, absl::string_view name, + std::weak_ptr<InternalInitalizeFn> fn) + : handle_name_(handle_name), name_(name), fn_(std::move(fn)) {} + +bool TargetHandleImpl::initialize(const Watcher& watcher) const { + auto locked_fn(fn_.lock()); + if (locked_fn) { + // If we can "lock" a shared pointer to the target's callback function, call it + // with a new handle to the ManagerImpl's watcher that was passed in. + ENVOY_LOG(debug, "{} initializing {}", handle_name_, name_); + (*locked_fn)(watcher.createHandle(name_)); + return true; + } else { + // If not, the target was already destroyed. 
+ ENVOY_LOG(debug, "{} can't initialize {} (unavailable)", handle_name_, name_); + return false; + } +} + +TargetImpl::TargetImpl(absl::string_view name, InitializeFn fn) + : name_(fmt::format("target {}", name)), + fn_(std::make_shared<InternalInitalizeFn>([this, fn](WatcherHandlePtr watcher_handle) { + watcher_handle_ = std::move(watcher_handle); + fn(); + })) {} + +TargetImpl::~TargetImpl() { ENVOY_LOG(debug, "{} destroyed", name_); } + +absl::string_view TargetImpl::name() const { return name_; } + +TargetHandlePtr TargetImpl::createHandle(absl::string_view handle_name) const { + // Note: can't use std::make_unique here because TargetHandleImpl ctor is private. + return std::unique_ptr<TargetHandle>( + new TargetHandleImpl(handle_name, name_, std::weak_ptr<InternalInitalizeFn>(fn_))); +} + +bool TargetImpl::ready() { + if (watcher_handle_) { + // If we have a handle for the ManagerImpl's watcher, signal it and then reset so it can't be + // accidentally signaled again. + const bool result = watcher_handle_->ready(); + watcher_handle_.reset(); + return result; + } + return false; +} + +} // namespace Init +} // namespace Envoy diff --git a/source/common/init/target_impl.h b/source/common/init/target_impl.h new file mode 100644 index 0000000000000..ad2757433f0de --- /dev/null +++ b/source/common/init/target_impl.h @@ -0,0 +1,89 @@ +#pragma once + +#include <memory> + +#include "envoy/init/target.h" + +#include "common/common/logger.h" + +namespace Envoy { +namespace Init { + +/** + * A target is just a glorified callback function, called by the manager it was registered with. + */ +using InitializeFn = std::function<void()>; + +/** + * Internally, the callback is slightly more sophisticated: it actually takes a WatcherHandlePtr + * that it uses to notify the manager when the target is ready. It saves this pointer when invoked + * and resets it later in `ready`. Users needn't care about this implementation detail, they only + * need to provide an `InitializeFn` above when constructing a target. 
+ */ +using InternalInitalizeFn = std::function<void(WatcherHandlePtr)>; + +/** + * A TargetHandleImpl functions as a weak reference to a TargetImpl. It is how a ManagerImpl safely + * tells a target to `initialize` with no guarantees about the target's lifetime. + */ +class TargetHandleImpl : public TargetHandle, Logger::Loggable<Logger::Id::init> { +private: + friend class TargetImpl; + TargetHandleImpl(absl::string_view handle_name, absl::string_view name, + std::weak_ptr<InternalInitalizeFn> fn); + +public: + // Init::TargetHandle + bool initialize(const Watcher& watcher) const override; + +private: + // Name of the handle (almost always the name of the ManagerImpl calling the target) + const std::string handle_name_; + + // Name of the target + const std::string name_; + + // The target's callback function, only called if the weak pointer can be "locked" + const std::weak_ptr<InternalInitalizeFn> fn_; +}; + +/** + * A TargetImpl is an entity that can be registered with a Manager for initialization. It can only + * be invoked through a TargetHandle. + */ +class TargetImpl : public Target, Logger::Loggable<Logger::Id::init> { +public: + /** + * @param name a human-readable target name, for logging / debugging + * @param fn a callback function to invoke when `initialize` is called on the handle. Note that this + * doesn't take a WatcherHandlePtr (like TargetFn does). Managing the watcher handle is done + * internally to simplify usage. + */ + TargetImpl(absl::string_view name, InitializeFn fn); + ~TargetImpl() override; + + // Init::Target + absl::string_view name() const override; + TargetHandlePtr createHandle(absl::string_view handle_name) const override; + + /** + * Signal to the init manager that this target has finished initializing. This is safe to call + * any time. Calling it before initialization begins or after initialization has already ended + * will have no effect. + * @return true if the init manager received this call, false otherwise. 
+ */ + bool ready(); + +private: + // Human-readable name for logging + const std::string name_; + + // Handle to the ManagerImpl's internal watcher, to call when this target is initialized + WatcherHandlePtr watcher_handle_; + + // The callback function, called via TargetHandleImpl by the manager + const std::shared_ptr<InternalInitalizeFn> fn_; +}; + +} // namespace Init +} // namespace Envoy diff --git a/source/common/init/watcher_impl.cc b/source/common/init/watcher_impl.cc new file mode 100644 index 0000000000000..b69fe3e7cf846 --- /dev/null +++ b/source/common/init/watcher_impl.cc @@ -0,0 +1,38 @@ +#include "common/init/watcher_impl.h" + +namespace Envoy { +namespace Init { + +WatcherHandleImpl::WatcherHandleImpl(absl::string_view handle_name, absl::string_view name, + std::weak_ptr<ReadyFn> fn) + : handle_name_(handle_name), name_(name), fn_(std::move(fn)) {} + +bool WatcherHandleImpl::ready() const { + auto locked_fn(fn_.lock()); + if (locked_fn) { + // If we can "lock" a shared pointer to the watcher's callback function, call it. + ENVOY_LOG(debug, "{} initialized, notifying {}", handle_name_, name_); + (*locked_fn)(); + return true; + } else { + // If not, the watcher was already destroyed. 
+ ENVOY_LOG(debug, "{} initialized, but can't notify {} (unavailable)", handle_name_, name_); + return false; + } +} + +WatcherImpl::WatcherImpl(absl::string_view name, ReadyFn fn) + : name_(name), fn_(std::make_shared<ReadyFn>(std::move(fn))) {} + +WatcherImpl::~WatcherImpl() { ENVOY_LOG(debug, "{} destroyed", name_); } + +absl::string_view WatcherImpl::name() const { return name_; } + +WatcherHandlePtr WatcherImpl::createHandle(absl::string_view handle_name) const { + // Note: can't use std::make_unique because WatcherHandleImpl ctor is private + return std::unique_ptr<WatcherHandle>( + new WatcherHandleImpl(handle_name, name_, std::weak_ptr<ReadyFn>(fn_))); +} + +} // namespace Init +} // namespace Envoy diff --git a/source/common/init/watcher_impl.h b/source/common/init/watcher_impl.h new file mode 100644 index 0000000000000..816a37c860eb2 --- /dev/null +++ b/source/common/init/watcher_impl.h @@ -0,0 +1,73 @@ +#pragma once + +#include <functional> + +#include "envoy/init/watcher.h" + +#include "common/common/logger.h" + +namespace Envoy { +namespace Init { + +/** + * A watcher is just a glorified callback function, called by a target or a manager when + * initialization completes. + */ +using ReadyFn = std::function<void()>; + +/** + * A WatcherHandleImpl functions as a weak reference to a Watcher. It is how a TargetImpl safely + * notifies a ManagerImpl that it has initialized, and likewise it's how ManagerImpl safely tells + * its client that all registered targets have initialized, with no guarantees about the lifetimes + * of the manager or client. 
+ */ +class WatcherHandleImpl : public WatcherHandle, Logger::Loggable<Logger::Id::init> { +private: + friend class WatcherImpl; + WatcherHandleImpl(absl::string_view handle_name, absl::string_view name, + std::weak_ptr<ReadyFn> fn); + +public: + // Init::WatcherHandle + bool ready() const override; + +private: + // Name of the handle (either the name of the target calling the manager, or the name of the + // manager calling the client) + const std::string handle_name_; + + // Name of the watcher (either the name of the manager, or the name of the client) + const std::string name_; + + // The watcher's callback function, only called if the weak pointer can be "locked" + const std::weak_ptr<ReadyFn> fn_; +}; + +/** + * A WatcherImpl is an entity that listens for notifications that either an initialization target or + * all targets registered with a manager have initialized. It can only be invoked through a + * WatcherHandleImpl. + */ +class WatcherImpl : public Watcher, Logger::Loggable<Logger::Id::init> { +public: + /** + * @param name a human-readable watcher name, for logging / debugging + * @param fn a callback function to invoke when `ready` is called on the handle + */ + WatcherImpl(absl::string_view name, ReadyFn fn); + ~WatcherImpl() override; + + // Init::Watcher + absl::string_view name() const override; + WatcherHandlePtr createHandle(absl::string_view handle_name) const override; + +private: + // Human-readable name for logging + const std::string name_; + + // The callback function, called via WatcherHandleImpl by either the target or the manager + const std::shared_ptr<ReadyFn> fn_; +}; + +} // namespace Init +} // namespace Envoy diff --git a/source/common/json/config_schemas.cc b/source/common/json/config_schemas.cc index 25e33549a9345..8e2977b73e2ad 100644 --- a/source/common/json/config_schemas.cc +++ b/source/common/json/config_schemas.cc @@ -1473,9 +1473,6 @@ const std::string Json::Schema::CLUSTER_SCHEMA(R"EOF( "minimum_ring_size" : { "type" : "integer", "minimum" : 0 - }, - "use_std_hash" : { - "type" : 
"boolean" } } }, diff --git a/source/common/json/json_loader.cc b/source/common/json/json_loader.cc index f8baabcf198c4..b645512464f3d 100644 --- a/source/common/json/json_loader.cc +++ b/source/common/json/json_loader.cc @@ -646,7 +646,7 @@ bool ObjectHandler::Uint(unsigned value) { } bool ObjectHandler::Int64(int64_t value) { return handleValueEvent(Field::createValue(value)); } bool ObjectHandler::Uint64(uint64_t value) { - if (value > std::numeric_limits::max()) { + if (value > static_cast(std::numeric_limits::max())) { throw Exception(fmt::format("JSON value from line {} is larger than int64_t (not supported)", stream_.getLineNumber())); } diff --git a/source/common/local_info/local_info_impl.h b/source/common/local_info/local_info_impl.h index 4464c613f88d6..6430c07762353 100644 --- a/source/common/local_info/local_info_impl.h +++ b/source/common/local_info/local_info_impl.h @@ -24,13 +24,10 @@ class LocalInfoImpl : public LocalInfo { } } - // TODO(PiotrSikora): Revert https://github.com/envoyproxy/envoy/pull/1500 once protobuf string - // types converge. 
- Network::Address::InstanceConstSharedPtr address() const override { return address_; } - const std::string zoneName() const override { return node_.locality().zone(); } - const std::string clusterName() const override { return node_.cluster(); } - const std::string nodeName() const override { return node_.id(); } + const std::string& zoneName() const override { return node_.locality().zone(); } + const std::string& clusterName() const override { return node_.cluster(); } + const std::string& nodeName() const override { return node_.id(); } const envoy::api::v2::core::Node& node() const override { return node_; } private: diff --git a/source/common/memory/BUILD b/source/common/memory/BUILD index 30e2487125960..2b22e25f9f28d 100644 --- a/source/common/memory/BUILD +++ b/source/common/memory/BUILD @@ -13,4 +13,26 @@ envoy_cc_library( srcs = ["stats.cc"], hdrs = ["stats.h"], tcmalloc_dep = 1, + deps = [ + "//source/common/common:logger_lib", + ], +) + +envoy_cc_library( + name = "utils_lib", + srcs = ["utils.cc"], + hdrs = ["utils.h"], + tcmalloc_dep = 1, +) + +envoy_cc_library( + name = "heap_shrinker_lib", + srcs = ["heap_shrinker.cc"], + hdrs = ["heap_shrinker.h"], + deps = [ + ":utils_lib", + "//include/envoy/event:dispatcher_interface", + "//include/envoy/server:overload_manager_interface", + "//include/envoy/stats:stats_interface", + ], ) diff --git a/source/common/memory/heap_shrinker.cc b/source/common/memory/heap_shrinker.cc new file mode 100644 index 0000000000000..2582cfe76c84f --- /dev/null +++ b/source/common/memory/heap_shrinker.cc @@ -0,0 +1,38 @@ +#include "common/memory/heap_shrinker.h" + +#include "common/memory/utils.h" + +#include "absl/strings/str_cat.h" + +namespace Envoy { +namespace Memory { + +// TODO(eziskind): make this configurable +constexpr std::chrono::milliseconds kTimerInterval = std::chrono::milliseconds(10000); + +HeapShrinker::HeapShrinker(Event::Dispatcher& dispatcher, Server::OverloadManager& overload_manager, + Stats::Scope& 
stats) + : active_(false) { + const auto action_name = Server::OverloadActionNames::get().ShrinkHeap; + if (overload_manager.registerForAction(action_name, dispatcher, + [this](Server::OverloadActionState state) { + active_ = (state == Server::OverloadActionState::Active); + })) { + shrink_counter_ = &stats.counter(absl::StrCat("overload.", action_name, ".shrink_count")); + timer_ = dispatcher.createTimer([this] { + shrinkHeap(); + timer_->enableTimer(kTimerInterval); + }); + timer_->enableTimer(kTimerInterval); + } +} + +void HeapShrinker::shrinkHeap() { + if (active_) { + Utils::releaseFreeMemory(); + shrink_counter_->inc(); + } +} + +} // namespace Memory +} // namespace Envoy diff --git a/source/common/memory/heap_shrinker.h b/source/common/memory/heap_shrinker.h new file mode 100644 index 0000000000000..6c4a88bfbbb20 --- /dev/null +++ b/source/common/memory/heap_shrinker.h @@ -0,0 +1,29 @@ +#pragma once + +#include "envoy/event/dispatcher.h" +#include "envoy/server/overload_manager.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats.h" + +namespace Envoy { +namespace Memory { + +/** + * A utility class to periodically attempt to shrink the heap by releasing free memory + * to the system if the "shrink heap" overload action has been configured and triggered. 
+ */ +class HeapShrinker { +public: + HeapShrinker(Event::Dispatcher& dispatcher, Server::OverloadManager& overload_manager, + Envoy::Stats::Scope& stats); + +private: + void shrinkHeap(); + + bool active_; + Envoy::Stats::Counter* shrink_counter_; + Envoy::Event::TimerPtr timer_; +}; + +} // namespace Memory +} // namespace Envoy diff --git a/source/common/memory/stats.cc b/source/common/memory/stats.cc index 3b77e428ba295..d35b2add1aeba 100644 --- a/source/common/memory/stats.cc +++ b/source/common/memory/stats.cc @@ -2,6 +2,8 @@ #include +#include "common/common/logger.h" + #ifdef TCMALLOC #include "gperftools/malloc_extension.h" @@ -40,6 +42,13 @@ uint64_t Stats::totalPageHeapUnmapped() { return value; } +void Stats::dumpStatsToLog() { + constexpr int buffer_size = 100000; + auto buffer = std::make_unique(buffer_size); + MallocExtension::instance()->GetStats(buffer.get(), buffer_size); + ENVOY_LOG_MISC(debug, "TCMalloc stats:\n{}", buffer.get()); +} + } // namespace Memory } // namespace Envoy @@ -53,6 +62,7 @@ uint64_t Stats::totalThreadCacheBytes() { return 0; } uint64_t Stats::totalCurrentlyReserved() { return 0; } uint64_t Stats::totalPageHeapUnmapped() { return 0; } uint64_t Stats::totalPageHeapFree() { return 0; } +void Stats::dumpStatsToLog() {} } // namespace Memory } // namespace Envoy diff --git a/source/common/memory/stats.h b/source/common/memory/stats.h index c35e6a418fdff..21b8cdd1052d1 100644 --- a/source/common/memory/stats.h +++ b/source/common/memory/stats.h @@ -39,6 +39,11 @@ class Stats { * swapped out by the OS, they also count towards physical memory usage. */ static uint64_t totalPageHeapFree(); + + /** + * Log detailed stats about current memory allocation. Intended for debugging purposes. 
+ */ + static void dumpStatsToLog(); }; } // namespace Memory diff --git a/source/common/memory/utils.cc b/source/common/memory/utils.cc new file mode 100644 index 0000000000000..dec4a124f9756 --- /dev/null +++ b/source/common/memory/utils.cc @@ -0,0 +1,17 @@ +#include "common/memory/utils.h" + +#ifdef TCMALLOC +#include "gperftools/malloc_extension.h" +#endif + +namespace Envoy { +namespace Memory { + +void Utils::releaseFreeMemory() { +#ifdef TCMALLOC + MallocExtension::instance()->ReleaseFreeMemory(); +#endif +} + +} // namespace Memory +} // namespace Envoy diff --git a/source/common/memory/utils.h b/source/common/memory/utils.h new file mode 100644 index 0000000000000..d6311204a2a18 --- /dev/null +++ b/source/common/memory/utils.h @@ -0,0 +1,12 @@ +#pragma once + +namespace Envoy { +namespace Memory { + +class Utils { +public: + static void releaseFreeMemory(); +}; + +} // namespace Memory +} // namespace Envoy diff --git a/source/common/network/BUILD b/source/common/network/BUILD index 5f356c4f73887..6fe04e26417a6 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -102,12 +102,27 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "io_socket_error_lib", + srcs = ["io_socket_error_impl.cc"], + hdrs = ["io_socket_error_impl.h"], + deps = [ + "//include/envoy/api:io_error_interface", + "//source/common/common:assert_lib", + ], +) + envoy_cc_library( name = "io_socket_handle_lib", + srcs = ["io_socket_handle_impl.cc"], hdrs = ["io_socket_handle_impl.h"], deps = [ + ":io_socket_error_lib", + "//include/envoy/buffer:buffer_interface", "//include/envoy/network:io_handle_interface", + "//source/common/api:os_sys_calls_lib", "//source/common/common:assert_lib", + "//source/common/common:stack_array", ], ) diff --git a/source/common/network/address_impl.cc b/source/common/network/address_impl.cc index 438e4e6a66e91..0b6536074d3eb 100644 --- a/source/common/network/address_impl.cc +++ b/source/common/network/address_impl.cc @@ -160,8 +160,10 
@@ IoHandlePtr InstanceBase::socketFromSocketType(SocketType socketType) const { domain = AF_UNIX; } - IoHandlePtr io_handle = std::make_unique(::socket(domain, flags, 0)); - RELEASE_ASSERT(io_handle->fd() != -1, ""); + const Api::SysCallIntResult result = Api::OsSysCallsSingleton::get().socket(domain, flags, 0); + RELEASE_ASSERT(result.rc_ != -1, + fmt::format("socket(2) failed, got error: {}", strerror(result.errno_))); + IoHandlePtr io_handle = std::make_unique(result.rc_); #ifdef __APPLE__ // Cannot set SOCK_NONBLOCK as a ::socket flag. @@ -217,9 +219,9 @@ bool Ipv4Instance::operator==(const Instance& rhs) const { } Api::SysCallIntResult Ipv4Instance::bind(int fd) const { - const int rc = ::bind(fd, reinterpret_cast(&ip_.ipv4_.address_), - sizeof(ip_.ipv4_.address_)); - return {rc, errno}; + auto& os_syscalls = Api::OsSysCallsSingleton::get(); + return os_syscalls.bind(fd, reinterpret_cast(&ip_.ipv4_.address_), + sizeof(ip_.ipv4_.address_)); } Api::SysCallIntResult Ipv4Instance::connect(int fd) const { @@ -308,9 +310,9 @@ bool Ipv6Instance::operator==(const Instance& rhs) const { } Api::SysCallIntResult Ipv6Instance::bind(int fd) const { - const int rc = ::bind(fd, reinterpret_cast(&ip_.ipv6_.address_), - sizeof(ip_.ipv6_.address_)); - return {rc, errno}; + auto& os_syscalls = Api::OsSysCallsSingleton::get(); + return os_syscalls.bind(fd, reinterpret_cast(&ip_.ipv6_.address_), + sizeof(ip_.ipv6_.address_)); } Api::SysCallIntResult Ipv6Instance::connect(int fd) const { @@ -368,17 +370,16 @@ PipeInstance::PipeInstance(const std::string& pipe_path) : InstanceBase(Type::Pi bool PipeInstance::operator==(const Instance& rhs) const { return asString() == rhs.asString(); } Api::SysCallIntResult PipeInstance::bind(int fd) const { + auto& os_syscalls = Api::OsSysCallsSingleton::get(); if (abstract_namespace_) { - const int rc = ::bind(fd, reinterpret_cast(&address_), - offsetof(struct sockaddr_un, sun_path) + address_length_); - return {rc, errno}; + return 
os_syscalls.bind(fd, reinterpret_cast(&address_), + offsetof(struct sockaddr_un, sun_path) + address_length_); } // Try to unlink an existing filesystem object at the requested path. Ignore // errors -- it's fine if the path doesn't exist, and if it exists but can't // be unlinked then `::bind()` will generate a reasonable errno. unlink(address_.sun_path); - const int rc = ::bind(fd, reinterpret_cast(&address_), sizeof(address_)); - return {rc, errno}; + return os_syscalls.bind(fd, reinterpret_cast(&address_), sizeof(address_)); } Api::SysCallIntResult PipeInstance::connect(int fd) const { diff --git a/source/common/network/cidr_range.cc b/source/common/network/cidr_range.cc index 407778fb384ec..e8ee2c7d65372 100644 --- a/source/common/network/cidr_range.cc +++ b/source/common/network/cidr_range.cc @@ -128,8 +128,7 @@ CidrRange CidrRange::create(const std::string& range) { InstanceConstSharedPtr ptr = Utility::parseInternetAddress(std::string{parts[0]}); if (ptr->type() == Type::Ip) { uint64_t length64; - const std::string part{parts[1]}; - if (StringUtil::atoul(part.c_str(), length64, 10)) { + if (absl::SimpleAtoi(parts[1], &length64)) { if ((ptr->ip()->version() == IpVersion::v6 && length64 <= 128) || (ptr->ip()->version() == IpVersion::v4 && length64 <= 32)) { return create(std::move(ptr), static_cast(length64)); diff --git a/source/common/network/cidr_range.h b/source/common/network/cidr_range.h index ece8fdf40e525..309bae37c1476 100644 --- a/source/common/network/cidr_range.h +++ b/source/common/network/cidr_range.h @@ -78,7 +78,7 @@ class CidrRange { * TODO(ccaraman): Update CidrRange::create to support only constructing valid ranges. * @return a CidrRange instance with the specified address and length, modified so that the only * bits that might be non-zero are in the high-order length bits, and so that length is - * in the appropriate range (0 to 32 for IPv4, 0 to 128 for IPv6). 
If the the address or + * in the appropriate range (0 to 32 for IPv4, 0 to 128 for IPv6). If the address or * length is invalid, then the range will be invalid (i.e. length == -1). */ static CidrRange create(InstanceConstSharedPtr address, int length); diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index 84a9289071a7f..f9f62f12e6375 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -47,7 +47,7 @@ std::atomic ConnectionImpl::next_global_id_; ConnectionImpl::ConnectionImpl(Event::Dispatcher& dispatcher, ConnectionSocketPtr&& socket, TransportSocketPtr&& transport_socket, bool connected) : transport_socket_(std::move(transport_socket)), socket_(std::move(socket)), - filter_manager_(*this, *this), stream_info_(dispatcher.timeSystem()), + filter_manager_(*this), stream_info_(dispatcher.timeSource()), write_buffer_( dispatcher.getWatermarkFactory().create([this]() -> void { this->onLowWatermark(); }, [this]() -> void { this->onHighWatermark(); })), @@ -70,7 +70,7 @@ ConnectionImpl::ConnectionImpl(Event::Dispatcher& dispatcher, ConnectionSocketPt } ConnectionImpl::~ConnectionImpl() { - ASSERT(ioHandle().fd() == -1 && delayed_close_timer_ == nullptr, + ASSERT(!ioHandle().isOpen() && delayed_close_timer_ == nullptr, "ConnectionImpl was unexpectedly torn down without being closed."); // In general we assume that owning code has called close() previously to the destructor being @@ -93,12 +93,13 @@ void ConnectionImpl::addReadFilter(ReadFilterSharedPtr filter) { bool ConnectionImpl::initializeReadFilters() { return filter_manager_.initializeReadFilters(); } void ConnectionImpl::close(ConnectionCloseType type) { - if (ioHandle().fd() == -1) { + if (!ioHandle().isOpen()) { return; } uint64_t data_to_write = write_buffer_->length(); ENVOY_CONN_LOG(debug, "closing data_to_write={} type={}", *this, data_to_write, enumToInt(type)); + const bool delayed_close_timeout_set = 
delayedCloseTimeout().count() > 0; if (data_to_write == 0 || type == ConnectionCloseType::NoFlush || !transport_socket_->canFlushClose()) { if (data_to_write > 0) { @@ -107,13 +108,25 @@ void ConnectionImpl::close(ConnectionCloseType type) { transport_socket_->doWrite(*write_buffer_, true); } - closeSocket(ConnectionEvent::LocalClose); + if (type == ConnectionCloseType::FlushWriteAndDelay && delayed_close_timeout_set) { + // The socket is being closed and either there is no more data to write or the data can not be + // flushed (!transport_socket_->canFlushClose()). Since a delayed close has been requested, + // start the delayed close timer if it hasn't been done already by a previous close(). + // NOTE: Even though the delayed_close_state_ is being set to CloseAfterFlushAndWait, since + // a write event is not being registered for the socket, this logic is simply setting the + // timer and waiting for it to trigger to close the socket. + if (!inDelayedClose()) { + initializeDelayedCloseTimer(); + delayed_close_state_ = DelayedCloseState::CloseAfterFlushAndWait; + } + } else { + closeSocket(ConnectionEvent::LocalClose); + } } else { ASSERT(type == ConnectionCloseType::FlushWrite || type == ConnectionCloseType::FlushWriteAndDelay); - // No need to continue if a FlushWrite/FlushWriteAndDelay has already been issued and there is a - // pending delayed close. + // If there is a pending delayed close, simply update the delayed close state. 
// // An example of this condition manifests when a downstream connection is closed early by Envoy, // such as when a route can't be matched: @@ -123,35 +136,31 @@ void ConnectionImpl::close(ConnectionCloseType type) { // ConnectionManagerImpl::checkForDeferredClose() // 2) A second close is issued by a subsequent call to // ConnectionManagerImpl::checkForDeferredClose() prior to returning from onData() - if (delayed_close_) { + if (inDelayedClose()) { + // Validate that a delayed close timer is already enabled unless it was disabled via + // configuration. + ASSERT(!delayed_close_timeout_set || delayed_close_timer_ != nullptr); + if (type == ConnectionCloseType::FlushWrite || !delayed_close_timeout_set) { + delayed_close_state_ = DelayedCloseState::CloseAfterFlush; + } else { + delayed_close_state_ = DelayedCloseState::CloseAfterFlushAndWait; + } return; } - delayed_close_ = true; - const bool delayed_close_timeout_set = delayedCloseTimeout().count() > 0; - - // NOTE: the delayed close timeout (if set) affects both FlushWrite and FlushWriteAndDelay - // closes: - // 1. For FlushWrite, the timeout sets an upper bound on how long to wait for the flush to - // complete before the connection is locally closed. - // 2. For FlushWriteAndDelay, the timeout specifies an upper bound on how long to wait for the - // flush to complete and the peer to close the connection before it is locally closed. - // All close types that follow do not actually close() the socket immediately so that buffered // data can be written. However, we do want to stop reading to apply TCP backpressure. read_enabled_ = false; - // Force a closeSocket() after the write buffer is flushed if the close_type calls for it or if - // no delayed close timeout is set. - close_after_flush_ = !delayed_close_timeout_set || type == ConnectionCloseType::FlushWrite; - - // Create and activate a timer which will immediately close the connection if triggered. - // A config value of 0 disables the timeout. 
+ // NOTE: At this point, it's already been validated that the connection is not already in + // delayed close processing and therefore the timer has not yet been created. if (delayed_close_timeout_set) { - delayed_close_timer_ = dispatcher_.createTimer([this]() -> void { onDelayedCloseTimeout(); }); - ENVOY_CONN_LOG(debug, "setting delayed close timer with timeout {} ms", *this, - delayedCloseTimeout().count()); - delayed_close_timer_->enableTimer(delayedCloseTimeout()); + initializeDelayedCloseTimer(); + delayed_close_state_ = (type == ConnectionCloseType::FlushWrite) + ? DelayedCloseState::CloseAfterFlush + : DelayedCloseState::CloseAfterFlushAndWait; + } else { + delayed_close_state_ = DelayedCloseState::CloseAfterFlush; } file_event_->setEnabled(Event::FileReadyType::Write | @@ -160,9 +169,9 @@ void ConnectionImpl::close(ConnectionCloseType type) { } Connection::State ConnectionImpl::state() const { - if (ioHandle().fd() == -1) { + if (!ioHandle().isOpen()) { return State::Closed; - } else if (delayed_close_) { + } else if (inDelayedClose()) { return State::Closing; } else { return State::Open; @@ -170,7 +179,7 @@ Connection::State ConnectionImpl::state() const { } void ConnectionImpl::closeSocket(ConnectionEvent close_type) { - if (ioHandle().fd() == -1) { + if (!ioHandle().isOpen()) { return; } @@ -204,7 +213,7 @@ void ConnectionImpl::noDelay(bool enable) { // invalid. For this call instead of plumbing through logic that will immediately indicate that a // connect failed, we will just ignore the noDelay() call if the socket is invalid since error is // going to be raised shortly anyway and it makes the calling code simpler. 
- if (ioHandle().fd() == -1) { + if (!ioHandle().isOpen()) { return; } @@ -347,7 +356,15 @@ void ConnectionImpl::addBytesSentCallback(BytesSentCb cb) { bytes_sent_callbacks_.emplace_back(cb); } +void ConnectionImpl::rawWrite(Buffer::Instance& data, bool end_stream) { + write(data, end_stream, false); +} + void ConnectionImpl::write(Buffer::Instance& data, bool end_stream) { + write(data, end_stream, true); +} + +void ConnectionImpl::write(Buffer::Instance& data, bool end_stream, bool through_filter_chain) { ASSERT(!end_stream || enable_half_close_); if (write_end_stream_) { @@ -359,16 +376,18 @@ void ConnectionImpl::write(Buffer::Instance& data, bool end_stream) { return; } - // NOTE: This is kind of a hack, but currently we don't support restart/continue on the write - // path, so we just pass around the buffer passed to us in this function. If we ever support - // buffer/restart/continue on the write path this needs to get more complicated. - current_write_buffer_ = &data; - current_write_end_stream_ = end_stream; - FilterStatus status = filter_manager_.onWrite(); - current_write_buffer_ = nullptr; + if (through_filter_chain) { + // NOTE: This is kind of a hack, but currently we don't support restart/continue on the write + // path, so we just pass around the buffer passed to us in this function. If we ever + // support buffer/restart/continue on the write path this needs to get more complicated. + current_write_buffer_ = &data; + current_write_end_stream_ = end_stream; + FilterStatus status = filter_manager_.onWrite(); + current_write_buffer_ = nullptr; - if (FilterStatus::StopIteration == status) { - return; + if (FilterStatus::StopIteration == status) { + return; + } } write_end_stream_ = end_stream; @@ -469,7 +488,7 @@ void ConnectionImpl::onFileEvent(uint32_t events) { // It's possible for a write event callback to close the socket (which will cause fd_ to be -1). // In this case ignore write event processing. 
- if (ioHandle().fd() != -1 && (events & Event::FileReadyType::Read)) { + if (ioHandle().isOpen() && (events & Event::FileReadyType::Read)) { onReadReady(); } } @@ -504,6 +523,23 @@ void ConnectionImpl::onReadReady() { } } +absl::optional +ConnectionImpl::unixSocketPeerCredentials() const { + // TODO(snowp): Support non-linux platforms. +#ifndef SO_PEERCRED + return absl::nullopt; +#else + struct ucred ucred; + socklen_t ucred_size = sizeof(ucred); + int rc = getsockopt(ioHandle().fd(), SOL_SOCKET, SO_PEERCRED, &ucred, &ucred_size); + if (rc == -1) { + return absl::nullopt; + } + + return {{ucred.pid, ucred.uid, ucred.gid}}; +#endif +} + void ConnectionImpl::onWriteReady() { ENVOY_CONN_LOG(trace, "write ready", *this); @@ -534,21 +570,37 @@ void ConnectionImpl::onWriteReady() { uint64_t new_buffer_size = write_buffer_->length(); updateWriteBufferStats(result.bytes_processed_, new_buffer_size); + // NOTE: If the delayed_close_timer_ is set, it must only trigger after a delayed_close_timeout_ + // period of inactivity from the last write event. Therefore, the timer must be reset to its + // original timeout value unless the socket is going to be closed as a result of the doWrite(). + if (result.action_ == PostIoAction::Close) { // It is possible (though unlikely) for the connection to have already been closed during the // write callback. This can happen if we manage to complete the SSL handshake in the write // callback, raise a connected event, and close the connection. 
closeSocket(ConnectionEvent::RemoteClose); - } else if ((close_after_flush_ && new_buffer_size == 0) || bothSidesHalfClosed()) { + } else if ((inDelayedClose() && new_buffer_size == 0) || bothSidesHalfClosed()) { ENVOY_CONN_LOG(debug, "write flush complete", *this); - closeSocket(ConnectionEvent::LocalClose); - } else if (result.action_ == PostIoAction::KeepOpen && result.bytes_processed_ > 0) { - for (BytesSentCb& cb : bytes_sent_callbacks_) { - cb(result.bytes_processed_); - - // If a callback closes the socket, stop iterating. - if (ioHandle().fd() == -1) { - return; + if (delayed_close_state_ == DelayedCloseState::CloseAfterFlushAndWait) { + ASSERT(delayed_close_timer_ != nullptr); + delayed_close_timer_->enableTimer(delayedCloseTimeout()); + } else { + ASSERT(bothSidesHalfClosed() || delayed_close_state_ == DelayedCloseState::CloseAfterFlush); + closeSocket(ConnectionEvent::LocalClose); + } + } else { + ASSERT(result.action_ == PostIoAction::KeepOpen); + if (delayed_close_timer_ != nullptr) { + delayed_close_timer_->enableTimer(delayedCloseTimeout()); + } + if (result.bytes_processed_ > 0) { + for (BytesSentCb& cb : bytes_sent_callbacks_) { + cb(result.bytes_processed_); + + // If a callback closes the socket, stop iterating. 
+ if (!ioHandle().isOpen()) { + return; + } } } } @@ -587,6 +639,7 @@ bool ConnectionImpl::bothSidesHalfClosed() { } void ConnectionImpl::onDelayedCloseTimeout() { + delayed_close_timer_.reset(); ENVOY_CONN_LOG(debug, "triggered delayed close", *this); if (connection_stats_ != nullptr && connection_stats_->delayed_close_timeouts_ != nullptr) { connection_stats_->delayed_close_timeouts_->inc(); @@ -594,6 +647,18 @@ void ConnectionImpl::onDelayedCloseTimeout() { closeSocket(ConnectionEvent::LocalClose); } +void ConnectionImpl::initializeDelayedCloseTimer() { + const auto timeout = delayedCloseTimeout().count(); + ASSERT(delayed_close_timer_ == nullptr && timeout > 0); + delayed_close_timer_ = dispatcher_.createTimer([this]() -> void { onDelayedCloseTimeout(); }); + ENVOY_CONN_LOG(debug, "setting delayed close timer with timeout {} ms", *this, timeout); + delayed_close_timer_->enableTimer(delayedCloseTimeout()); +} + +absl::string_view ConnectionImpl::transportFailureReason() const { + return transport_socket_->failureReason(); +} + ClientConnectionImpl::ClientConnectionImpl( Event::Dispatcher& dispatcher, const Address::InstanceConstSharedPtr& remote_address, const Network::Address::InstanceConstSharedPtr& source_address, @@ -621,6 +686,7 @@ ClientConnectionImpl::ClientConnectionImpl( if (source_to_use != nullptr) { const Api::SysCallIntResult result = source_to_use->bind(ioHandle().fd()); if (result.rc_ < 0) { + // TODO(lizan): consider add this error into transportFailureReason. ENVOY_LOG_MISC(debug, "Bind failure. Failed to bind to {}: {}", source_to_use->asString(), strerror(result.errno_)); bind_error_ = true; diff --git a/source/common/network/connection_impl.h b/source/common/network/connection_impl.h index 80a222949ce1e..b0ba9671de933 100644 --- a/source/common/network/connection_impl.h +++ b/source/common/network/connection_impl.h @@ -44,10 +44,9 @@ class ConnectionImplUtility { }; /** - * Implementation of Network::Connection. 
+ * Implementation of Network::Connection and Network::FilterManagerConnection. */ -class ConnectionImpl : public virtual Connection, - public BufferSource, +class ConnectionImpl : public FilterManagerConnection, public TransportSocketCallbacks, protected Logger::Loggable { public: @@ -80,8 +79,9 @@ class ConnectionImpl : public virtual Connection, const Address::InstanceConstSharedPtr& localAddress() const override { return socket_->localAddress(); } + absl::optional unixSocketPeerCredentials() const override; void setConnectionStats(const ConnectionStats& stats) override; - const Ssl::Connection* ssl() const override { return transport_socket_->ssl(); } + const Ssl::ConnectionInfo* ssl() const override { return transport_socket_->ssl(); } State state() const override; void write(Buffer::Instance& data, bool end_stream) override; void setBufferLimits(uint32_t limit) override; @@ -94,12 +94,15 @@ class ConnectionImpl : public virtual Connection, absl::string_view requestedServerName() const override { return socket_->requestedServerName(); } StreamInfo::StreamInfo& streamInfo() override { return stream_info_; } const StreamInfo::StreamInfo& streamInfo() const override { return stream_info_; } - void setWriteFilterOrder(bool reversed) override { reverse_write_filter_order_ = reversed; } - bool reverseWriteFilterOrder() const override { return reverse_write_filter_order_; } + absl::string_view transportFailureReason() const override; - // Network::BufferSource - BufferSource::StreamBuffer getReadBuffer() override { return {read_buffer_, read_end_stream_}; } - BufferSource::StreamBuffer getWriteBuffer() override { + // Network::FilterManagerConnection + void rawWrite(Buffer::Instance& data, bool end_stream) override; + + // Network::ReadBufferSource + StreamBuffer getReadBuffer() override { return {read_buffer_, read_end_stream_}; } + // Network::WriteBufferSource + StreamBuffer getWriteBuffer() override { return {*current_write_buffer_, current_write_end_stream_}; } 
@@ -123,6 +126,8 @@ class ConnectionImpl : public virtual Connection, static uint64_t nextGlobalIdForTest() { return next_global_id_; } void setDelayedCloseTimeout(std::chrono::milliseconds timeout) override { + // Validate that this is only called prior to issuing a close() or closeSocket(). + ASSERT(delayed_close_timer_ == nullptr && ioHandle().isOpen()); delayed_close_timeout_ = timeout; } std::chrono::milliseconds delayedCloseTimeout() const override { return delayed_close_timeout_; } @@ -162,22 +167,41 @@ class ConnectionImpl : public virtual Connection, void updateReadBufferStats(uint64_t num_read, uint64_t new_size); void updateWriteBufferStats(uint64_t num_written, uint64_t new_size); + // Write data to the connection bypassing filter chain (optionally). + void write(Buffer::Instance& data, bool end_stream, bool through_filter_chain); + // Returns true iff end of stream has been both written and read. bool bothSidesHalfClosed(); // Callback issued when a delayed close timeout triggers. void onDelayedCloseTimeout(); + void initializeDelayedCloseTimer(); + bool inDelayedClose() const { return delayed_close_state_ != DelayedCloseState::None; } + static std::atomic next_global_id_; + // States associated with delayed closing of the connection (i.e., when the underlying socket is + // not immediately close()d as a result of a ConnectionImpl::close()). + enum class DelayedCloseState { + None, + // The socket will be closed immediately after the buffer is flushed _or_ if a period of + // inactivity after the last write event greater than or equal to delayed_close_timeout_ has + // elapsed. + CloseAfterFlush, + // The socket will be closed after a grace period of delayed_close_timeout_ has elapsed after + // the socket is flushed _or_ if a period of inactivity after the last write event greater than + // or equal to delayed_close_timeout_ has elapsed. 
+ CloseAfterFlushAndWait + }; + DelayedCloseState delayed_close_state_{DelayedCloseState::None}; + Event::Dispatcher& dispatcher_; const uint64_t id_; Event::TimerPtr delayed_close_timer_; std::list callbacks_; std::list bytes_sent_callbacks_; bool read_enabled_{true}; - bool close_after_flush_{false}; - bool delayed_close_{false}; bool above_high_watermark_{false}; bool detect_early_close_{true}; bool enable_half_close_{false}; @@ -193,7 +217,6 @@ class ConnectionImpl : public virtual Connection, // readDisabled(true) this allows the connection to only resume reads when readDisabled(false) // has been called N times. uint32_t read_disable_count_{0}; - bool reverse_write_filter_order_{false}; }; /** diff --git a/source/common/network/dns_impl.cc b/source/common/network/dns_impl.cc index 1ece7d020fcab..810dd19429d51 100644 --- a/source/common/network/dns_impl.cc +++ b/source/common/network/dns_impl.cc @@ -25,11 +25,6 @@ DnsResolverImpl::DnsResolverImpl( const std::vector& resolvers) : dispatcher_(dispatcher), timer_(dispatcher.createTimer([this] { onEventCallback(ARES_SOCKET_BAD, 0); })) { - // This is also done in main(), to satisfy the requirement that c-ares is - // initialized prior to threading. The additional call to ares_library_init() - // here is a nop in normal execution, but exists for testing where we don't - // launch via main(). - ares_library_init(ARES_LIB_INIT_ALL); ares_options options; initializeChannel(&options, 0); @@ -41,7 +36,6 @@ DnsResolverImpl::DnsResolverImpl( // This should be an IP address (i.e. not a pipe). 
if (resolver->ip() == nullptr) { ares_destroy(channel_); - ares_library_cleanup(); throw EnvoyException( fmt::format("DNS resolver '{}' is not an IP address", resolver->asString())); } @@ -63,7 +57,6 @@ DnsResolverImpl::DnsResolverImpl( DnsResolverImpl::~DnsResolverImpl() { timer_->disableTimer(); ares_destroy(channel_); - ares_library_cleanup(); } void DnsResolverImpl::initializeChannel(ares_options* options, int optmask) { @@ -223,12 +216,12 @@ ActiveDnsQuery* DnsResolverImpl::resolve(const std::string& dns_name, } void DnsResolverImpl::PendingResolution::getHostByName(int family) { - ares_gethostbyname(channel_, dns_name_.c_str(), family, - [](void* arg, int status, int timeouts, hostent* hostent) { - static_cast(arg)->onAresHostCallback(status, timeouts, - hostent); - }, - this); + ares_gethostbyname( + channel_, dns_name_.c_str(), family, + [](void* arg, int status, int timeouts, hostent* hostent) { + static_cast(arg)->onAresHostCallback(status, timeouts, hostent); + }, + this); } } // namespace Network diff --git a/source/common/network/filter_manager_impl.cc b/source/common/network/filter_manager_impl.cc index 5b796dd14217e..3a78ca8499695 100644 --- a/source/common/network/filter_manager_impl.cc +++ b/source/common/network/filter_manager_impl.cc @@ -11,11 +11,9 @@ namespace Network { void FilterManagerImpl::addWriteFilter(WriteFilterSharedPtr filter) { ASSERT(connection_.state() == Connection::State::Open); - if (connection_.reverseWriteFilterOrder()) { - downstream_filters_.emplace_front(filter); - } else { - downstream_filters_.emplace_back(filter); - } + ActiveWriteFilterPtr new_filter(new ActiveWriteFilter{*this, filter}); + filter->initializeWriteFilterCallbacks(*new_filter); + new_filter->moveIntoList(std::move(new_filter), downstream_filters_); } void FilterManagerImpl::addFilter(FilterSharedPtr filter) { @@ -34,11 +32,12 @@ bool FilterManagerImpl::initializeReadFilters() { if (upstream_filters_.empty()) { return false; } - onContinueReading(nullptr); 
+ onContinueReading(nullptr, connection_); return true; } -void FilterManagerImpl::onContinueReading(ActiveReadFilter* filter) { +void FilterManagerImpl::onContinueReading(ActiveReadFilter* filter, + ReadBufferSource& buffer_source) { std::list::iterator entry; if (!filter) { entry = upstream_filters_.begin(); @@ -55,7 +54,7 @@ void FilterManagerImpl::onContinueReading(ActiveReadFilter* filter) { } } - BufferSource::StreamBuffer read_buffer = buffer_source_.getReadBuffer(); + StreamBuffer read_buffer = buffer_source.getReadBuffer(); if (read_buffer.buffer.length() > 0 || read_buffer.end_stream) { FilterStatus status = (*entry)->filter_->onData(read_buffer.buffer, read_buffer.end_stream); if (status == FilterStatus::StopIteration) { @@ -67,13 +66,23 @@ void FilterManagerImpl::onContinueReading(ActiveReadFilter* filter) { void FilterManagerImpl::onRead() { ASSERT(!upstream_filters_.empty()); - onContinueReading(nullptr); + onContinueReading(nullptr, connection_); } -FilterStatus FilterManagerImpl::onWrite() { - for (const WriteFilterSharedPtr& filter : downstream_filters_) { - BufferSource::StreamBuffer write_buffer = buffer_source_.getWriteBuffer(); - FilterStatus status = filter->onWrite(write_buffer.buffer, write_buffer.end_stream); +FilterStatus FilterManagerImpl::onWrite() { return onWrite(nullptr, connection_); } + +FilterStatus FilterManagerImpl::onWrite(ActiveWriteFilter* filter, + WriteBufferSource& buffer_source) { + std::list::iterator entry; + if (!filter) { + entry = downstream_filters_.begin(); + } else { + entry = std::next(filter->entry()); + } + + for (; entry != downstream_filters_.end(); entry++) { + StreamBuffer write_buffer = buffer_source.getWriteBuffer(); + FilterStatus status = (*entry)->filter_->onWrite(write_buffer.buffer, write_buffer.end_stream); if (status == FilterStatus::StopIteration) { return status; } @@ -82,5 +91,14 @@ FilterStatus FilterManagerImpl::onWrite() { return FilterStatus::Continue; } +void 
FilterManagerImpl::onResumeWriting(ActiveWriteFilter* filter, + WriteBufferSource& buffer_source) { + auto status = onWrite(filter, buffer_source); + if (status == FilterStatus::Continue) { + StreamBuffer write_buffer = buffer_source.getWriteBuffer(); + connection_.rawWrite(write_buffer.buffer, write_buffer.end_stream); + } +} + } // namespace Network } // namespace Envoy diff --git a/source/common/network/filter_manager_impl.h b/source/common/network/filter_manager_impl.h index b2e49b61fe021..6db1332c61749 100644 --- a/source/common/network/filter_manager_impl.h +++ b/source/common/network/filter_manager_impl.h @@ -3,6 +3,7 @@ #include #include +#include "envoy/network/connection.h" #include "envoy/network/filter.h" #include "common/common/linked_object.h" @@ -10,22 +11,30 @@ namespace Envoy { namespace Network { +struct StreamBuffer { + Buffer::Instance& buffer; + const bool end_stream; +}; + /** - * Interface used to obtain read/write buffers. + * Interface used to obtain read buffers. */ -class BufferSource { +class ReadBufferSource { public: - virtual ~BufferSource() {} - - struct StreamBuffer { - Buffer::Instance& buffer; - bool end_stream; - }; + virtual ~ReadBufferSource() {} /** * Fetch the read buffer for the source. */ virtual StreamBuffer getReadBuffer() PURE; +}; + +/** + * Interface used to obtain write buffers. + */ +class WriteBufferSource { +public: + virtual ~WriteBufferSource() {} /** * Fetch the write buffer for the source. @@ -33,13 +42,65 @@ class BufferSource { virtual StreamBuffer getWriteBuffer() PURE; }; +/** + * Adapter that masquerades a given buffer instance as a ReadBufferSource. 
+ */ +class FixedReadBufferSource : public ReadBufferSource { +public: + FixedReadBufferSource(Buffer::Instance& data, bool end_stream) + : data_(data), end_stream_(end_stream) {} + + StreamBuffer getReadBuffer() override { return {data_, end_stream_}; } + +private: + Buffer::Instance& data_; + const bool end_stream_; +}; + +/** + * Adapter that masquerades a given buffer instance as a WriteBufferSource. + */ +class FixedWriteBufferSource : public WriteBufferSource { +public: + FixedWriteBufferSource(Buffer::Instance& data, bool end_stream) + : data_(data), end_stream_(end_stream) {} + + StreamBuffer getWriteBuffer() override { return {data_, end_stream_}; } + +private: + Buffer::Instance& data_; + const bool end_stream_; +}; + +/** + * Connection enriched with methods for advanced cases, i.e. write data bypassing filter chain. + * + * Since FilterManager is the only user of those methods for now, the class is named after it. + */ +class FilterManagerConnection : public virtual Connection, + public ReadBufferSource, + public WriteBufferSource { +public: + virtual ~FilterManagerConnection() {} + + /** + * Write data to the connection bypassing filter chain. + * + * I.e., consider a scenario where iteration over the filter chain is stopped at some point + * and later is resumed via a call to WriteFilterCallbacks::injectWriteDataToFilterChain(). + * + * @param data supplies the data to write to the connection. + * @param end_stream supplies whether this is the last byte to write on the connection. + */ + virtual void rawWrite(Buffer::Instance& data, bool end_stream) PURE; +}; + /** * This is a filter manager for TCP (L4) filters. It is split out for ease of testing. 
*/ class FilterManagerImpl { public: - FilterManagerImpl(Connection& connection, BufferSource& buffer_source) - : connection_(connection), buffer_source_(buffer_source) {} + FilterManagerImpl(FilterManagerConnection& connection) : connection_(connection) {} void addWriteFilter(WriteFilterSharedPtr filter); void addFilter(FilterSharedPtr filter); @@ -54,7 +115,11 @@ class FilterManagerImpl { : parent_(parent), filter_(filter) {} Connection& connection() override { return parent_.connection_; } - void continueReading() override { parent_.onContinueReading(this); } + void continueReading() override { parent_.onContinueReading(this, parent_.connection_); } + void injectReadDataToFilterChain(Buffer::Instance& data, bool end_stream) override { + FixedReadBufferSource buffer_source{data, end_stream}; + parent_.onContinueReading(this, buffer_source); + } Upstream::HostDescriptionConstSharedPtr upstreamHost() override { return parent_.host_description_; } @@ -69,13 +134,31 @@ class FilterManagerImpl { typedef std::unique_ptr ActiveReadFilterPtr; - void onContinueReading(ActiveReadFilter* filter); + struct ActiveWriteFilter : public WriteFilterCallbacks, LinkedObject { + ActiveWriteFilter(FilterManagerImpl& parent, WriteFilterSharedPtr filter) + : parent_(parent), filter_(filter) {} + + Connection& connection() override { return parent_.connection_; } + void injectWriteDataToFilterChain(Buffer::Instance& data, bool end_stream) override { + FixedWriteBufferSource buffer_source{data, end_stream}; + parent_.onResumeWriting(this, buffer_source); + } + + FilterManagerImpl& parent_; + WriteFilterSharedPtr filter_; + }; + + typedef std::unique_ptr ActiveWriteFilterPtr; + + void onContinueReading(ActiveReadFilter* filter, ReadBufferSource& buffer_source); + + FilterStatus onWrite(ActiveWriteFilter* filter, WriteBufferSource& buffer_source); + void onResumeWriting(ActiveWriteFilter* filter, WriteBufferSource& buffer_source); - Connection& connection_; - BufferSource& buffer_source_; 
+ FilterManagerConnection& connection_; Upstream::HostDescriptionConstSharedPtr host_description_; std::list upstream_filters_; - std::list downstream_filters_; + std::list downstream_filters_; }; } // namespace Network diff --git a/source/common/network/io_socket_error_impl.cc b/source/common/network/io_socket_error_impl.cc new file mode 100644 index 0000000000000..8f625e3c02a5e --- /dev/null +++ b/source/common/network/io_socket_error_impl.cc @@ -0,0 +1,42 @@ +#include "common/network/io_socket_error_impl.h" + +#include "common/common/assert.h" + +namespace Envoy { +namespace Network { + +Api::IoError::IoErrorCode IoSocketError::getErrorCode() const { + switch (errno_) { + case EAGAIN: + ASSERT(this == IoSocketError::getIoSocketEagainInstance(), + "Didn't use getIoSocketEagainInstance() to generate `Again`."); + return IoErrorCode::Again; + case ENOTSUP: + return IoErrorCode::NoSupport; + case EAFNOSUPPORT: + return IoErrorCode::AddressFamilyNoSupport; + case EINPROGRESS: + return IoErrorCode::InProgress; + case EPERM: + return IoErrorCode::Permission; + default: + return IoErrorCode::UnknownError; + } +} + +std::string IoSocketError::getErrorDetails() const { return ::strerror(errno_); } + +IoSocketError* IoSocketError::getIoSocketEagainInstance() { + static auto* instance = new IoSocketError(EAGAIN); + return instance; +} + +void IoSocketError::deleteIoError(Api::IoError* err) { + ASSERT(err != nullptr); + if (err->getErrorCode() != Api::IoError::IoErrorCode::Again) { + delete err; + } +} + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/io_socket_error_impl.h b/source/common/network/io_socket_error_impl.h new file mode 100644 index 0000000000000..a28e5c841eec0 --- /dev/null +++ b/source/common/network/io_socket_error_impl.h @@ -0,0 +1,32 @@ +#pragma once + +#include "envoy/api/io_error.h" + +#include "common/common/assert.h" + +namespace Envoy { +namespace Network { + +class IoSocketError : public Api::IoError { +public: + 
explicit IoSocketError(int sys_errno) : errno_(sys_errno) {} + + ~IoSocketError() override {} + + Api::IoError::IoErrorCode getErrorCode() const override; + std::string getErrorDetails() const override; + + // IoErrorCode::Again is used frequently. Define it to be a singleton to avoid frequent memory + // allocation of such instance. If this is used, IoHandleCallResult has to be instantiated with + // deleter deleteIoError() below to avoid deallocating memory for this error. + static IoSocketError* getIoSocketEagainInstance(); + + // Deallocate memory only if the error is not Again. + static void deleteIoError(Api::IoError* err); + +private: + int errno_; +}; + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/io_socket_handle_impl.cc b/source/common/network/io_socket_handle_impl.cc new file mode 100644 index 0000000000000..2265b5787ea77 --- /dev/null +++ b/source/common/network/io_socket_handle_impl.cc @@ -0,0 +1,89 @@ +#include "common/network/io_socket_handle_impl.h" + +#include + +#include + +#include "envoy/buffer/buffer.h" + +#include "common/api/os_sys_calls_impl.h" +#include "common/common/stack_array.h" +#include "common/network/io_socket_error_impl.h" + +using Envoy::Api::SysCallIntResult; +using Envoy::Api::SysCallSizeResult; + +namespace Envoy { +namespace Network { + +IoSocketHandleImpl::~IoSocketHandleImpl() { + if (fd_ != -1) { + IoSocketHandleImpl::close(); + } +} + +Api::IoCallUint64Result IoSocketHandleImpl::close() { + ASSERT(fd_ != -1); + const int rc = ::close(fd_); + fd_ = -1; + return Api::IoCallUint64Result(rc, Api::IoErrorPtr(nullptr, IoSocketError::deleteIoError)); +} + +bool IoSocketHandleImpl::isOpen() const { return fd_ != -1; } + +Api::IoCallUint64Result IoSocketHandleImpl::readv(uint64_t max_length, Buffer::RawSlice* slices, + uint64_t num_slice) { + STACK_ARRAY(iov, iovec, num_slice); + uint64_t num_slices_to_read = 0; + uint64_t num_bytes_to_read = 0; + for (; num_slices_to_read < num_slice && 
num_bytes_to_read < max_length; num_slices_to_read++) { + iov[num_slices_to_read].iov_base = slices[num_slices_to_read].mem_; + const size_t slice_length = std::min(slices[num_slices_to_read].len_, + static_cast(max_length - num_bytes_to_read)); + iov[num_slices_to_read].iov_len = slice_length; + num_bytes_to_read += slice_length; + } + ASSERT(num_bytes_to_read <= max_length); + auto& os_syscalls = Api::OsSysCallsSingleton::get(); + const Api::SysCallSizeResult result = + os_syscalls.readv(fd_, iov.begin(), static_cast(num_slices_to_read)); + return sysCallResultToIoCallResult(result); +} + +Api::IoCallUint64Result IoSocketHandleImpl::writev(const Buffer::RawSlice* slices, + uint64_t num_slice) { + STACK_ARRAY(iov, iovec, num_slice); + uint64_t num_slices_to_write = 0; + for (uint64_t i = 0; i < num_slice; i++) { + if (slices[i].mem_ != nullptr && slices[i].len_ != 0) { + iov[num_slices_to_write].iov_base = slices[i].mem_; + iov[num_slices_to_write].iov_len = slices[i].len_; + num_slices_to_write++; + } + } + if (num_slices_to_write == 0) { + return Api::ioCallUint64ResultNoError(); + } + auto& os_syscalls = Api::OsSysCallsSingleton::get(); + const Api::SysCallSizeResult result = os_syscalls.writev(fd_, iov.begin(), num_slices_to_write); + return sysCallResultToIoCallResult(result); +} + +Api::IoCallUint64Result +IoSocketHandleImpl::sysCallResultToIoCallResult(const Api::SysCallSizeResult& result) { + if (result.rc_ >= 0) { + // Return nullptr as IoError upon success. + return Api::IoCallUint64Result(result.rc_, + Api::IoErrorPtr(nullptr, IoSocketError::deleteIoError)); + } + return Api::IoCallUint64Result( + /*rc=*/0, + (result.errno_ == EAGAIN + // EAGAIN is frequent enough that its memory allocation should be avoided. + ? 
Api::IoErrorPtr(IoSocketError::getIoSocketEagainInstance(), + IoSocketError::deleteIoError) + : Api::IoErrorPtr(new IoSocketError(result.errno_), IoSocketError::deleteIoError))); +} + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/io_socket_handle_impl.h b/source/common/network/io_socket_handle_impl.h index d54399a393ea0..3ba413a009a25 100644 --- a/source/common/network/io_socket_handle_impl.h +++ b/source/common/network/io_socket_handle_impl.h @@ -1,34 +1,40 @@ #pragma once +#include "envoy/api/io_error.h" +#include "envoy/api/os_sys_calls.h" #include "envoy/network/io_handle.h" -#include "common/common/assert.h" - namespace Envoy { namespace Network { /** * IoHandle derivative for sockets */ -class IoSocketHandle : public IoHandle { +class IoSocketHandleImpl : public IoHandle { public: - IoSocketHandle(int fd = -1) : fd_(fd) {} + explicit IoSocketHandleImpl(int fd = -1) : fd_(fd) {} - // TODO(sbelair2) Call close() in destructor - ~IoSocketHandle() { ASSERT(fd_ == -1); } + // Close underlying socket if close() hasn't been called yet. + ~IoSocketHandleImpl() override; // TODO(sbelair2) To be removed when the fd is fully abstracted from clients. int fd() const override { return fd_; } - // Currently this close() is just for the IoHandle, and the close() system call - // happens elsewhere. In coming changes, the close() syscall will be made from the IoHandle. - // In particular, the close should also close the fd. - void close() override { fd_ = -1; } + Api::IoCallUint64Result close() override; + + bool isOpen() const override; + + Api::IoCallUint64Result readv(uint64_t max_length, Buffer::RawSlice* slices, + uint64_t num_slice) override; + + Api::IoCallUint64Result writev(const Buffer::RawSlice* slices, uint64_t num_slice) override; private: + // Converts a SysCallSizeResult to IoCallUint64Result. 
+ Api::IoCallUint64Result sysCallResultToIoCallResult(const Api::SysCallSizeResult& result); + int fd_; }; -typedef std::unique_ptr IoSocketHandlePtr; } // namespace Network } // namespace Envoy diff --git a/source/common/network/lc_trie.h b/source/common/network/lc_trie.h index 51fc6921340a4..0eca74ea0f0f1 100644 --- a/source/common/network/lc_trie.h +++ b/source/common/network/lc_trie.h @@ -731,7 +731,7 @@ LcTrie::LcTrieInternal::getData(const IpType& ip_addres } // The path taken through the trie to match the ip_address may have contained skips, - // so it is necessary to check whether the the matched prefix really contains the + // so it is necessary to check whether the matched prefix really contains the // ip_address. const auto& prefix = ip_prefixes_[address]; if (prefix.contains(ip_address)) { diff --git a/source/common/network/listen_socket_impl.h b/source/common/network/listen_socket_impl.h index 86795687ec687..9e7403159c3c4 100644 --- a/source/common/network/listen_socket_impl.h +++ b/source/common/network/listen_socket_impl.h @@ -27,8 +27,7 @@ class SocketImpl : public virtual Socket { IoHandle& ioHandle() override { return *io_handle_; } const IoHandle& ioHandle() const override { return *io_handle_; } void close() override { - if (io_handle_->fd() != -1) { - ::close(io_handle_->fd()); + if (io_handle_->isOpen()) { io_handle_->close(); } } diff --git a/source/common/network/listener_impl.cc b/source/common/network/listener_impl.cc index 998b52c25c466..77b5978be4dde 100644 --- a/source/common/network/listener_impl.cc +++ b/source/common/network/listener_impl.cc @@ -21,8 +21,8 @@ void ListenerImpl::listenCallback(evconnlistener*, evutil_socket_t fd, sockaddr* int remote_addr_len, void* arg) { ListenerImpl* listener = static_cast(arg); - // Create the IoSocketHandle for the fd here. - IoHandlePtr io_handle = std::make_unique(fd); + // Create the IoSocketHandleImpl for the fd here. 
+ IoHandlePtr io_handle = std::make_unique(fd); // Get the local address from the new socket if the listener is listening on IP ANY // (e.g., 0.0.0.0 for IPv4) (local_address_ is nullptr in this case). diff --git a/source/common/network/raw_buffer_socket.cc b/source/common/network/raw_buffer_socket.cc index 21dff87a8e0f5..7da6e9c6c12da 100644 --- a/source/common/network/raw_buffer_socket.cc +++ b/source/common/network/raw_buffer_socket.cc @@ -17,26 +17,28 @@ IoResult RawBufferSocket::doRead(Buffer::Instance& buffer) { bool end_stream = false; do { // 16K read is arbitrary. TODO(mattklein123) PERF: Tune the read size. - Api::SysCallIntResult result = buffer.read(callbacks_->ioHandle().fd(), 16384); - ENVOY_CONN_LOG(trace, "read returns: {}", callbacks_->connection(), result.rc_); + Api::IoCallUint64Result result = buffer.read(callbacks_->ioHandle(), 16384); - if (result.rc_ == 0) { - // Remote close. - end_stream = true; - break; - } else if (result.rc_ == -1) { - // Remote error (might be no data). - ENVOY_CONN_LOG(trace, "read error: {}", callbacks_->connection(), result.errno_); - if (result.errno_ != EAGAIN) { - action = PostIoAction::Close; + if (result.ok()) { + ENVOY_CONN_LOG(trace, "read returns: {}", callbacks_->connection(), result.rc_); + if (result.rc_ == 0) { + // Remote close. + end_stream = true; + break; } - break; - } else { bytes_read += result.rc_; if (callbacks_->shouldDrainReadBuffer()) { callbacks_->setReadBufferReady(); break; } + } else { + // Remote error (might be no data). 
+ ENVOY_CONN_LOG(trace, "read error: {}", callbacks_->connection(), + result.err_->getErrorDetails()); + if (result.err_->getErrorCode() != Api::IoError::IoErrorCode::Again) { + action = PostIoAction::Close; + } + break; } } while (true); @@ -58,20 +60,20 @@ IoResult RawBufferSocket::doWrite(Buffer::Instance& buffer, bool end_stream) { action = PostIoAction::KeepOpen; break; } - Api::SysCallIntResult result = buffer.write(callbacks_->ioHandle().fd()); - ENVOY_CONN_LOG(trace, "write returns: {}", callbacks_->connection(), result.rc_); + Api::IoCallUint64Result result = buffer.write(callbacks_->ioHandle()); - if (result.rc_ == -1) { - ENVOY_CONN_LOG(trace, "write error: {} ({})", callbacks_->connection(), result.errno_, - strerror(result.errno_)); - if (result.errno_ == EAGAIN) { + if (result.ok()) { + ENVOY_CONN_LOG(trace, "write returns: {}", callbacks_->connection(), result.rc_); + bytes_written += result.rc_; + } else { + ENVOY_CONN_LOG(trace, "write error: {}", callbacks_->connection(), + result.err_->getErrorDetails()); + if (result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again) { action = PostIoAction::KeepOpen; } else { action = PostIoAction::Close; } break; - } else { - bytes_written += result.rc_; } } while (true); @@ -79,6 +81,7 @@ IoResult RawBufferSocket::doWrite(Buffer::Instance& buffer, bool end_stream) { } std::string RawBufferSocket::protocol() const { return EMPTY_STRING; } +absl::string_view RawBufferSocket::failureReason() const { return EMPTY_STRING; } void RawBufferSocket::onConnected() { callbacks_->raiseEvent(ConnectionEvent::Connected); } diff --git a/source/common/network/raw_buffer_socket.h b/source/common/network/raw_buffer_socket.h index aeb48825e949f..5183ce18fbc9e 100644 --- a/source/common/network/raw_buffer_socket.h +++ b/source/common/network/raw_buffer_socket.h @@ -14,12 +14,13 @@ class RawBufferSocket : public TransportSocket, protected Logger::Loggablereserve(read_length, &slice, 1); ASSERT(num_slices == 1); - // 
TODO(conqerAtapple): Use os_syscalls - const ssize_t rc = ::recvfrom(socket_.ioHandle().fd(), slice.mem_, read_length, 0, - reinterpret_cast(&peer_addr), &addr_len); - if (rc < 0) { - return ReceiveResult{Api::SysCallIntResult{static_cast(rc), errno}, nullptr}; - } - slice.len_ = std::min(slice.len_, static_cast(rc)); + auto& os_sys_calls = Api::OsSysCallsSingleton::get(); + const Api::SysCallSizeResult result = + os_sys_calls.recvfrom(socket_.ioHandle().fd(), slice.mem_, read_length, 0, + reinterpret_cast(&peer_addr), &addr_len); + if (result.rc_ < 0) { + return ReceiveResult{Api::SysCallIntResult{static_cast(result.rc_), result.errno_}, + nullptr}; + } + slice.len_ = std::min(slice.len_, static_cast(result.rc_)); buffer->commit(&slice, 1); - return ReceiveResult{Api::SysCallIntResult{static_cast(rc), 0}, std::move(buffer)}; + return ReceiveResult{Api::SysCallIntResult{static_cast(result.rc_), 0}, std::move(buffer)}; } void UdpListenerImpl::onSocketEvent(short flags) { diff --git a/source/common/network/utility.cc b/source/common/network/utility.cc index b495ce628effc..e7f7039e9a405 100644 --- a/source/common/network/utility.cc +++ b/source/common/network/utility.cc @@ -150,7 +150,7 @@ Address::InstanceConstSharedPtr Utility::parseInternetAddressAndPort(const std:: const auto ip_str = ip_address.substr(1, pos - 1); const auto port_str = ip_address.substr(pos + 2); uint64_t port64 = 0; - if (port_str.empty() || !StringUtil::atoul(port_str.c_str(), port64, 10) || port64 > 65535) { + if (port_str.empty() || !absl::SimpleAtoi(port_str, &port64) || port64 > 65535) { throwWithMalformedIp(ip_address); } sockaddr_in6 sa6; @@ -170,7 +170,7 @@ Address::InstanceConstSharedPtr Utility::parseInternetAddressAndPort(const std:: const auto ip_str = ip_address.substr(0, pos); const auto port_str = ip_address.substr(pos + 1); uint64_t port64 = 0; - if (port_str.empty() || !StringUtil::atoul(port_str.c_str(), port64, 10) || port64 > 65535) { + if (port_str.empty() || 
!absl::SimpleAtoi(port_str, &port64) || port64 > 65535) { throwWithMalformedIp(ip_address); } sockaddr_in sa4; diff --git a/source/common/profiler/profiler.cc b/source/common/profiler/profiler.cc index e223cfc8aef56..74fb9478cba35 100644 --- a/source/common/profiler/profiler.cc +++ b/source/common/profiler/profiler.cc @@ -18,6 +18,26 @@ bool Cpu::startProfiler(const std::string& output_path) { void Cpu::stopProfiler() { ProfilerStop(); } +bool Heap::profilerEnabled() { + // determined by PROFILER_AVAILABLE + return true; +} + +bool Heap::isProfilerStarted() { return IsHeapProfilerRunning(); } +bool Heap::startProfiler(const std::string& output_file_name_prefix) { + HeapProfilerStart(output_file_name_prefix.c_str()); + return true; +} + +bool Heap::stopProfiler() { + if (!IsHeapProfilerRunning()) { + return false; + } + HeapProfilerDump("stop and dump"); + HeapProfilerStop(); + return true; +} + void Heap::forceLink() { // Currently this is here to force the inclusion of the heap profiler during static linking. // Without this call the heap profiler will not be included and cannot be started via env @@ -37,6 +57,10 @@ bool Cpu::profilerEnabled() { return false; } bool Cpu::startProfiler(const std::string&) { return false; } void Cpu::stopProfiler() {} +bool Heap::profilerEnabled() { return false; } +bool Heap::isProfilerStarted() { return false; } +bool Heap::startProfiler(const std::string&) { return false; } +bool Heap::stopProfiler() { return false; } } // namespace Profiler } // namespace Envoy diff --git a/source/common/profiler/profiler.h b/source/common/profiler/profiler.h index bd2e05fcf2719..d61ff851058cf 100644 --- a/source/common/profiler/profiler.h +++ b/source/common/profiler/profiler.h @@ -38,6 +38,29 @@ class Cpu { * Process wide heap profiling */ class Heap { +public: + /** + * @return whether the profiler is enabled in this build or not. 
+ */ + static bool profilerEnabled(); + + /** + * @return whether the profiler is started or not + */ + static bool isProfilerStarted(); + + /** + * Start the profiler and write to the specified path. + * @return bool whether the call to start the profiler succeeded. + */ + static bool startProfiler(const std::string& output_path); + + /** + * Stop the profiler. + * @return bool whether the file is dumped + */ + static bool stopProfiler(); + private: static void forceLink(); }; diff --git a/source/common/protobuf/BUILD b/source/common/protobuf/BUILD index c07212310aa75..a563c0d95c965 100644 --- a/source/common/protobuf/BUILD +++ b/source/common/protobuf/BUILD @@ -41,6 +41,7 @@ envoy_cc_library( deps = [ ":protobuf", "//include/envoy/api:api_interface", + "//include/envoy/runtime:runtime_interface", "//source/common/common:assert_lib", "//source/common/common:hash_lib", "//source/common/common:utility_lib", diff --git a/source/common/protobuf/protobuf.h b/source/common/protobuf/protobuf.h index 2a1a1d7354744..fbeffd9dcb4b4 100644 --- a/source/common/protobuf/protobuf.h +++ b/source/common/protobuf/protobuf.h @@ -10,7 +10,8 @@ #include "google/protobuf/empty.pb.h" #include "google/protobuf/io/coded_stream.h" #include "google/protobuf/io/zero_copy_stream.h" -#include "google/protobuf/io/zero_copy_stream_impl_lite.h" +#include "google/protobuf/io/zero_copy_stream_impl.h" +#include "google/protobuf/map.h" #include "google/protobuf/message.h" #include "google/protobuf/repeated_field.h" #include "google/protobuf/service.h" @@ -39,14 +40,12 @@ namespace ProtobufUtil = google::protobuf::util; // namespace. namespace ProtobufWkt = google::protobuf; -// Alternative protobuf implementations might not use std::string as a string -// type. Below we provide wrappers to facilitate remapping of the type during -// import. +// Alternative protobuf implementations might not have the same basic types. +// Below we provide wrappers to facilitate remapping of the type during import. 
namespace ProtobufTypes { typedef std::unique_ptr MessagePtr; -typedef std::string String; typedef int64_t Int64; } // namespace ProtobufTypes diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc index 404abb2e572c2..a1aa4036704b9 100644 --- a/source/common/protobuf/utility.cc +++ b/source/common/protobuf/utility.cc @@ -6,8 +6,36 @@ #include "common/protobuf/protobuf.h" #include "absl/strings/match.h" +#include "yaml-cpp/yaml.h" namespace Envoy { +namespace { + +absl::string_view filenameFromPath(absl::string_view full_path) { + size_t index = full_path.rfind("/"); + if (index == std::string::npos || index == full_path.size()) { + return full_path; + } + return full_path.substr(index + 1, full_path.size()); +} + +void blockFormat(YAML::Node node) { + node.SetStyle(YAML::EmitterStyle::Block); + + if (node.Type() == YAML::NodeType::Sequence) { + for (auto it : node) { + blockFormat(it); + } + } + if (node.Type() == YAML::NodeType::Map) { + for (auto it : node) { + blockFormat(it.second); + } + } +} + +} // namespace + namespace ProtobufPercentHelper { uint64_t checkAndReturnDefault(uint64_t default_value, uint64_t max_value) { @@ -21,6 +49,11 @@ uint64_t convertPercent(double percent, uint64_t max_value) { return max_value * (percent / 100.0); } +bool evaluateFractionalPercent(envoy::type::FractionalPercent percent, uint64_t random_value) { + return random_value % fractionalPercentDenominatorToInt(percent.denominator()) < + percent.numerator(); +} + uint64_t fractionalPercentDenominatorToInt( const envoy::type::FractionalPercent::DenominatorType& denominator) { switch (denominator) { @@ -62,6 +95,7 @@ void MessageUtil::loadFromJsonEx(const std::string& json, Protobuf::Message& mes if (proto_unknown_fields == ProtoUnknownFieldsMode::Allow) { options.ignore_unknown_fields = true; } + options.case_insensitive_enum_parsing = true; const auto status = Protobuf::util::JsonStringToMessage(json, &message, options); if (!status.ok()) { throw 
EnvoyException("Unable to parse JSON as proto (" + status.ToString() + "): " + json); @@ -82,7 +116,7 @@ void MessageUtil::loadFromYaml(const std::string& yaml, Protobuf::Message& messa void MessageUtil::loadFromFile(const std::string& path, Protobuf::Message& message, Api::Api& api) { const std::string contents = api.fileSystem().fileReadToEnd(path); // If the filename ends with .pb, attempt to parse it as a binary proto. - if (absl::EndsWith(path, ".pb")) { + if (absl::EndsWith(path, FileExtensions::get().ProtoBinary)) { // Attempt to parse the binary format. if (message.ParseFromString(contents)) { MessageUtil::checkUnknownFields(message); @@ -92,21 +126,21 @@ void MessageUtil::loadFromFile(const std::string& path, Protobuf::Message& messa message.GetTypeName() + ")"); } // If the filename ends with .pb_text, attempt to parse it as a text proto. - if (absl::EndsWith(path, ".pb_text")) { + if (absl::EndsWith(path, FileExtensions::get().ProtoText)) { if (Protobuf::TextFormat::ParseFromString(contents, &message)) { return; } throw EnvoyException("Unable to parse file \"" + path + "\" as a text protobuf (type " + message.GetTypeName() + ")"); } - if (absl::EndsWith(path, ".yaml")) { + if (absl::EndsWith(path, FileExtensions::get().Yaml)) { loadFromYaml(contents, message); } else { loadFromJson(contents, message); } } -void MessageUtil::checkForDeprecation(const Protobuf::Message& message, bool warn_only) { +void MessageUtil::checkForDeprecation(const Protobuf::Message& message, Runtime::Loader* runtime) { const Protobuf::Descriptor* descriptor = message.GetDescriptor(); const Protobuf::Reflection* reflection = message.GetReflection(); for (int i = 0; i < descriptor->field_count(); ++i) { @@ -118,17 +152,33 @@ void MessageUtil::checkForDeprecation(const Protobuf::Message& message, bool war continue; } + bool warn_only = true; + absl::string_view filename = filenameFromPath(field->file()->name()); + // Allow runtime to be null both to not crash if this is called 
before server initialization, + // and so proto validation works in context where runtime singleton is not set up (e.g. + // standalone config validation utilities) + if (runtime && field->options().deprecated() && + !runtime->snapshot().deprecatedFeatureEnabled( + absl::StrCat("envoy.deprecated_features.", filename, ":", field->name()))) { + warn_only = false; + } + // If this field is deprecated, warn or throw an error. if (field->options().deprecated()) { std::string err = fmt::format( - "Using deprecated option '{}'. This configuration will be removed from Envoy soon. " - "Please see https://github.com/envoyproxy/envoy/blob/master/DEPRECATED.md for " - "details.", - field->full_name()); + "Using deprecated option '{}' from file {}. This configuration will be removed from " + "Envoy soon. Please see https://www.envoyproxy.io/docs/envoy/latest/intro/deprecated " + "for details.", + field->full_name(), filename); if (warn_only) { ENVOY_LOG_MISC(warn, "{}", err); } else { - throw ProtoValidationException(err, message); + const char fatal_error[] = + " If continued use of this field is absolutely necessary, see " + "https://www.envoyproxy.io/docs/envoy/latest/configuration/runtime" + "#using-runtime-overrides-for-deprecated-features for how to apply a temporary and" + "highly discouraged override."; + throw ProtoValidationException(err + fatal_error, message); } } @@ -137,15 +187,28 @@ void MessageUtil::checkForDeprecation(const Protobuf::Message& message, bool war if (field->is_repeated()) { const int size = reflection->FieldSize(message, field); for (int j = 0; j < size; ++j) { - checkForDeprecation(reflection->GetRepeatedMessage(message, field, j), warn_only); + checkForDeprecation(reflection->GetRepeatedMessage(message, field, j), runtime); } } else { - checkForDeprecation(reflection->GetMessage(message, field), warn_only); + checkForDeprecation(reflection->GetMessage(message, field), runtime); } } } } +std::string MessageUtil::getYamlStringFromMessage(const 
Protobuf::Message& message, + const bool block_print, + const bool always_print_primitive_fields) { + std::string json = getJsonStringFromMessage(message, false, always_print_primitive_fields); + auto node = YAML::Load(json); + if (block_print) { + blockFormat(node); + } + YAML::Emitter out; + out << node; + return out.c_str(); +} + std::string MessageUtil::getJsonStringFromMessage(const Protobuf::Message& message, const bool pretty_print, const bool always_print_primitive_fields) { @@ -162,7 +225,7 @@ std::string MessageUtil::getJsonStringFromMessage(const Protobuf::Message& messa if (always_print_primitive_fields) { json_options.always_print_primitive_fields = true; } - ProtobufTypes::String json; + std::string json; const auto status = Protobuf::util::MessageToJsonString(message, &json, json_options); // This should always succeed unless something crash-worthy such as out-of-memory. RELEASE_ASSERT(status.ok(), ""); @@ -173,7 +236,7 @@ void MessageUtil::jsonConvert(const Protobuf::Message& source, Protobuf::Message // TODO(htuch): Consolidate with the inflight cleanups here. Protobuf::util::JsonPrintOptions json_options; json_options.preserve_proto_field_names = true; - ProtobufTypes::String json; + std::string json; const auto status = Protobuf::util::MessageToJsonString(source, &json, json_options); if (!status.ok()) { throw EnvoyException(fmt::format("Unable to convert protobuf message to JSON string: {} {}", @@ -190,6 +253,49 @@ ProtobufWkt::Struct MessageUtil::keyValueStruct(const std::string& key, const st return struct_obj; } +// TODO(alyssawilk) see if we can get proto's CodeEnumToString made accessible +// to avoid copying it. Otherwise change this to absl::string_view. 
+std::string MessageUtil::CodeEnumToString(ProtobufUtil::error::Code code) { + switch (code) { + case ProtobufUtil::error::OK: + return "OK"; + case ProtobufUtil::error::CANCELLED: + return "CANCELLED"; + case ProtobufUtil::error::UNKNOWN: + return "UNKNOWN"; + case ProtobufUtil::error::INVALID_ARGUMENT: + return "INVALID_ARGUMENT"; + case ProtobufUtil::error::DEADLINE_EXCEEDED: + return "DEADLINE_EXCEEDED"; + case ProtobufUtil::error::NOT_FOUND: + return "NOT_FOUND"; + case ProtobufUtil::error::ALREADY_EXISTS: + return "ALREADY_EXISTS"; + case ProtobufUtil::error::PERMISSION_DENIED: + return "PERMISSION_DENIED"; + case ProtobufUtil::error::UNAUTHENTICATED: + return "UNAUTHENTICATED"; + case ProtobufUtil::error::RESOURCE_EXHAUSTED: + return "RESOURCE_EXHAUSTED"; + case ProtobufUtil::error::FAILED_PRECONDITION: + return "FAILED_PRECONDITION"; + case ProtobufUtil::error::ABORTED: + return "ABORTED"; + case ProtobufUtil::error::OUT_OF_RANGE: + return "OUT_OF_RANGE"; + case ProtobufUtil::error::UNIMPLEMENTED: + return "UNIMPLEMENTED"; + case ProtobufUtil::error::INTERNAL: + return "INTERNAL"; + case ProtobufUtil::error::UNAVAILABLE: + return "UNAVAILABLE"; + case ProtobufUtil::error::DATA_LOSS: + return "DATA_LOSS"; + default: + return ""; + } +} + bool ValueUtil::equal(const ProtobufWkt::Value& v1, const ProtobufWkt::Value& v2) { ProtobufWkt::Value::KindCase kind = v1.kind_case(); if (kind != v2.kind_case()) { diff --git a/source/common/protobuf/utility.h b/source/common/protobuf/utility.h index 7496f4aa69edd..4225048cbe6a7 100644 --- a/source/common/protobuf/utility.h +++ b/source/common/protobuf/utility.h @@ -5,12 +5,14 @@ #include "envoy/api/api.h" #include "envoy/common/exception.h" #include "envoy/json/json_object.h" +#include "envoy/runtime/runtime.h" #include "envoy/type/percent.pb.h" #include "common/common/hash.h" #include "common/common/utility.h" #include "common/json/json_loader.h" #include "common/protobuf/protobuf.h" +#include 
"common/singleton/const_singleton.h" // Obtain the value of a wrapped field (e.g. google.protobuf.UInt32Value) if set. Otherwise, return // the default value. @@ -57,6 +59,15 @@ namespace ProtobufPercentHelper { uint64_t checkAndReturnDefault(uint64_t default_value, uint64_t max_value); uint64_t convertPercent(double percent, uint64_t max_value); +/** + * Given a fractional percent chance of a given event occurring, evaluate to a yes/no decision + * based on a provided random value. + * @param percent the chance of a given event happening. + * @param random_value supplies a numerical value to use to evaluate the event. + * @return bool decision about whether the event should occur. + */ +bool evaluateFractionalPercent(envoy::type::FractionalPercent percent, uint64_t random_value); + /** * Convert a fractional percent denominator enum into an integer. * @param denominator supplies denominator to convert. @@ -93,7 +104,7 @@ class MissingFieldException : public EnvoyException { class RepeatedPtrUtil { public: - static std::string join(const Protobuf::RepeatedPtrField& source, + static std::string join(const Protobuf::RepeatedPtrField& source, const std::string& delimiter) { return StringUtil::join(std::vector(source.begin(), source.end()), delimiter); } @@ -115,7 +126,7 @@ class RepeatedPtrUtil { static std::size_t hash(const Protobuf::RepeatedPtrField& source) { // Use Protobuf::io::CodedOutputStream to force deterministic serialization, so that the same // message doesn't hash to different values. - ProtobufTypes::String text; + std::string text; { // For memory safety, the StringOutputStream needs to be destroyed before // we read the string. 
@@ -147,10 +158,21 @@ class MessageUtil { return Protobuf::util::MessageDifferencer::Equivalent(lhs, rhs); } + class FileExtensionValues { + public: + const std::string ProtoBinary = ".pb"; + const std::string ProtoBinaryLengthDelimited = ".pb_length_delimited"; + const std::string ProtoText = ".pb_text"; + const std::string Json = ".json"; + const std::string Yaml = ".yaml"; + }; + + typedef ConstSingleton FileExtensions; + static std::size_t hash(const Protobuf::Message& message) { // Use Protobuf::io::CodedOutputStream to force deterministic serialization, so that the same // message doesn't hash to different values. - ProtobufTypes::String text; + std::string text; { // For memory safety, the StringOutputStream needs to be destroyed before // we read the string. @@ -181,11 +203,13 @@ class MessageUtil { /** * Checks for use of deprecated fields in message and all sub-messages. * @param message message to validate. - * @param warn_only if true, logs a warning rather than throwing an exception if deprecated fields - * are in use. - * @throw ProtoValidationException if deprecated fields are used and warn_only is false. + * @param loader optional a pointer to the runtime loader for live deprecation status. + * @throw ProtoValidationException if deprecated fields are used and listed + * in disallowed_features in runtime_features.h */ - static void checkForDeprecation(const Protobuf::Message& message, bool warn_only); + static void + checkForDeprecation(const Protobuf::Message& message, + Runtime::Loader* loader = Runtime::LoaderSingleton::getExisting()); /** * Validate protoc-gen-validate constraints on a given protobuf. @@ -195,8 +219,8 @@ class MessageUtil { * @throw ProtoValidationException if the message does not satisfy its type constraints. */ template static void validate(const MessageType& message) { - // Log warnings if deprecated fields are in use. - checkForDeprecation(message, true); + // Log warnings or throw errors if deprecated fields are in use. 
+ checkForDeprecation(message); std::string err; if (!Validate(message, &err)) { @@ -256,6 +280,18 @@ class MessageUtil { */ static void jsonConvert(const Protobuf::Message& source, Protobuf::Message& dest); + /** + * Extract YAML as string from a google.protobuf.Message. + * @param message message of type type.googleapis.com/google.protobuf.Message. + * @param block_print whether the returned JSON should be in block style rather than flow style. + * @param always_print_primitive_fields whether to include primitive fields set to their default + * values, e.g. an int32 set to 0 or a bool set to false. + * @return std::string of formatted YAML object. + */ + static std::string getYamlStringFromMessage(const Protobuf::Message& message, + const bool block_print = true, + const bool always_print_primitive_fields = false); + /** * Extract JSON as string from a google.protobuf.Message. * @param message message of type type.googleapis.com/google.protobuf.Message. @@ -284,6 +320,13 @@ class MessageUtil { * @param value the string value to associate with the key */ static ProtobufWkt::Struct keyValueStruct(const std::string& key, const std::string& value); + + /** + * Utility method to print a human readable string of the code passed in. 
+ * + * @param code the protobuf error code + */ + static std::string CodeEnumToString(ProtobufUtil::error::Code code); }; class ValueUtil { diff --git a/source/common/router/BUILD b/source/common/router/BUILD index b861e6eeb3037..6f66df20eac95 100644 --- a/source/common/router/BUILD +++ b/source/common/router/BUILD @@ -66,6 +66,46 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "route_config_update_impl_lib", + srcs = ["route_config_update_receiver_impl.cc"], + hdrs = ["route_config_update_receiver_impl.h"], + deps = [ + ":config_lib", + "//include/envoy/router:rds_interface", + "//include/envoy/router:route_config_update_info_interface", + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + "//source/common/protobuf:utility_lib", + ], +) + +envoy_cc_library( + name = "vhds_lib", + srcs = ["vhds.cc"], + hdrs = ["vhds.h"], + deps = [ + ":config_lib", + "//include/envoy/config:subscription_interface", + "//include/envoy/http:codes_interface", + "//include/envoy/local_info:local_info_interface", + "//include/envoy/router:rds_interface", + "//include/envoy/router:route_config_provider_manager_interface", + "//include/envoy/router:route_config_update_info_interface", + "//include/envoy/singleton:instance_interface", + "//include/envoy/thread_local:thread_local_interface", + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + "//source/common/config:rds_json_lib", + "//source/common/config:subscription_factory_lib", + "//source/common/config:utility_lib", + "//source/common/init:target_lib", + "//source/common/protobuf:utility_lib", + "//source/common/router:route_config_update_impl_lib", + "@envoy_api//envoy/config/filter/network/http_connection_manager/v2:http_connection_manager_cc", + ], +) + envoy_cc_library( name = "rds_lib", srcs = ["rds_impl.cc"], @@ -74,10 +114,10 @@ envoy_cc_library( ":config_lib", "//include/envoy/config:subscription_interface", "//include/envoy/http:codes_interface", - 
"//include/envoy/init:init_interface", "//include/envoy/local_info:local_info_interface", "//include/envoy/router:rds_interface", "//include/envoy/router:route_config_provider_manager_interface", + "//include/envoy/router:route_config_update_info_interface", "//include/envoy/server:admin_interface", "//include/envoy/singleton:instance_interface", "//include/envoy/thread_local:thread_local_interface", @@ -86,7 +126,10 @@ envoy_cc_library( "//source/common/config:rds_json_lib", "//source/common/config:subscription_factory_lib", "//source/common/config:utility_lib", + "//source/common/init:target_lib", "//source/common/protobuf:utility_lib", + "//source/common/router:route_config_update_impl_lib", + "//source/common/router:vhds_lib", "@envoy_api//envoy/admin/v2alpha:config_dump_cc", "@envoy_api//envoy/api/v2:rds_cc", "@envoy_api//envoy/config/filter/network/http_connection_manager/v2:http_connection_manager_cc", @@ -145,6 +188,7 @@ envoy_cc_library( "//source/common/common:enum_to_int", "//source/common/common:hash_lib", "//source/common/common:hex_lib", + "//source/common/common:linked_object", "//source/common/common:minimal_logger_lib", "//source/common/common:utility_lib", "//source/common/grpc:common_lib", diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index 834ca0f6c4b2d..c8ea73000bf34 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -57,6 +57,14 @@ std::string SslRedirector::newPath(const Http::HeaderMap& headers) const { return Http::Utility::createSslRedirectPath(headers); } +HedgePolicyImpl::HedgePolicyImpl(const envoy::api::v2::route::HedgePolicy& hedge_policy) + : initial_requests_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(hedge_policy, initial_requests, 1)), + additional_request_chance_(hedge_policy.additional_request_chance()), + hedge_on_per_try_timeout_(hedge_policy.hedge_on_per_try_timeout()) {} + +HedgePolicyImpl::HedgePolicyImpl() + : initial_requests_(1), 
additional_request_chance_({}), hedge_on_per_try_timeout_(false) {} + RetryPolicyImpl::RetryPolicyImpl(const envoy::api::v2::route::RetryPolicy& retry_policy) { per_try_timeout_ = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(retry_policy, per_try_timeout, 0)); @@ -88,6 +96,27 @@ RetryPolicyImpl::RetryPolicyImpl(const envoy::api::v2::route::RetryPolicy& retry for (auto code : retry_policy.retriable_status_codes()) { retriable_status_codes_.emplace_back(code); } + + if (retry_policy.has_retry_back_off()) { + base_interval_ = std::chrono::milliseconds( + PROTOBUF_GET_MS_REQUIRED(retry_policy.retry_back_off(), base_interval)); + if ((*base_interval_).count() < 1) { + base_interval_ = std::chrono::milliseconds(1); + } + + max_interval_ = PROTOBUF_GET_OPTIONAL_MS(retry_policy.retry_back_off(), max_interval); + if (max_interval_) { + // Apply the same rounding to max interval in case both are set to sub-millisecond values. + if ((*max_interval_).count() < 1) { + max_interval_ = std::chrono::milliseconds(1); + } + + if ((*max_interval_).count() < (*base_interval_).count()) { + throw EnvoyException( + "retry_policy.max_interval must greater than or equal to the base_interval"); + } + } + } } std::vector RetryPolicyImpl::retryHostPredicates() const { @@ -170,7 +199,7 @@ class HeaderHashMethod : public HashMethodImplBase { const Http::HeaderEntry* header = headers.get(header_name_); if (header) { - hash = HashUtil::xxHash64(header->value().c_str()); + hash = HashUtil::xxHash64(header->value().getStringView()); } return hash; } @@ -308,6 +337,7 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, timeout_(PROTOBUF_GET_MS_OR_DEFAULT(route.route(), timeout, DEFAULT_ROUTE_TIMEOUT_MS)), idle_timeout_(PROTOBUF_GET_OPTIONAL_MS(route.route(), idle_timeout)), max_grpc_timeout_(PROTOBUF_GET_OPTIONAL_MS(route.route(), max_grpc_timeout)), + grpc_timeout_offset_(PROTOBUF_GET_OPTIONAL_MS(route.route(), grpc_timeout_offset)), loader_(factory_context.runtime()), 
runtime_(loadRuntimeData(route.match())), scheme_redirect_(route.redirect().scheme_redirect()), host_redirect_(route.redirect().host_redirect()), @@ -318,15 +348,12 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, https_redirect_(route.redirect().https_redirect()), prefix_rewrite_redirect_(route.redirect().prefix_rewrite()), strip_query_(route.redirect().strip_query()), + hedge_policy_(buildHedgePolicy(vhost.hedgePolicy(), route.route())), retry_policy_(buildRetryPolicy(vhost.retryPolicy(), route.route())), rate_limit_policy_(route.route().rate_limits()), shadow_policy_(route.route()), priority_(ConfigUtility::parsePriority(route.route().priority())), total_cluster_weight_( PROTOBUF_GET_WRAPPED_OR_DEFAULT(route.route().weighted_clusters(), total_weight, 100UL)), - route_action_request_headers_parser_( - HeaderParser::configure(route.route().request_headers_to_add())), - route_action_response_headers_parser_(HeaderParser::configure( - route.route().response_headers_to_add(), route.route().response_headers_to_remove())), request_headers_parser_(HeaderParser::configure(route.request_headers_to_add(), route.request_headers_to_remove())), response_headers_parser_(HeaderParser::configure(route.response_headers_to_add(), @@ -338,7 +365,7 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, direct_response_body_(ConfigUtility::parseDirectResponseBody(route, factory_context.api())), per_filter_configs_(route.typed_per_filter_config(), route.per_filter_config(), factory_context), - time_system_(factory_context.dispatcher().timeSystem()), + time_source_(factory_context.dispatcher().timeSource()), internal_redirect_action_(convertInternalRedirectAction(route.route())) { if (route.route().has_metadata_match()) { const auto filter_it = route.route().metadata_match().filter_metadata().find( @@ -431,7 +458,7 @@ bool RouteEntryImplBase::matchRoute(const Http::HeaderMap& headers, uint64_t ran matches &= 
Http::HeaderUtility::matchHeaders(headers, config_headers_); if (!config_query_parameters_.empty()) { Http::Utility::QueryParams query_parameters = - Http::Utility::parseQueryString(headers.Path()->value().c_str()); + Http::Utility::parseQueryString(headers.Path()->value().getStringView()); matches &= ConfigUtility::matchQueryParams(query_parameters, config_query_parameters_); } @@ -443,10 +470,8 @@ const std::string& RouteEntryImplBase::clusterName() const { return cluster_name void RouteEntryImplBase::finalizeRequestHeaders(Http::HeaderMap& headers, const StreamInfo::StreamInfo& stream_info, bool insert_envoy_original_path) const { - // Append user-specified request headers in the following order: route-action-level headers, - // route-level headers, virtual host level headers and finally global connection manager level - // headers. - route_action_request_headers_parser_->evaluateHeaders(headers, stream_info); + // Append user-specified request headers in the following order: route-level headers, virtual + // host level headers and finally global connection manager level headers. request_headers_parser_->evaluateHeaders(headers, stream_info); vhost_.requestHeaderParser().evaluateHeaders(headers, stream_info); vhost_.globalRouteConfig().requestHeaderParser().evaluateHeaders(headers, stream_info); @@ -462,10 +487,8 @@ void RouteEntryImplBase::finalizeRequestHeaders(Http::HeaderMap& headers, void RouteEntryImplBase::finalizeResponseHeaders(Http::HeaderMap& headers, const StreamInfo::StreamInfo& stream_info) const { - // Append user-specified response headers in the following order: route-action-level headers, - // route-level headers, virtual host level headers and finally global connection manager level - // headers. 
- route_action_response_headers_parser_->evaluateHeaders(headers, stream_info); + // Append user-specified response headers in the following order: route-level headers, virtual + // host level headers and finally global connection manager level headers. response_headers_parser_->evaluateHeaders(headers, stream_info); vhost_.responseHeaderParser().evaluateHeaders(headers, stream_info); vhost_.globalRouteConfig().responseHeaderParser().evaluateHeaders(headers, stream_info); @@ -493,7 +516,7 @@ void RouteEntryImplBase::finalizePathHeader(Http::HeaderMap& headers, return; } - std::string path = std::string(headers.Path()->value().c_str(), headers.Path()->value().size()); + std::string path(headers.Path()->value().getStringView()); if (insert_envoy_original_path) { headers.insertEnvoyOriginalPath().value(*headers.Path()); } @@ -541,7 +564,7 @@ absl::string_view RouteEntryImplBase::processRequestHost(const Http::HeaderMap& std::string RouteEntryImplBase::newPath(const Http::HeaderMap& headers) const { ASSERT(isDirectResponse()); - const char* final_scheme; + absl::string_view final_scheme; absl::string_view final_host; absl::string_view final_port; absl::string_view final_path; @@ -549,10 +572,10 @@ std::string RouteEntryImplBase::newPath(const Http::HeaderMap& headers) const { if (!scheme_redirect_.empty()) { final_scheme = scheme_redirect_.c_str(); } else if (https_redirect_) { - final_scheme = Http::Headers::get().SchemeValues.Https.c_str(); + final_scheme = Http::Headers::get().SchemeValues.Https; } else { ASSERT(headers.ForwardedProto()); - final_scheme = headers.ForwardedProto()->value().c_str(); + final_scheme = headers.ForwardedProto()->value().getStringView(); } if (!port_redirect_.empty()) { @@ -572,7 +595,7 @@ std::string RouteEntryImplBase::newPath(const Http::HeaderMap& headers) const { final_path = path_redirect_.c_str(); } else { ASSERT(headers.Path()); - final_path = absl::string_view(headers.Path()->value().c_str(), headers.Path()->value().size()); + 
final_path = headers.Path()->value().getStringView(); if (strip_query_) { size_t path_end = final_path.find("?"); if (path_end != absl::string_view::npos) { @@ -602,6 +625,23 @@ RouteEntryImplBase::parseOpaqueConfig(const envoy::api::v2::route::Route& route) return ret; } +HedgePolicyImpl RouteEntryImplBase::buildHedgePolicy( + const absl::optional& vhost_hedge_policy, + const envoy::api::v2::route::RouteAction& route_config) const { + // Route specific policy wins, if available. + if (route_config.has_hedge_policy()) { + return HedgePolicyImpl(route_config.hedge_policy()); + } + + // If not, we fall back to the virtual host policy if there is one. + if (vhost_hedge_policy) { + return HedgePolicyImpl(vhost_hedge_policy.value()); + } + + // Otherwise, an empty policy will do. + return HedgePolicyImpl(); +} + RetryPolicyImpl RouteEntryImplBase::buildRetryPolicy( const absl::optional& vhost_retry_policy, const envoy::api::v2::route::RouteAction& route_config) const { @@ -659,7 +699,7 @@ RouteConstSharedPtr RouteEntryImplBase::clusterEntry(const Http::HeaderMap& head const Http::HeaderEntry* entry = headers.get(cluster_header_name_); std::string final_cluster_name; if (entry) { - final_cluster_name = entry->value().c_str(); + final_cluster_name = std::string(entry->value().getStringView()); } // NOTE: Though we return a shared_ptr here, the current ownership model assumes that @@ -772,17 +812,17 @@ RouteConstSharedPtr PathRouteEntryImpl::matches(const Http::HeaderMap& headers, uint64_t random_value) const { if (RouteEntryImplBase::matchRoute(headers, random_value)) { const Http::HeaderString& path = headers.Path()->value(); - const char* query_string_start = Http::Utility::findQueryStringStart(path); + absl::string_view query_string = Http::Utility::findQueryStringStart(path); size_t compare_length = path.size(); - if (query_string_start != nullptr) { - compare_length = query_string_start - path.c_str(); + if (query_string.length() > 0) { + compare_length = 
compare_length - query_string.length(); } if (compare_length != path_.size()) { return nullptr; } - absl::string_view path_section(path.c_str(), compare_length); + const absl::string_view path_section = path.getStringView().substr(0, compare_length); if (case_sensitive_) { if (absl::string_view(path_) == path_section) { return clusterEntry(headers, random_value); @@ -806,11 +846,14 @@ RegexRouteEntryImpl::RegexRouteEntryImpl(const VirtualHostImpl& vhost, void RegexRouteEntryImpl::rewritePathHeader(Http::HeaderMap& headers, bool insert_envoy_original_path) const { const Http::HeaderString& path = headers.Path()->value(); - const char* query_string_start = Http::Utility::findQueryStringStart(path); + const absl::string_view query_string = Http::Utility::findQueryStringStart(path); + const size_t path_string_length = path.size() - query_string.length(); // TODO(yuval-k): This ASSERT can happen if the path was changed by a filter without clearing the // route cache. We should consider if ASSERT-ing is the desired behavior in this case. 
- ASSERT(std::regex_match(path.c_str(), query_string_start, regex_)); - std::string matched_path(path.c_str(), query_string_start); + + const absl::string_view path_view = path.getStringView(); + ASSERT(std::regex_match(path_view.begin(), path_view.begin() + path_string_length, regex_)); + const std::string matched_path(path_view.begin(), path_view.begin() + path_string_length); finalizePathHeader(headers, matched_path, insert_envoy_original_path); } @@ -819,8 +862,10 @@ RouteConstSharedPtr RegexRouteEntryImpl::matches(const Http::HeaderMap& headers, uint64_t random_value) const { if (RouteEntryImplBase::matchRoute(headers, random_value)) { const Http::HeaderString& path = headers.Path()->value(); - const char* query_string_start = Http::Utility::findQueryStringStart(path); - if (std::regex_match(path.c_str(), query_string_start, regex_)) { + const absl::string_view query_string = Http::Utility::findQueryStringStart(path); + if (std::regex_match(path.getStringView().begin(), + path.getStringView().begin() + (path.size() - query_string.length()), + regex_)) { return clusterEntry(headers, random_value); } } @@ -854,10 +899,13 @@ VirtualHostImpl::VirtualHostImpl(const envoy::api::v2::route::VirtualHost& virtu NOT_REACHED_GCOVR_EXCL_LINE; } - // Retry Policy must be set before routes, since they may use it. + // Retry and Hedge policies must be set before routes, since they may use them. if (virtual_host.has_retry_policy()) { retry_policy_ = virtual_host.retry_policy(); } + if (virtual_host.has_hedge_policy()) { + hedge_policy_ = virtual_host.hedge_policy(); + } for (const auto& route : virtual_host.routes()) { const bool has_prefix = @@ -912,19 +960,21 @@ const RouteSpecificFilterConfig* VirtualHostImpl::perFilterConfig(const std::str return per_filter_configs_.get(name); } -const VirtualHostImpl* RouteMatcher::findWildcardVirtualHost(const std::string& host) const { - // We do a longest wildcard suffix match against the host that's passed in. - // (e.g. 
foo-bar.baz.com should match *-bar.baz.com before matching *.baz.com) - // This is done by scanning the length => wildcards map looking for every - // wildcard whose size is < length. - for (const auto& iter : wildcard_virtual_host_suffixes_) { +const VirtualHostImpl* RouteMatcher::findWildcardVirtualHost( + const std::string& host, const RouteMatcher::WildcardVirtualHosts& wildcard_virtual_hosts, + RouteMatcher::SubstringFunction substring_function) const { + // We do a longest wildcard match against the host that's passed in + // (e.g. foo-bar.baz.com should match *-bar.baz.com before matching *.baz.com for suffix + // wildcards). This is done by scanning the length => wildcards map looking for every wildcard + // whose size is < length. + for (const auto& iter : wildcard_virtual_hosts) { const uint32_t wildcard_length = iter.first; const auto& wildcard_map = iter.second; // >= because *.foo.com shouldn't match .foo.com. if (wildcard_length >= host.size()) { continue; } - const auto& match = wildcard_map.find(host.substr(host.size() - wildcard_length)); + const auto& match = wildcard_map.find(substring_function(host, wildcard_length)); if (match != wildcard_map.end()) { return match->second.get(); } @@ -941,20 +991,26 @@ RouteMatcher::RouteMatcher(const envoy::api::v2::RouteConfiguration& route_confi factory_context, validate_clusters)); for (const std::string& domain_name : virtual_host_config.domains()) { const std::string domain = Http::LowerCaseString(domain_name).get(); + bool duplicate_found = false; if ("*" == domain) { if (default_virtual_host_) { throw EnvoyException(fmt::format("Only a single wildcard domain is permitted")); } default_virtual_host_ = virtual_host; - } else if (domain.size() > 0 && '*' == domain[0]) { - wildcard_virtual_host_suffixes_[domain.size() - 1].emplace(domain.substr(1), virtual_host); + } else if (!domain.empty() && '*' == domain[0]) { + duplicate_found = !wildcard_virtual_host_suffixes_[domain.size() - 1] + 
.emplace(domain.substr(1), virtual_host) + .second; + } else if (!domain.empty() && '*' == domain[domain.size() - 1]) { + duplicate_found = !wildcard_virtual_host_prefixes_[domain.size() - 1] + .emplace(domain.substr(0, domain.size() - 1), virtual_host) + .second; } else { - if (virtual_hosts_.find(domain) != virtual_hosts_.end()) { - throw EnvoyException(fmt::format( - "Only unique values for domains are permitted. Duplicate entry of domain {}", - domain)); - } - virtual_hosts_.emplace(domain, virtual_host); + duplicate_found = !virtual_hosts_.emplace(domain, virtual_host).second; + } + if (duplicate_found) { + throw EnvoyException(fmt::format( + "Only unique values for domains are permitted. Duplicate entry of domain {}", domain)); } } } @@ -962,11 +1018,18 @@ RouteMatcher::RouteMatcher(const envoy::api::v2::RouteConfiguration& route_confi RouteConstSharedPtr VirtualHostImpl::getRouteFromEntries(const Http::HeaderMap& headers, uint64_t random_value) const { + // No x-forwarded-proto header. This normally only happens when ActiveStream::decodeHeaders + // bails early (as it rejects a request), so there is no routing is going to happen anyway. + const auto* forwarded_proto_header = headers.ForwardedProto(); + if (forwarded_proto_header == nullptr) { + return nullptr; + } + // First check for ssl redirect. 
- if (ssl_requirements_ == SslRequirements::ALL && headers.ForwardedProto()->value() != "https") { + if (ssl_requirements_ == SslRequirements::ALL && forwarded_proto_header->value() != "https") { return SSL_REDIRECT_ROUTE; } else if (ssl_requirements_ == SslRequirements::EXTERNAL_ONLY && - headers.ForwardedProto()->value() != "https" && !headers.EnvoyInternalRequest()) { + forwarded_proto_header->value() != "https" && !headers.EnvoyInternalRequest()) { return SSL_REDIRECT_ROUTE; } @@ -983,19 +1046,31 @@ RouteConstSharedPtr VirtualHostImpl::getRouteFromEntries(const Http::HeaderMap& const VirtualHostImpl* RouteMatcher::findVirtualHost(const Http::HeaderMap& headers) const { // Fast path the case where we only have a default virtual host. - if (virtual_hosts_.empty() && wildcard_virtual_host_suffixes_.empty() && default_virtual_host_) { + if (virtual_hosts_.empty() && wildcard_virtual_host_suffixes_.empty() && + wildcard_virtual_host_prefixes_.empty()) { return default_virtual_host_.get(); } // TODO (@rshriram) Match Origin header in WebSocket // request with VHost, using wildcard match - const std::string host = Http::LowerCaseString(headers.Host()->value().c_str()).get(); + const std::string host = + Http::LowerCaseString(std::string(headers.Host()->value().getStringView())).get(); const auto& iter = virtual_hosts_.find(host); if (iter != virtual_hosts_.end()) { return iter->second.get(); } if (!wildcard_virtual_host_suffixes_.empty()) { - const VirtualHostImpl* vhost = findWildcardVirtualHost(host); + const VirtualHostImpl* vhost = findWildcardVirtualHost( + host, wildcard_virtual_host_suffixes_, + [](const std::string& h, int l) -> std::string { return h.substr(h.size() - l); }); + if (vhost != nullptr) { + return vhost; + } + } + if (!wildcard_virtual_host_prefixes_.empty()) { + const VirtualHostImpl* vhost = findWildcardVirtualHost( + host, wildcard_virtual_host_prefixes_, + [](const std::string& h, int l) -> std::string { return h.substr(0, l); }); if (vhost 
!= nullptr) { return vhost; } @@ -1022,14 +1097,15 @@ const VirtualCluster* VirtualHostImpl::virtualClusterFromEntries(const Http::HeaderMap& headers) const { for (const VirtualClusterEntry& entry : virtual_clusters_) { bool method_matches = - !entry.method_ || headers.Method()->value().c_str() == entry.method_.value(); + !entry.method_ || headers.Method()->value().getStringView() == entry.method_.value(); - if (method_matches && std::regex_match(headers.Path()->value().c_str(), entry.pattern_)) { + absl::string_view path_view = headers.Path()->value().getStringView(); + if (method_matches && std::regex_match(path_view.begin(), path_view.end(), entry.pattern_)) { return &entry; } } - if (virtual_clusters_.size() > 0) { + if (!virtual_clusters_.empty()) { return &VIRTUAL_CLUSTER_CATCH_ALL; } @@ -1039,7 +1115,7 @@ VirtualHostImpl::virtualClusterFromEntries(const Http::HeaderMap& headers) const ConfigImpl::ConfigImpl(const envoy::api::v2::RouteConfiguration& config, Server::Configuration::FactoryContext& factory_context, bool validate_clusters_default) - : name_(config.name()) { + : name_(config.name()), uses_vhds_(config.has_vhds()) { route_matcher_ = std::make_unique( config, *this, factory_context, PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, validate_clusters, validate_clusters_default)); @@ -1070,8 +1146,8 @@ createRouteSpecificFilterConfig(const std::string& name, const ProtobufWkt::Any& } // namespace PerFilterConfigs::PerFilterConfigs( - const Protobuf::Map& typed_configs, - const Protobuf::Map& configs, + const Protobuf::Map& typed_configs, + const Protobuf::Map& configs, Server::Configuration::FactoryContext& factory_context) { if (!typed_configs.empty() && !configs.empty()) { throw EnvoyException("Only one of typed_configs or configs can be specified"); diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index 6074c7c9156f4..286afc9135ab2 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ 
-50,8 +50,8 @@ class Matchable { class PerFilterConfigs { public: - PerFilterConfigs(const Protobuf::Map& typed_configs, - const Protobuf::Map& configs, + PerFilterConfigs(const Protobuf::Map& typed_configs, + const Protobuf::Map& configs, Server::Configuration::FactoryContext& factory_context); const RouteSpecificFilterConfig* get(const std::string& name) const; @@ -162,6 +162,9 @@ class VirtualHostImpl : public VirtualHost { const absl::optional& retryPolicy() const { return retry_policy_; } + const absl::optional& hedgePolicy() const { + return hedge_policy_; + } private: enum class SslRequirements { NONE, EXTERNAL_ONLY, ALL }; @@ -200,6 +203,7 @@ class VirtualHostImpl : public VirtualHost { PerFilterConfigs per_filter_configs_; const bool include_attempt_count_; absl::optional retry_policy_; + absl::optional hedge_policy_; }; typedef std::shared_ptr VirtualHostSharedPtr; @@ -223,6 +227,8 @@ class RetryPolicyImpl : public RetryPolicy { const std::vector& retriableStatusCodes() const override { return retriable_status_codes_; } + absl::optional baseInterval() const override { return base_interval_; } + absl::optional maxInterval() const override { return max_interval_; } private: std::chrono::milliseconds per_try_timeout_{0}; @@ -237,6 +243,8 @@ class RetryPolicyImpl : public RetryPolicy { std::pair retry_priority_config_; uint32_t host_selection_attempts_{1}; std::vector retriable_status_codes_; + absl::optional base_interval_; + absl::optional max_interval_; }; /** @@ -288,6 +296,28 @@ class HashPolicyImpl : public HashPolicy { std::vector hash_impls_; }; +/** + * Implementation of HedgePolicy that reads from the proto route or virtual host config. 
+ */ +class HedgePolicyImpl : public HedgePolicy { + +public: + explicit HedgePolicyImpl(const envoy::api::v2::route::HedgePolicy& hedge_policy); + HedgePolicyImpl(); + + // Router::HedgePolicy + uint32_t initialRequests() const override { return initial_requests_; } + const envoy::type::FractionalPercent& additionalRequestChance() const override { + return additional_request_chance_; + } + bool hedgeOnPerTryTimeout() const override { return hedge_on_per_try_timeout_; } + +private: + const uint32_t initial_requests_; + const envoy::type::FractionalPercent additional_request_chance_; + const bool hedge_on_per_try_timeout_; +}; + /** * Implementation of Decorator that reads from the proto route decorator. */ @@ -346,6 +376,8 @@ class RouteEntryImplBase : public RouteEntry, const StreamInfo::StreamInfo& stream_info) const override; const HashPolicy* hashPolicy() const override { return hash_policy_.get(); } + const HedgePolicy& hedgePolicy() const override { return hedge_policy_; } + const MetadataMatchCriteria* metadataMatchCriteria() const override { return metadata_match_criteria_.get(); } @@ -361,6 +393,9 @@ class RouteEntryImplBase : public RouteEntry, absl::optional maxGrpcTimeout() const override { return max_grpc_timeout_; } + absl::optional grpcTimeoutOffset() const override { + return grpc_timeout_offset_; + } const VirtualHost& virtualHost() const override { return vhost_; } bool autoHostRewrite() const override { return auto_host_rewrite_; } const std::multimap& opaqueConfig() const override { @@ -437,6 +472,7 @@ class RouteEntryImplBase : public RouteEntry, const CorsPolicy* corsPolicy() const override { return parent_->corsPolicy(); } const HashPolicy* hashPolicy() const override { return parent_->hashPolicy(); } + const HedgePolicy& hedgePolicy() const override { return parent_->hedgePolicy(); } Upstream::ResourcePriority priority() const override { return parent_->priority(); } const RateLimitPolicy& rateLimitPolicy() const override { return 
parent_->rateLimitPolicy(); } const RetryPolicy& retryPolicy() const override { return parent_->retryPolicy(); } @@ -448,6 +484,9 @@ class RouteEntryImplBase : public RouteEntry, absl::optional maxGrpcTimeout() const override { return parent_->maxGrpcTimeout(); } + absl::optional grpcTimeoutOffset() const override { + return parent_->maxGrpcTimeout(); + } const MetadataMatchCriteria* metadataMatchCriteria() const override { return parent_->metadataMatchCriteria(); } @@ -550,6 +589,10 @@ class RouteEntryImplBase : public RouteEntry, bool evaluateRuntimeMatch(const uint64_t random_value) const; + HedgePolicyImpl + buildHedgePolicy(const absl::optional& vhost_hedge_policy, + const envoy::api::v2::route::RouteAction& route_config) const; + RetryPolicyImpl buildRetryPolicy(const absl::optional& vhost_retry_policy, const envoy::api::v2::route::RouteAction& route_config) const; @@ -567,6 +610,7 @@ class RouteEntryImplBase : public RouteEntry, const std::chrono::milliseconds timeout_; const absl::optional idle_timeout_; const absl::optional max_grpc_timeout_; + const absl::optional grpc_timeout_offset_; Runtime::Loader& loader_; const absl::optional runtime_; const std::string scheme_redirect_; @@ -576,6 +620,7 @@ class RouteEntryImplBase : public RouteEntry, const bool https_redirect_; const std::string prefix_rewrite_redirect_; const bool strip_query_; + const HedgePolicyImpl hedge_policy_; const RetryPolicyImpl retry_policy_; const RateLimitPolicyImpl rate_limit_policy_; const ShadowPolicyImpl shadow_policy_; @@ -588,8 +633,6 @@ class RouteEntryImplBase : public RouteEntry, const uint64_t total_cluster_weight_; std::unique_ptr hash_policy_; MetadataMatchCriteriaConstPtr metadata_match_criteria_; - HeaderParserPtr route_action_request_headers_parser_; - HeaderParserPtr route_action_response_headers_parser_; HeaderParserPtr request_headers_parser_; HeaderParserPtr response_headers_parser_; envoy::api::v2::core::Metadata metadata_; @@ -603,7 +646,7 @@ class 
RouteEntryImplBase : public RouteEntry, const absl::optional direct_response_code_; std::string direct_response_body_; PerFilterConfigs per_filter_configs_; - Event::TimeSystem& time_system_; + TimeSource& time_source_; InternalRedirectAction internal_redirect_action_; }; @@ -688,7 +731,14 @@ class RouteMatcher { private: const VirtualHostImpl* findVirtualHost(const Http::HeaderMap& headers) const; - const VirtualHostImpl* findWildcardVirtualHost(const std::string& host) const; + + typedef std::map, + std::greater> + WildcardVirtualHosts; + typedef std::function SubstringFunction; + const VirtualHostImpl* findWildcardVirtualHost(const std::string& host, + const WildcardVirtualHosts& wildcard_virtual_hosts, + SubstringFunction substring_function) const; std::unordered_map virtual_hosts_; // std::greater as a minor optimization to iterate from more to less specific @@ -700,8 +750,9 @@ class RouteMatcher { // and climbs to about 110ns once there are any entries. // // The break-even is 4 entries. 
- std::map, std::greater> - wildcard_virtual_host_suffixes_; + WildcardVirtualHosts wildcard_virtual_host_suffixes_; + WildcardVirtualHosts wildcard_virtual_host_prefixes_; + VirtualHostSharedPtr default_virtual_host_; }; @@ -728,12 +779,15 @@ class ConfigImpl : public Config { const std::string& name() const override { return name_; } + bool usesVhds() const override { return uses_vhds_; } + private: std::unique_ptr route_matcher_; std::list internal_only_headers_; HeaderParserPtr request_headers_parser_; HeaderParserPtr response_headers_parser_; const std::string name_; + const bool uses_vhds_; }; /** @@ -749,6 +803,7 @@ class NullConfigImpl : public Config { } const std::string& name() const override { return name_; } + bool usesVhds() const override { return false; } private: std::list internal_only_headers_; diff --git a/source/common/router/header_formatter.cc b/source/common/router/header_formatter.cc index 80077aac6fe78..60f5ca8c5efd4 100644 --- a/source/common/router/header_formatter.cc +++ b/source/common/router/header_formatter.cc @@ -136,7 +136,7 @@ parsePerRequestStateField(absl::string_view param_str) { throw EnvoyException(formatPerRequestStateParseException(param_str)); } modified_param_str = modified_param_str.substr(1, modified_param_str.size() - 2); // trim parens - if (modified_param_str.size() == 0) { + if (modified_param_str.empty()) { throw EnvoyException(formatPerRequestStateParseException(param_str)); } diff --git a/source/common/router/header_parser.cc b/source/common/router/header_parser.cc index baaf7c797065b..05022605347c2 100644 --- a/source/common/router/header_parser.cc +++ b/source/common/router/header_parser.cc @@ -209,7 +209,7 @@ parseInternal(const envoy::api::v2::core::HeaderValueOption& header_value_option formatters.emplace_back(new PlainHeaderFormatter(unescape(literal), append)); } - ASSERT(formatters.size() > 0); + ASSERT(!formatters.empty()); if (formatters.size() == 1) { return std::move(formatters[0]); @@ -236,7 +236,7 
@@ HeaderParserPtr HeaderParser::configure( HeaderParserPtr HeaderParser::configure( const Protobuf::RepeatedPtrField& headers_to_add, - const Protobuf::RepeatedPtrField& headers_to_remove) { + const Protobuf::RepeatedPtrField& headers_to_remove) { HeaderParserPtr header_parser = configure(headers_to_add); for (const auto& header : headers_to_remove) { diff --git a/source/common/router/header_parser.h b/source/common/router/header_parser.h index aaeacf4152acd..147574c43abd1 100644 --- a/source/common/router/header_parser.h +++ b/source/common/router/header_parser.h @@ -37,7 +37,7 @@ class HeaderParser { */ static HeaderParserPtr configure( const Protobuf::RepeatedPtrField& headers_to_add, - const Protobuf::RepeatedPtrField& headers_to_remove); + const Protobuf::RepeatedPtrField& headers_to_remove); void evaluateHeaders(Http::HeaderMap& headers, const StreamInfo::StreamInfo& stream_info) const; diff --git a/source/common/router/rds_impl.cc b/source/common/router/rds_impl.cc index 1eb5528a27c88..fd4c908edf1c1 100644 --- a/source/common/router/rds_impl.cc +++ b/source/common/router/rds_impl.cc @@ -12,7 +12,6 @@ #include "common/common/assert.h" #include "common/common/fmt.h" #include "common/config/rds_json.h" -#include "common/config/subscription_factory.h" #include "common/config/utility.h" #include "common/protobuf/utility.h" #include "common/router/config_impl.h" @@ -53,31 +52,35 @@ StaticRouteConfigProviderImpl::~StaticRouteConfigProviderImpl() { } // TODO(htuch): If support for multiple clusters is added per #1170 cluster_name_ -// initialization needs to be fixed. 
RdsRouteConfigSubscription::RdsRouteConfigSubscription( const envoy::config::filter::network::http_connection_manager::v2::Rds& rds, const uint64_t manager_identifier, Server::Configuration::FactoryContext& factory_context, const std::string& stat_prefix, Envoy::Router::RouteConfigProviderManagerImpl& route_config_provider_manager) - : route_config_name_(rds.route_config_name()), + : route_config_name_(rds.route_config_name()), factory_context_(factory_context), + init_target_(fmt::format("RdsRouteConfigSubscription {}", route_config_name_), + [this]() { subscription_->start({route_config_name_}, *this); }), scope_(factory_context.scope().createScope(stat_prefix + "rds." + route_config_name_ + ".")), - stats_({ALL_RDS_STATS(POOL_COUNTER(*scope_))}), + stat_prefix_(stat_prefix), stats_({ALL_RDS_STATS(POOL_COUNTER(*scope_))}), route_config_provider_manager_(route_config_provider_manager), - manager_identifier_(manager_identifier), time_source_(factory_context.timeSource()), - last_updated_(factory_context.timeSource().systemTime()) { + manager_identifier_(manager_identifier) { Envoy::Config::Utility::checkLocalInfo("rds", factory_context.localInfo()); - subscription_ = Envoy::Config::SubscriptionFactory::subscriptionFromConfigSource< - envoy::api::v2::RouteConfiguration>( + subscription_ = Envoy::Config::SubscriptionFactory::subscriptionFromConfigSource( rds.config_source(), factory_context.localInfo(), factory_context.dispatcher(), factory_context.clusterManager(), factory_context.random(), *scope_, "envoy.api.v2.RouteDiscoveryService.FetchRoutes", - "envoy.api.v2.RouteDiscoveryService.StreamRoutes", factory_context.api()); + "envoy.api.v2.RouteDiscoveryService.StreamRoutes", + Grpc::Common::typeUrl(envoy::api::v2::RouteConfiguration().GetDescriptor()->full_name()), + factory_context.api()); + + config_update_info_ = + std::make_unique(factory_context.timeSource()); } RdsRouteConfigSubscription::~RdsRouteConfigSubscription() { // If we get destroyed during 
initialization, make sure we signal that we "initialized". - runInitializeCallbackIfAny(); + init_target_.ready(); // The ownership of RdsRouteConfigProviderImpl is shared among all HttpConnectionManagers that // hold a shared_ptr to it. The RouteConfigProviderManager holds weak_ptrs to the @@ -86,20 +89,19 @@ RdsRouteConfigSubscription::~RdsRouteConfigSubscription() { route_config_provider_manager_.route_config_subscriptions_.erase(manager_identifier_); } -void RdsRouteConfigSubscription::onConfigUpdate(const ResourceVector& resources, - const std::string& version_info) { - last_updated_ = time_source_.systemTime(); - +void RdsRouteConfigSubscription::onConfigUpdate( + const Protobuf::RepeatedPtrField& resources, + const std::string& version_info) { if (resources.empty()) { ENVOY_LOG(debug, "Missing RouteConfiguration for {} in onConfigUpdate()", route_config_name_); stats_.update_empty_.inc(); - runInitializeCallbackIfAny(); + init_target_.ready(); return; } if (resources.size() != 1) { throw EnvoyException(fmt::format("Unexpected RDS resource length: {}", resources.size())); } - const auto& route_config = resources[0]; + auto route_config = MessageUtil::anyConvert(resources[0]); MessageUtil::validate(route_config); // TODO(PiotrSikora): Remove this hack once fixed internally. 
if (!(route_config.name() == route_config_name_)) { @@ -107,76 +109,65 @@ void RdsRouteConfigSubscription::onConfigUpdate(const ResourceVector& resources, route_config_name_, route_config.name())); } - const uint64_t new_hash = MessageUtil::hash(route_config); - if (!config_info_ || new_hash != config_info_.value().last_config_hash_) { - config_info_ = {new_hash, version_info}; - route_config_proto_ = route_config; + if (config_update_info_->onRdsUpdate(route_config, version_info)) { stats_.config_reload_.inc(); - ENVOY_LOG(debug, "rds: loading new configuration: config_name={} hash={}", route_config_name_, - new_hash); - for (auto* provider : route_config_providers_) { - provider->onConfigUpdate(); + + if (config_update_info_->routeConfiguration().has_vhds()) { + ENVOY_LOG(debug, "rds: vhds configuration present, starting vhds: config_name={} hash={}", + route_config_name_, config_update_info_->configHash()); + vhds_subscription_ = std::make_unique( + config_update_info_, factory_context_, stat_prefix_, route_config_providers_); + vhds_subscription_->registerInitTargetWithInitManager(factory_context_.initManager()); + } else { + ENVOY_LOG(debug, "rds: loading new configuration: config_name={} hash={}", route_config_name_, + config_update_info_->configHash()); + + for (auto* provider : route_config_providers_) { + provider->onConfigUpdate(); + } + vhds_subscription_.release(); } } - runInitializeCallbackIfAny(); + init_target_.ready(); } void RdsRouteConfigSubscription::onConfigUpdateFailed(const EnvoyException*) { // We need to allow server startup to continue, even if we have a bad // config. 
- runInitializeCallbackIfAny(); -} - -void RdsRouteConfigSubscription::registerInitTarget(Init::Manager& init_manager) { - init_manager.registerTarget(*this); -} - -void RdsRouteConfigSubscription::runInitializeCallbackIfAny() { - if (initialize_callback_) { - initialize_callback_(); - initialize_callback_ = nullptr; - } + init_target_.ready(); } RdsRouteConfigProviderImpl::RdsRouteConfigProviderImpl( RdsRouteConfigSubscriptionSharedPtr&& subscription, Server::Configuration::FactoryContext& factory_context) - : subscription_(std::move(subscription)), factory_context_(factory_context), + : subscription_(std::move(subscription)), + config_update_info_(subscription_->routeConfigUpdate()), factory_context_(factory_context), tls_(factory_context.threadLocal().allocateSlot()) { ConfigConstSharedPtr initial_config; - if (subscription_->config_info_.has_value()) { - initial_config = - std::make_shared(subscription_->route_config_proto_, factory_context_, false); + if (config_update_info_->configInfo().has_value()) { + initial_config = std::make_shared(config_update_info_->routeConfiguration(), + factory_context_, false); } else { initial_config = std::make_shared(); } tls_->set([initial_config](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { return std::make_shared(initial_config); }); - subscription_->route_config_providers_.insert(this); + subscription_->routeConfigProviders().insert(this); } RdsRouteConfigProviderImpl::~RdsRouteConfigProviderImpl() { - subscription_->route_config_providers_.erase(this); + subscription_->routeConfigProviders().erase(this); } Router::ConfigConstSharedPtr RdsRouteConfigProviderImpl::config() { return tls_->getTyped().config_; } -absl::optional RdsRouteConfigProviderImpl::configInfo() const { - if (!subscription_->config_info_) { - return {}; - } else { - return ConfigInfo{subscription_->route_config_proto_, - subscription_->config_info_.value().last_config_version_}; - } -} - void 
RdsRouteConfigProviderImpl::onConfigUpdate() { ConfigConstSharedPtr new_config( - new ConfigImpl(subscription_->route_config_proto_, factory_context_, false)); + new ConfigImpl(config_update_info_->routeConfiguration(), factory_context_, false)); tls_->runOnAllThreads( [this, new_config]() -> void { tls_->getTyped().config_ = new_config; }); } @@ -206,7 +197,7 @@ Router::RouteConfigProviderPtr RouteConfigProviderManagerImpl::createRdsRouteCon subscription.reset(new RdsRouteConfigSubscription(rds, manager_identifier, factory_context, stat_prefix, *this)); - subscription->registerInitTarget(factory_context.initManager()); + factory_context.initManager().add(subscription->init_target_); route_config_subscriptions_.insert({manager_identifier, subscription}); } else { @@ -241,13 +232,14 @@ RouteConfigProviderManagerImpl::dumpRouteConfigs() const { // of this code, locking the weak_ptr will not fail. auto subscription = element.second.lock(); ASSERT(subscription); - ASSERT(subscription->route_config_providers_.size() > 0); + ASSERT(!subscription->route_config_providers_.empty()); - if (subscription->config_info_) { + if (subscription->routeConfigUpdate()->configInfo()) { auto* dynamic_config = config_dump->mutable_dynamic_route_configs()->Add(); - dynamic_config->set_version_info(subscription->config_info_.value().last_config_version_); - dynamic_config->mutable_route_config()->MergeFrom(subscription->route_config_proto_); - TimestampUtil::systemClockToTimestamp(subscription->last_updated_, + dynamic_config->set_version_info(subscription->routeConfigUpdate()->configVersion()); + dynamic_config->mutable_route_config()->MergeFrom( + subscription->routeConfigUpdate()->routeConfiguration()); + TimestampUtil::systemClockToTimestamp(subscription->routeConfigUpdate()->lastUpdated(), *dynamic_config->mutable_last_updated()); } } diff --git a/source/common/router/rds_impl.h b/source/common/router/rds_impl.h index 4498eb99bb104..2b59fe690fec6 100644 --- 
a/source/common/router/rds_impl.h +++ b/source/common/router/rds_impl.h @@ -12,10 +12,10 @@ #include "envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.pb.h" #include "envoy/config/subscription.h" #include "envoy/http/codes.h" -#include "envoy/init/init.h" #include "envoy/local_info/local_info.h" #include "envoy/router/rds.h" #include "envoy/router/route_config_provider_manager.h" +#include "envoy/router/route_config_update_receiver.h" #include "envoy/server/admin.h" #include "envoy/server/filter_config.h" #include "envoy/singleton/instance.h" @@ -23,7 +23,11 @@ #include "envoy/thread_local/thread_local.h" #include "common/common/logger.h" +#include "common/config/subscription_factory.h" +#include "common/init/target_impl.h" #include "common/protobuf/utility.h" +#include "common/router/route_config_update_receiver_impl.h" +#include "common/router/vhds.h" namespace Envoy { namespace Router { @@ -62,6 +66,7 @@ class StaticRouteConfigProviderImpl : public RouteConfigProvider { return ConfigInfo{route_config_proto_, ""}; } SystemTime lastUpdated() const override { return last_updated_; } + void onConfigUpdate() override {} private: ConfigConstSharedPtr config_; @@ -93,59 +98,53 @@ class RdsRouteConfigProviderImpl; * A class that fetches the route configuration dynamically using the RDS API and updates them to * RDS config providers. 
*/ -class RdsRouteConfigSubscription - : public Init::Target, - Envoy::Config::SubscriptionCallbacks, - Logger::Loggable { +class RdsRouteConfigSubscription : Envoy::Config::SubscriptionCallbacks, + Logger::Loggable { public: - ~RdsRouteConfigSubscription(); + ~RdsRouteConfigSubscription() override; - // Init::Target - void initialize(std::function callback) override { - initialize_callback_ = callback; - subscription_->start({route_config_name_}, *this); + std::unordered_set& routeConfigProviders() { + return route_config_providers_; } + RouteConfigUpdatePtr& routeConfigUpdate() { return config_update_info_; } // Config::SubscriptionCallbacks - void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) override; + // TODO(fredlas) deduplicate + void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + const std::string& version_info) override; + void onConfigUpdate(const Protobuf::RepeatedPtrField&, + const Protobuf::RepeatedPtrField&, const std::string&) override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } void onConfigUpdateFailed(const EnvoyException* e) override; std::string resourceName(const ProtobufWkt::Any& resource) override { return MessageUtil::anyConvert(resource).name(); } private: - struct LastConfigInfo { - uint64_t last_config_hash_; - std::string last_config_version_; - }; - RdsRouteConfigSubscription( const envoy::config::filter::network::http_connection_manager::v2::Rds& rds, const uint64_t manager_identifier, Server::Configuration::FactoryContext& factory_context, const std::string& stat_prefix, RouteConfigProviderManagerImpl& route_config_provider_manager); - void registerInitTarget(Init::Manager& init_manager); - void runInitializeCallbackIfAny(); - - std::unique_ptr> subscription_; - std::function initialize_callback_; + std::unique_ptr subscription_; const std::string route_config_name_; + Server::Configuration::FactoryContext& factory_context_; + Init::TargetImpl init_target_; Stats::ScopePtr scope_; + 
std::string stat_prefix_; RdsStats stats_; RouteConfigProviderManagerImpl& route_config_provider_manager_; const uint64_t manager_identifier_; - TimeSource& time_source_; - SystemTime last_updated_; - absl::optional config_info_; - envoy::api::v2::RouteConfiguration route_config_proto_; - std::unordered_set route_config_providers_; + std::unordered_set route_config_providers_; + VhdsSubscriptionPtr vhds_subscription_; + RouteConfigUpdatePtr config_update_info_; friend class RouteConfigProviderManagerImpl; - friend class RdsRouteConfigProviderImpl; }; -typedef std::shared_ptr RdsRouteConfigSubscriptionSharedPtr; +using RdsRouteConfigSubscriptionSharedPtr = std::shared_ptr; /** * Implementation of RouteConfigProvider that fetches the route configuration dynamically using @@ -154,20 +153,21 @@ typedef std::shared_ptr RdsRouteConfigSubscriptionSh class RdsRouteConfigProviderImpl : public RouteConfigProvider, Logger::Loggable { public: - ~RdsRouteConfigProviderImpl(); + ~RdsRouteConfigProviderImpl() override; RdsRouteConfigSubscription& subscription() { return *subscription_; } - void onConfigUpdate(); + void onConfigUpdate() override; // Router::RouteConfigProvider Router::ConfigConstSharedPtr config() override; - absl::optional configInfo() const override; - SystemTime lastUpdated() const override { return subscription_->last_updated_; } + absl::optional configInfo() const override { + return config_update_info_->configInfo(); + } + SystemTime lastUpdated() const override { return config_update_info_->lastUpdated(); } private: struct ThreadLocalConfig : public ThreadLocal::ThreadLocalObject { - ThreadLocalConfig(ConfigConstSharedPtr initial_config) : config_(initial_config) {} - + ThreadLocalConfig(ConfigConstSharedPtr initial_config) : config_(std::move(initial_config)) {} ConfigConstSharedPtr config_; }; @@ -175,6 +175,7 @@ class RdsRouteConfigProviderImpl : public RouteConfigProvider, Server::Configuration::FactoryContext& factory_context); 
RdsRouteConfigSubscriptionSharedPtr subscription_; + RouteConfigUpdatePtr& config_update_info_; Server::Configuration::FactoryContext& factory_context_; ThreadLocal::SlotPtr tls_; diff --git a/source/common/router/retry_state_impl.cc b/source/common/router/retry_state_impl.cc index 4c7ad826a8ca0..a3fa408e80c04 100644 --- a/source/common/router/retry_state_impl.cc +++ b/source/common/router/retry_state_impl.cc @@ -35,7 +35,7 @@ RetryStatePtr RetryStateImpl::create(const RetryPolicy& route_policy, Upstream::ResourcePriority priority) { RetryStatePtr ret; - // We short circuit here and do not both with an allocation if there is no chance we will retry. + // We short circuit here and do not bother with an allocation if there is no chance we will retry. if (request_headers.EnvoyRetryOn() || request_headers.EnvoyRetryGrpcOn() || route_policy.retryOn()) { ret.reset(new RetryStateImpl(route_policy, request_headers, cluster, runtime, random, @@ -57,16 +57,37 @@ RetryStateImpl::RetryStateImpl(const RetryPolicy& route_policy, Http::HeaderMap& retry_priority_(route_policy.retryPriority()), retriable_status_codes_(route_policy.retriableStatusCodes()) { + retry_on_ = route_policy.retryOn(); + retries_remaining_ = std::max(retries_remaining_, route_policy.numRetries()); + + std::chrono::milliseconds base_interval( + runtime_.snapshot().getInteger("upstream.base_retry_backoff_ms", 25)); + if (route_policy.baseInterval()) { + base_interval = *route_policy.baseInterval(); + } + + // By default, cap the max interval to 10 times the base interval to ensure reasonable back-off + // intervals. + std::chrono::milliseconds max_interval = base_interval * 10; + if (route_policy.maxInterval()) { + max_interval = *route_policy.maxInterval(); + } + + backoff_strategy_ = std::make_unique(base_interval.count(), + max_interval.count(), random_); + host_selection_max_attempts_ = route_policy.hostSelectionMaxAttempts(); + + // Merge in the headers. 
if (request_headers.EnvoyRetryOn()) { - retry_on_ = parseRetryOn(request_headers.EnvoyRetryOn()->value().c_str()); + retry_on_ |= parseRetryOn(request_headers.EnvoyRetryOn()->value().getStringView()); } if (request_headers.EnvoyRetryGrpcOn()) { - retry_on_ |= parseRetryGrpcOn(request_headers.EnvoyRetryGrpcOn()->value().c_str()); + retry_on_ |= parseRetryGrpcOn(request_headers.EnvoyRetryGrpcOn()->value().getStringView()); } if (retry_on_ != 0 && request_headers.EnvoyMaxRetries()) { - const char* max_retries = request_headers.EnvoyMaxRetries()->value().c_str(); uint64_t temp; - if (StringUtil::atoul(max_retries, temp)) { + if (absl::SimpleAtoi(request_headers.EnvoyMaxRetries()->value().getStringView(), &temp)) { + // The max retries header takes precedence if set. retries_remaining_ = temp; } } @@ -74,19 +95,11 @@ RetryStateImpl::RetryStateImpl(const RetryPolicy& route_policy, Http::HeaderMap& for (const auto code : StringUtil::splitToken( request_headers.EnvoyRetriableStatusCodes()->value().getStringView(), ",")) { uint64_t out; - if (StringUtil::atoul(std::string(code).c_str(), out)) { + if (absl::SimpleAtoi(code, &out)) { retriable_status_codes_.emplace_back(out); } } } - - // Merge in the route policy. - retry_on_ |= route_policy.retryOn(); - retries_remaining_ = std::max(retries_remaining_, route_policy.numRetries()); - const uint32_t base = runtime_.snapshot().getInteger("upstream.base_retry_backoff_ms", 25); - // Cap the max interval to 10 times the base interval to ensure reasonable backoff intervals. 
- backoff_strategy_ = std::make_unique(base, base * 10, random_); - host_selection_max_attempts_ = route_policy.hostSelectionMaxAttempts(); } RetryStateImpl::~RetryStateImpl() { resetRetry(); } @@ -147,26 +160,25 @@ void RetryStateImpl::resetRetry() { } } -RetryStatus RetryStateImpl::shouldRetry(const Http::HeaderMap* response_headers, - const absl::optional& reset_reason, - DoRetryCallback callback) { - - ASSERT((response_headers != nullptr) ^ reset_reason.has_value()); - - if (callback_ && !wouldRetry(response_headers, reset_reason)) { +RetryStatus RetryStateImpl::shouldRetry(bool would_retry, DoRetryCallback callback) { + // If a callback is armed from a previous shouldRetry and we don't need to + // retry this particular request, we can infer that we did a retry earlier + // and it was successful. + if (callback_ && !would_retry) { cluster_.stats().upstream_rq_retry_success_.inc(); } resetRetry(); + if (!would_retry) { + return RetryStatus::No; + } + if (retries_remaining_ == 0) { return RetryStatus::NoRetryLimitExceeded; } retries_remaining_--; - if (!wouldRetry(response_headers, reset_reason)) { - return RetryStatus::No; - } if (!cluster_.resourceManager(priority_).retries().canCreate()) { cluster_.stats().upstream_rq_retry_overflow_.inc(); @@ -185,7 +197,26 @@ RetryStatus RetryStateImpl::shouldRetry(const Http::HeaderMap* response_headers, return RetryStatus::Yes; } +RetryStatus RetryStateImpl::shouldRetryHeaders(const Http::HeaderMap& response_headers, + DoRetryCallback callback) { + return shouldRetry(wouldRetryFromHeaders(response_headers), callback); +} + +RetryStatus RetryStateImpl::shouldRetryReset(Http::StreamResetReason reset_reason, + DoRetryCallback callback) { + return shouldRetry(wouldRetryFromReset(reset_reason), callback); +} + bool RetryStateImpl::wouldRetryFromHeaders(const Http::HeaderMap& response_headers) { + if (response_headers.EnvoyOverloaded() != nullptr) { + return false; + } + + // We never retry if the request is rate limited. 
+ if (response_headers.EnvoyRateLimited() != nullptr) { + return false; + } + if (retry_on_ & RetryPolicy::RETRY_ON_5XX) { if (Http::CodeUtility::is5xx(Http::Utility::getResponseStatus(response_headers))) { return true; @@ -237,7 +268,20 @@ bool RetryStateImpl::wouldRetryFromHeaders(const Http::HeaderMap& response_heade return false; } -bool RetryStateImpl::wouldRetryFromReset(const Http::StreamResetReason& reset_reason) { +bool RetryStateImpl::wouldRetryFromReset(const Http::StreamResetReason reset_reason) { + // First check "never retry" conditions so we can short circuit (we never + // retry if the reset reason is overflow). + if (reset_reason == Http::StreamResetReason::Overflow) { + return false; + } + + if (retry_on_ & (RetryPolicy::RETRY_ON_5XX | RetryPolicy::RETRY_ON_GATEWAY_ERROR)) { + // Currently we count an upstream reset as a "5xx" (since it will result in + // one). We may eventually split this out into its own type. I.e., + // RETRY_ON_RESET. + return true; + } + if ((retry_on_ & RetryPolicy::RETRY_ON_REFUSED_STREAM) && reset_reason == Http::StreamResetReason::RemoteRefusedStreamReset) { return true; @@ -251,39 +295,5 @@ bool RetryStateImpl::wouldRetryFromReset(const Http::StreamResetReason& reset_re return false; } -bool RetryStateImpl::wouldRetry(const Http::HeaderMap* response_headers, - const absl::optional& reset_reason) { - // First check "never retry" conditions so we can short circuit, then delegate to - // helper methods for checks dependent on retry policy. - - // we never retry if the reset reason is overflow. - if (reset_reason && reset_reason.value() == Http::StreamResetReason::Overflow) { - return false; - } - - if (response_headers != nullptr) { - // We never retry if the overloaded header is set. - if (response_headers->EnvoyOverloaded() != nullptr) { - return false; - } - - // We never retry if the request is rate limited. 
- if (response_headers->EnvoyRateLimited() != nullptr) { - return false; - } - - if (wouldRetryFromHeaders(*response_headers)) { - return true; - } - } else if (retry_on_ & (RetryPolicy::RETRY_ON_5XX | RetryPolicy::RETRY_ON_GATEWAY_ERROR)) { - // wouldRetry() is passed null headers when there was an upstream reset. Currently we count an - // upstream reset as a "5xx" (since it will result in one). We may eventually split this out - // into its own type. I.e., RETRY_ON_RESET. - return true; - } - - return reset_reason && wouldRetryFromReset(*reset_reason); -} - } // namespace Router } // namespace Envoy diff --git a/source/common/router/retry_state_impl.h b/source/common/router/retry_state_impl.h index 15a6d0938c770..78d017cf8db9c 100644 --- a/source/common/router/retry_state_impl.h +++ b/source/common/router/retry_state_impl.h @@ -36,9 +36,10 @@ class RetryStateImpl : public RetryState { // Router::RetryState bool enabled() override { return retry_on_ != 0; } - RetryStatus shouldRetry(const Http::HeaderMap* response_headers, - const absl::optional& reset_reason, - DoRetryCallback callback) override; + RetryStatus shouldRetryHeaders(const Http::HeaderMap& response_headers, + DoRetryCallback callback) override; + RetryStatus shouldRetryReset(const Http::StreamResetReason reset_reason, + DoRetryCallback callback) override; void onHostAttempted(Upstream::HostDescriptionConstSharedPtr host) override { std::for_each(retry_host_predicates_.begin(), retry_host_predicates_.end(), @@ -73,10 +74,9 @@ class RetryStateImpl : public RetryState { void enableBackoffTimer(); void resetRetry(); - bool wouldRetry(const Http::HeaderMap* response_headers, - const absl::optional& reset_reason); - bool wouldRetryFromReset(const Http::StreamResetReason& reset_reason); + bool wouldRetryFromReset(const Http::StreamResetReason reset_reason); bool wouldRetryFromHeaders(const Http::HeaderMap& response_headers); + RetryStatus shouldRetry(bool would_retry, DoRetryCallback callback); const 
Upstream::ClusterInfo& cluster_; Runtime::Loader& runtime_; diff --git a/source/common/router/route_config_update_receiver_impl.cc b/source/common/router/route_config_update_receiver_impl.cc new file mode 100644 index 0000000000000..9dbdff8ba5415 --- /dev/null +++ b/source/common/router/route_config_update_receiver_impl.cc @@ -0,0 +1,83 @@ +#include "common/router/route_config_update_receiver_impl.h" + +#include + +#include "envoy/api/v2/route/route.pb.validate.h" + +#include "common/common/assert.h" +#include "common/common/fmt.h" +#include "common/protobuf/utility.h" +#include "common/router/config_impl.h" + +namespace Envoy { +namespace Router { + +bool RouteConfigUpdateReceiverImpl::onRdsUpdate(const envoy::api::v2::RouteConfiguration& rc, + const std::string& version_info) { + const uint64_t new_hash = MessageUtil::hash(rc); + if (new_hash == last_config_hash_) { + return false; + } + + route_config_proto_ = rc; + last_config_hash_ = new_hash; + last_config_version_ = version_info; + last_updated_ = time_source_.systemTime(); + initializeVhosts(route_config_proto_); + config_info_.emplace(RouteConfigProvider::ConfigInfo{route_config_proto_, last_config_version_}); + return true; +} + +bool RouteConfigUpdateReceiverImpl::onVhdsUpdate( + const Protobuf::RepeatedPtrField& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& version_info) { + removeVhosts(virtual_hosts_, removed_resources); + updateVhosts(virtual_hosts_, added_resources); + rebuildRouteConfig(virtual_hosts_, route_config_proto_); + + return onRdsUpdate(route_config_proto_, version_info); +} + +void RouteConfigUpdateReceiverImpl::initializeVhosts( + const envoy::api::v2::RouteConfiguration& route_configuration) { + virtual_hosts_.clear(); + for (const auto& vhost : route_configuration.virtual_hosts()) { + virtual_hosts_.emplace(vhost.name(), vhost); + } +} + +void RouteConfigUpdateReceiverImpl::removeVhosts( + std::unordered_map& vhosts, + const 
Protobuf::RepeatedPtrField& removed_vhost_names) { + for (const auto& vhost_name : removed_vhost_names) { + vhosts.erase(vhost_name); + } +} + +void RouteConfigUpdateReceiverImpl::updateVhosts( + std::unordered_map& vhosts, + const Protobuf::RepeatedPtrField& added_resources) { + for (const auto& resource : added_resources) { + envoy::api::v2::route::VirtualHost vhost = + MessageUtil::anyConvert(resource.resource()); + MessageUtil::validate(vhost); + auto found = vhosts.find(vhost.name()); + if (found != vhosts.end()) { + vhosts.erase(found); + } + vhosts.emplace(vhost.name(), vhost); + } +} + +void RouteConfigUpdateReceiverImpl::rebuildRouteConfig( + const std::unordered_map& vhosts, + envoy::api::v2::RouteConfiguration& route_config) { + route_config.clear_virtual_hosts(); + for (const auto& vhost : vhosts) { + route_config.mutable_virtual_hosts()->Add()->CopyFrom(vhost.second); + } +} + +} // namespace Router +} // namespace Envoy diff --git a/source/common/router/route_config_update_receiver_impl.h b/source/common/router/route_config_update_receiver_impl.h new file mode 100644 index 0000000000000..f1ee423d03aeb --- /dev/null +++ b/source/common/router/route_config_update_receiver_impl.h @@ -0,0 +1,58 @@ +#pragma once + +#include +#include + +#include "envoy/api/v2/route/route.pb.h" +#include "envoy/router/rds.h" +#include "envoy/router/route_config_update_receiver.h" + +#include "common/common/logger.h" +#include "common/protobuf/utility.h" + +namespace Envoy { +namespace Router { + +class RouteConfigUpdateReceiverImpl : public RouteConfigUpdateReceiver { +public: + RouteConfigUpdateReceiverImpl(TimeSource& time_source) + : time_source_(time_source), last_config_hash_(0ull) {} + + void initializeVhosts(const envoy::api::v2::RouteConfiguration& route_configuration); + void removeVhosts(std::unordered_map& vhosts, + const Protobuf::RepeatedPtrField& removed_vhost_names); + void updateVhosts(std::unordered_map& vhosts, + const Protobuf::RepeatedPtrField& 
added_resources); + void rebuildRouteConfig( + const std::unordered_map& vhosts, + envoy::api::v2::RouteConfiguration& route_config); + + // Router::RouteConfigUpdateReceiver + bool onRdsUpdate(const envoy::api::v2::RouteConfiguration& rc, + const std::string& version_info) override; + bool onVhdsUpdate(const Protobuf::RepeatedPtrField& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& version_info) override; + const std::string& routeConfigName() const override { return route_config_proto_.name(); } + const std::string& configVersion() override { return last_config_version_; } + uint64_t configHash() const override { return last_config_hash_; } + absl::optional configInfo() const override { + return config_info_; + } + const envoy::api::v2::RouteConfiguration& routeConfiguration() override { + return route_config_proto_; + } + SystemTime lastUpdated() const override { return last_updated_; } + +private: + TimeSource& time_source_; + envoy::api::v2::RouteConfiguration route_config_proto_; + uint64_t last_config_hash_; + std::string last_config_version_; + SystemTime last_updated_; + std::unordered_map virtual_hosts_; + absl::optional config_info_; +}; + +} // namespace Router +} // namespace Envoy diff --git a/source/common/router/router.cc b/source/common/router/router.cc index af5e8b6faa144..bac7d181bef6f 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -34,8 +34,9 @@ uint32_t getLength(const Buffer::Instance* instance) { return instance ? 
instanc bool schemeIsHttp(const Http::HeaderMap& downstream_headers, const Network::Connection& connection) { - if (downstream_headers.ForwardedProto() && downstream_headers.ForwardedProto()->value().c_str() == - Http::Headers::get().SchemeValues.Http) { + if (downstream_headers.ForwardedProto() && + downstream_headers.ForwardedProto()->value().getStringView() == + Http::Headers::get().SchemeValues.Http) { return true; } if (!connection.ssl()) { @@ -77,7 +78,7 @@ bool convertRequestHeadersForInternalRedirect(Http::HeaderMap& downstream_header // Replace the original host, scheme and path. downstream_headers.insertScheme().value(std::string(absolute_url.scheme())); downstream_headers.insertHost().value(std::string(absolute_url.host_and_port())); - downstream_headers.insertPath().value(std::string(absolute_url.path())); + downstream_headers.insertPath().value(std::string(absolute_url.path_and_query_params())); return true; } @@ -124,6 +125,15 @@ FilterUtility::finalTimeout(const RouteEntry& route, Http::HeaderMap& request_he if (grpc_request && route.maxGrpcTimeout()) { const std::chrono::milliseconds max_grpc_timeout = route.maxGrpcTimeout().value(); std::chrono::milliseconds grpc_timeout = Grpc::Common::getGrpcTimeout(request_headers); + if (route.grpcTimeoutOffset()) { + // We only apply the offset if it won't result in grpc_timeout hitting 0 or below, as + // setting it to 0 means infinity and a negative timeout makes no sense. + const auto offset = *route.grpcTimeoutOffset(); + if (offset < grpc_timeout) { + grpc_timeout -= offset; + } + } + // Cap gRPC timeout to the configured maximum considering that 0 means infinity. 
if (max_grpc_timeout != std::chrono::milliseconds(0) && (grpc_timeout == std::chrono::milliseconds(0) || grpc_timeout > max_grpc_timeout)) { @@ -138,7 +148,7 @@ FilterUtility::finalTimeout(const RouteEntry& route, Http::HeaderMap& request_he Http::HeaderEntry* header_timeout_entry = request_headers.EnvoyUpstreamRequestTimeoutMs(); uint64_t header_timeout; if (header_timeout_entry) { - if (StringUtil::atoul(header_timeout_entry->value().c_str(), header_timeout)) { + if (absl::SimpleAtoi(header_timeout_entry->value().getStringView(), &header_timeout)) { timeout.global_timeout_ = std::chrono::milliseconds(header_timeout); } request_headers.removeEnvoyUpstreamRequestTimeoutMs(); @@ -147,7 +157,7 @@ FilterUtility::finalTimeout(const RouteEntry& route, Http::HeaderMap& request_he // See if there is a per try/retry timeout. If it's >= global we just ignore it. Http::HeaderEntry* per_try_timeout_entry = request_headers.EnvoyUpstreamRequestPerTryTimeoutMs(); if (per_try_timeout_entry) { - if (StringUtil::atoul(per_try_timeout_entry->value().c_str(), header_timeout)) { + if (absl::SimpleAtoi(per_try_timeout_entry->value().getStringView(), &header_timeout)) { timeout.per_try_timeout_ = std::chrono::milliseconds(header_timeout); } request_headers.removeEnvoyUpstreamRequestPerTryTimeoutMs(); @@ -181,7 +191,7 @@ FilterUtility::finalTimeout(const RouteEntry& route, Http::HeaderMap& request_he Filter::~Filter() { // Upstream resources should already have been cleaned. - ASSERT(!upstream_request_); + ASSERT(upstream_requests_.empty()); ASSERT(!retry_state_); } @@ -206,10 +216,6 @@ void Filter::chargeUpstreamCode(uint64_t response_status_code, const bool internal_request = internal_request_header && internal_request_header->value() == "true"; - // TODO(mattklein123): Remove copy when G string compat issues are fixed. 
- const std::string zone_name = config_.local_info_.zoneName(); - const std::string upstream_zone = upstreamZone(upstream_host); - Http::CodeStats::ResponseStatInfo info{config_.scope_, cluster_->statsScope(), EMPTY_STRING, @@ -218,19 +224,24 @@ void Filter::chargeUpstreamCode(uint64_t response_status_code, route_entry_->virtualHost().name(), request_vcluster_ ? request_vcluster_->name() : EMPTY_STRING, - zone_name, - upstream_zone, + config_.local_info_.zoneName(), + upstreamZone(upstream_host), is_canary}; Http::CodeStats& code_stats = httpContext().codeStats(); code_stats.chargeResponseStat(info); if (!alt_stat_prefix_.empty()) { - Http::CodeStats::ResponseStatInfo info{config_.scope_, cluster_->statsScope(), - alt_stat_prefix_, response_status_code, - internal_request, EMPTY_STRING, - EMPTY_STRING, zone_name, - upstream_zone, is_canary}; + Http::CodeStats::ResponseStatInfo info{config_.scope_, + cluster_->statsScope(), + alt_stat_prefix_, + response_status_code, + internal_request, + EMPTY_STRING, + EMPTY_STRING, + config_.local_info_.zoneName(), + upstreamZone(upstream_host), + is_canary}; code_stats.chargeResponseStat(info); } @@ -274,10 +285,11 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool e if (!route_) { config_.stats_.no_route_.inc(); ENVOY_STREAM_LOG(debug, "no cluster match for URL '{}'", *callbacks_, - headers.Path()->value().c_str()); + headers.Path()->value().getStringView()); callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound); - callbacks_->sendLocalReply(Http::Code::NotFound, "", nullptr, absl::nullopt); + callbacks_->sendLocalReply(Http::Code::NotFound, "", nullptr, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().RouteNotFound); return Http::FilterHeadersStatus::StopIteration; } @@ -296,7 +308,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool e } direct_response->finalizeResponseHeaders(response_headers, callbacks_->streamInfo()); }, - 
absl::nullopt); + absl::nullopt, StreamInfo::ResponseCodeDetails::get().DirectResponse); return Http::FilterHeadersStatus::StopIteration; } @@ -309,7 +321,8 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool e callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound); callbacks_->sendLocalReply(route_entry_->clusterNotFoundResponseCode(), "", nullptr, - absl::nullopt); + absl::nullopt, + StreamInfo::ResponseCodeDetails::get().ClusterNotFound); return Http::FilterHeadersStatus::StopIteration; } cluster_ = cluster->info(); @@ -317,11 +330,11 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool e // Set up stat prefixes, etc. request_vcluster_ = route_entry_->virtualCluster(headers); ENVOY_STREAM_LOG(debug, "cluster '{}' match for URL '{}'", *callbacks_, - route_entry_->clusterName(), headers.Path()->value().c_str()); + route_entry_->clusterName(), headers.Path()->value().getStringView()); const Http::HeaderEntry* request_alt_name = headers.EnvoyUpstreamAltStatName(); if (request_alt_name) { - alt_stat_prefix_ = std::string(request_alt_name->value().c_str()) + "."; + alt_stat_prefix_ = std::string(request_alt_name->value().getStringView()) + "."; headers.removeEnvoyUpstreamAltStatName(); } @@ -329,14 +342,14 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool e if (cluster_->maintenanceMode()) { callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow); chargeUpstreamCode(Http::Code::ServiceUnavailable, nullptr, true); - callbacks_->sendLocalReply(Http::Code::ServiceUnavailable, "maintenance mode", - [this](Http::HeaderMap& headers) { - if (!config_.suppress_envoy_headers_) { - headers.insertEnvoyOverloaded().value( - Http::Headers::get().EnvoyOverloadedValues.True); - } - }, - absl::nullopt); + callbacks_->sendLocalReply( + Http::Code::ServiceUnavailable, "maintenance mode", + [this](Http::HeaderMap& headers) { + if 
(!config_.suppress_envoy_headers_) { + headers.insertEnvoyOverloaded().value(Http::Headers::get().EnvoyOverloadedValues.True); + } + }, + absl::nullopt, StreamInfo::ResponseCodeDetails::get().MaintenanceMode); cluster_->stats().upstream_rq_maintenance_mode_.inc(); return Http::FilterHeadersStatus::StopIteration; } @@ -380,8 +393,9 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool e ENVOY_STREAM_LOG(debug, "router decoding headers:\n{}", *callbacks_, headers); - upstream_request_ = std::make_unique(*this, *conn_pool); - upstream_request_->encodeHeaders(end_stream); + UpstreamRequestPtr upstream_request = std::make_unique(*this, *conn_pool); + upstream_request->moveIntoList(std::move(upstream_request), upstream_requests_); + upstream_requests_.front()->encodeHeaders(end_stream); if (end_stream) { onRequestComplete(); } @@ -409,10 +423,12 @@ void Filter::sendNoHealthyUpstreamResponse() { callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoHealthyUpstream); chargeUpstreamCode(Http::Code::ServiceUnavailable, nullptr, false); callbacks_->sendLocalReply(Http::Code::ServiceUnavailable, "no healthy upstream", nullptr, - absl::nullopt); + absl::nullopt, + StreamInfo::ResponseCodeDetails::get().NoHealthyUpstream); } Http::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool end_stream) { + ASSERT(upstream_requests_.size() == 1); bool buffering = (retry_state_ && retry_state_->enabled()) || do_shadowing_; if (buffering && buffer_limit_ > 0 && getLength(callbacks_->decodingBuffer()) + data.length() > buffer_limit_) { @@ -427,7 +443,7 @@ Http::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool end_strea // If we are going to buffer for retries or shadowing, we need to make a copy before encoding // since it's all moves from here on. 
Buffer::OwnedImpl copy(data); - upstream_request_->encodeData(copy, end_stream); + upstream_requests_.front()->encodeData(copy, end_stream); // If we are potentially going to retry or shadow this request we need to buffer. // This will not cause the connection manager to 413 because before we hit the @@ -436,7 +452,7 @@ Http::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool end_strea // potentially shadow. callbacks_->addDecodedData(data, true); } else { - upstream_request_->encodeData(data, end_stream); + upstream_requests_.front()->encodeData(data, end_stream); } if (end_stream) { @@ -449,7 +465,8 @@ Http::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool end_strea Http::FilterTrailersStatus Filter::decodeTrailers(Http::HeaderMap& trailers) { ENVOY_STREAM_LOG(debug, "router decoding trailers:\n{}", *callbacks_, trailers); downstream_trailers_ = &trailers; - upstream_request_->encodeTrailers(trailers); + ASSERT(upstream_requests_.size() == 1); + upstream_requests_.front()->encodeTrailers(trailers); onRequestComplete(); return Http::FilterTrailersStatus::StopIteration; } @@ -463,7 +480,15 @@ void Filter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callb } void Filter::cleanup() { - upstream_request_.reset(); + ASSERT(upstream_requests_.size() <= 1); + // UpstreamRequests are only destroyed in this method (cleanup()) or when we + // do a retry (setupRetry()). In the latter case we don't want to save the + // upstream timings to the downstream info. 
+ if (upstream_requests_.size() == 1) { + UpstreamRequestPtr upstream_request = + upstream_requests_.back()->removeFromList(upstream_requests_); + callbacks_->streamInfo().setUpstreamTiming(upstream_request->upstream_timing_); + } retry_state_.reset(); if (response_timeout_) { response_timeout_->disableTimer(); @@ -491,12 +516,14 @@ void Filter::maybeDoShadowing() { } void Filter::onRequestComplete() { + // This should be called exactly once, when the downstream request has been received in full. + ASSERT(!downstream_end_stream_); downstream_end_stream_ = true; Event::Dispatcher& dispatcher = callbacks_->dispatcher(); - downstream_request_complete_time_ = dispatcher.timeSystem().monotonicTime(); + downstream_request_complete_time_ = dispatcher.timeSource().monotonicTime(); // Possible that we got an immediate reset. - if (upstream_request_) { + if (upstream_requests_.size() == 1) { // Even if we got an immediate reset, we could still shadow, but that is a riskier change and // seems unnecessary right now. maybeDoShadowing(); @@ -505,12 +532,18 @@ void Filter::onRequestComplete() { response_timeout_ = dispatcher.createTimer([this]() -> void { onResponseTimeout(); }); response_timeout_->enableTimer(timeout_.global_timeout_); } + + for (auto& upstream_request : upstream_requests_) { + if (upstream_request->create_per_try_timeout_on_request_complete_) { + upstream_request->setupPerTryTimeout(); + } + } } } void Filter::onDestroy() { - if (upstream_request_ && !attempting_internal_redirect_with_complete_stream_) { - upstream_request_->resetStream(); + if (upstream_requests_.size() == 1 && !attempting_internal_redirect_with_complete_stream_) { + upstream_requests_.front()->resetStream(); } cleanup(); } @@ -519,85 +552,69 @@ void Filter::onResponseTimeout() { ENVOY_STREAM_LOG(debug, "upstream timeout", *callbacks_); cluster_->stats().upstream_rq_timeout_.inc(); - // It's possible to timeout during a retry backoff delay when we have no upstream request. 
In - // this case we fake a reset since onUpstreamReset() doesn't care. - if (upstream_request_) { - if (upstream_request_->upstream_host_) { - upstream_request_->upstream_host_->stats().rq_timeout_.inc(); + ASSERT(upstream_requests_.size() <= 1); + if (upstream_requests_.size() == 1) { + if (upstream_requests_.front()->upstream_host_) { + upstream_requests_.front()->upstream_host_->stats().rq_timeout_.inc(); } - upstream_request_->resetStream(); + + updateOutlierDetection(timeout_response_code_, *upstream_requests_.front().get()); + upstream_requests_.front()->resetStream(); } - onUpstreamReset(UpstreamResetType::GlobalTimeout, absl::optional()); + onUpstreamTimeoutAbort(StreamInfo::ResponseFlag::UpstreamRequestTimeout, + StreamInfo::ResponseCodeDetails::get().UpstreamTimeout); } -void Filter::onUpstreamReset(UpstreamResetType type, - const absl::optional& reset_reason) { - ASSERT(type == UpstreamResetType::GlobalTimeout || upstream_request_); - if (type == UpstreamResetType::Reset) { - ENVOY_STREAM_LOG(debug, "upstream reset", *callbacks_); - } +void Filter::onPerTryTimeout(UpstreamRequest& upstream_request) { + updateOutlierDetection(timeout_response_code_, upstream_request); - Upstream::HostDescriptionConstSharedPtr upstream_host; - if (upstream_request_) { - upstream_host = upstream_request_->upstream_host_; - if (upstream_host) { - upstream_host->outlierDetector().putHttpResponseCode( - enumToInt(type == UpstreamResetType::Reset ? Http::Code::ServiceUnavailable - : timeout_response_code_)); - } + if (maybeRetryReset(Http::StreamResetReason::LocalReset, upstream_request)) { + return; } - // We don't retry on a global timeout or if we already started the response. - if (type != UpstreamResetType::GlobalTimeout && !downstream_response_started_ && retry_state_) { - // Notify retry modifiers about the attempted host. 
- if (upstream_host != nullptr) { - retry_state_->onHostAttempted(upstream_host); - } + onUpstreamTimeoutAbort(StreamInfo::ResponseFlag::UpstreamRequestTimeout, + StreamInfo::ResponseCodeDetails::get().UpstreamPerTryTimeout); +} - RetryStatus retry_status = - retry_state_->shouldRetry(nullptr, reset_reason, [this]() -> void { doRetry(); }); - if (retry_status == RetryStatus::Yes && setupRetry(true)) { - if (upstream_host) { - upstream_host->stats().rq_error_.inc(); - } - return; - } else if (retry_status == RetryStatus::NoOverflow) { - callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow); - } else if (retry_status == RetryStatus::NoRetryLimitExceeded) { - callbacks_->streamInfo().setResponseFlag( - StreamInfo::ResponseFlag::UpstreamRetryLimitExceeded); - } +void Filter::updateOutlierDetection(Http::Code code, UpstreamRequest& upstream_request) { + if (upstream_request.upstream_host_) { + upstream_request.upstream_host_->outlierDetector().putHttpResponseCode(enumToInt(code)); } +} + +void Filter::onUpstreamTimeoutAbort(StreamInfo::ResponseFlag response_flags, + absl::string_view details) { + const absl::string_view body = + timeout_response_code_ == Http::Code::GatewayTimeout ? "upstream request timeout" : ""; + onUpstreamAbort(timeout_response_code_, response_flags, body, false, details); +} +void Filter::onUpstreamAbort(Http::Code code, StreamInfo::ResponseFlag response_flags, + absl::string_view body, bool dropped, absl::string_view details) { + ASSERT(upstream_requests_.size() <= 1); // If we have not yet sent anything downstream, send a response with an appropriate status code. // Otherwise just reset the ongoing response. 
if (downstream_response_started_) { - if (upstream_request_ != nullptr && upstream_request_->grpc_rq_success_deferred_) { - upstream_request_->upstream_host_->stats().rq_error_.inc(); + if (upstream_requests_.size() == 1 && upstream_requests_.front()->grpc_rq_success_deferred_) { + upstream_requests_.front()->upstream_host_->stats().rq_error_.inc(); + config_.stats_.rq_reset_after_downstream_response_started_.inc(); } // This will destroy any created retry timers. + callbacks_->streamInfo().setResponseCodeDetails(details); cleanup(); callbacks_->resetStream(); } else { + Upstream::HostDescriptionConstSharedPtr upstream_host; + if (upstream_requests_.size() == 1) { + upstream_host = upstream_requests_.front()->upstream_host_; + } + // This will destroy any created retry timers. cleanup(); - Http::Code code; - const char* body; - if (type == UpstreamResetType::GlobalTimeout || type == UpstreamResetType::PerTryTimeout) { - callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout); - code = timeout_response_code_; - body = code == Http::Code::GatewayTimeout ? "upstream request timeout" : ""; - } else { - StreamInfo::ResponseFlag response_flags = - streamResetReasonToResponseFlag(reset_reason.value()); - callbacks_->streamInfo().setResponseFlag(response_flags); - code = Http::Code::ServiceUnavailable; - body = "upstream connect error or disconnect/reset before headers"; - } + callbacks_->streamInfo().setResponseFlag(response_flags); - const bool dropped = reset_reason && reset_reason.value() == Http::StreamResetReason::Overflow; chargeUpstreamCode(code, upstream_host, dropped); // If we had non-5xx but still have been reset by backend or timeout before // starting response, we treat this as an error. 
We only get non-5xx when @@ -606,15 +623,74 @@ void Filter::onUpstreamReset(UpstreamResetType type, if (upstream_host != nullptr && !Http::CodeUtility::is5xx(enumToInt(code))) { upstream_host->stats().rq_error_.inc(); } - callbacks_->sendLocalReply(code, body, - [dropped, this](Http::HeaderMap& headers) { - if (dropped && !config_.suppress_envoy_headers_) { - headers.insertEnvoyOverloaded().value( - Http::Headers::get().EnvoyOverloadedValues.True); - } - }, - absl::nullopt); + callbacks_->sendLocalReply( + code, body, + [dropped, this](Http::HeaderMap& headers) { + if (dropped && !config_.suppress_envoy_headers_) { + headers.insertEnvoyOverloaded().value(Http::Headers::get().EnvoyOverloadedValues.True); + } + }, + absl::nullopt, details); + } +} + +bool Filter::maybeRetryReset(Http::StreamResetReason reset_reason, + UpstreamRequest& upstream_request) { + // We don't retry if we already started the response. + if (downstream_response_started_ || !retry_state_) { + return false; + } + + Upstream::HostDescriptionConstSharedPtr upstream_host; + upstream_host = upstream_request.upstream_host_; + + // Notify retry modifiers about the attempted host. 
+ if (upstream_host != nullptr) { + retry_state_->onHostAttempted(upstream_host); + } + + const RetryStatus retry_status = + retry_state_->shouldRetryReset(reset_reason, [this]() -> void { doRetry(); }); + if (retry_status == RetryStatus::Yes && setupRetry(true)) { + if (upstream_host) { + upstream_host->stats().rq_error_.inc(); + } + return true; + } else if (retry_status == RetryStatus::NoOverflow) { + callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow); + } else if (retry_status == RetryStatus::NoRetryLimitExceeded) { + callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamRetryLimitExceeded); + } + + return false; +} + +void Filter::onUpstreamReset(Http::StreamResetReason reset_reason, + absl::string_view transport_failure_reason, + UpstreamRequest& upstream_request) { + ENVOY_STREAM_LOG(debug, "upstream reset: reset reason {}", *callbacks_, + Http::Utility::resetReasonToString(reset_reason)); + + updateOutlierDetection(Http::Code::ServiceUnavailable, upstream_request); + + if (maybeRetryReset(reset_reason, upstream_request)) { + return; } + + const StreamInfo::ResponseFlag response_flags = streamResetReasonToResponseFlag(reset_reason); + const std::string body = + absl::StrCat("upstream connect error or disconnect/reset before headers. reset reason: ", + Http::Utility::resetReasonToString(reset_reason)); + + const bool dropped = reset_reason == Http::StreamResetReason::Overflow; + callbacks_->streamInfo().setUpstreamTransportFailureReason(transport_failure_reason); + const std::string& basic_details = + downstream_response_started_ ? StreamInfo::ResponseCodeDetails::get().LateUpstreamReset + : StreamInfo::ResponseCodeDetails::get().EarlyUpstreamReset; + const std::string details = absl::StrCat( + basic_details, "{", Http::Utility::resetReasonToString(reset_reason), + transport_failure_reason.empty() ? 
"" : absl::StrCat(",", transport_failure_reason), "}"); + onUpstreamAbort(Http::Code::ServiceUnavailable, response_flags, body, dropped, details); } StreamInfo::ResponseFlag @@ -634,10 +710,11 @@ Filter::streamResetReasonToResponseFlag(Http::StreamResetReason reset_reason) { return StreamInfo::ResponseFlag::UpstreamRemoteReset; } - throw std::invalid_argument("Unknown reset_reason"); + NOT_REACHED_GCOVR_EXCL_LINE; } -void Filter::handleNon5xxResponseHeaders(const Http::HeaderMap& headers, bool end_stream) { +void Filter::handleNon5xxResponseHeaders(const Http::HeaderMap& headers, + UpstreamRequest& upstream_request, bool end_stream) { // We need to defer gRPC success until after we have processed grpc-status in // the trailers. if (grpc_request_) { @@ -645,19 +722,21 @@ void Filter::handleNon5xxResponseHeaders(const Http::HeaderMap& headers, bool en absl::optional grpc_status = Grpc::Common::getGrpcStatus(headers); if (grpc_status && !Http::CodeUtility::is5xx(Grpc::Utility::grpcToHttpStatus(grpc_status.value()))) { - upstream_request_->upstream_host_->stats().rq_success_.inc(); + upstream_request.upstream_host_->stats().rq_success_.inc(); } else { - upstream_request_->upstream_host_->stats().rq_error_.inc(); + upstream_request.upstream_host_->stats().rq_error_.inc(); } } else { - upstream_request_->grpc_rq_success_deferred_ = true; + upstream_request.grpc_rq_success_deferred_ = true; } } else { - upstream_request_->upstream_host_->stats().rq_success_.inc(); + upstream_request.upstream_host_->stats().rq_success_.inc(); } } -void Filter::onUpstream100ContinueHeaders(Http::HeaderMapPtr&& headers) { +void Filter::onUpstream100ContinueHeaders(Http::HeaderMapPtr&& headers, + UpstreamRequest& upstream_request) { + chargeUpstreamCode(100, *headers, upstream_request.upstream_host_, false); ENVOY_STREAM_LOG(debug, "upstream 100 continue", *callbacks_); downstream_response_started_ = true; @@ -670,25 +749,26 @@ void Filter::onUpstream100ContinueHeaders(Http::HeaderMapPtr&& 
headers) { callbacks_->encode100ContinueHeaders(std::move(headers)); } -void Filter::onUpstreamHeaders(const uint64_t response_code, Http::HeaderMapPtr&& headers, - bool end_stream) { +void Filter::onUpstreamHeaders(uint64_t response_code, Http::HeaderMapPtr&& headers, + UpstreamRequest& upstream_request, bool end_stream) { + ASSERT(upstream_requests_.size() == 1); ENVOY_STREAM_LOG(debug, "upstream headers complete: end_stream={}", *callbacks_, end_stream); - upstream_request_->upstream_host_->outlierDetector().putHttpResponseCode(response_code); + upstream_request.upstream_host_->outlierDetector().putHttpResponseCode(response_code); if (headers->EnvoyImmediateHealthCheckFail() != nullptr) { - upstream_request_->upstream_host_->healthChecker().setUnhealthy(); + upstream_request.upstream_host_->healthChecker().setUnhealthy(); } if (retry_state_) { // Notify retry modifiers about the attempted host. - retry_state_->onHostAttempted(upstream_request_->upstream_host_); + retry_state_->onHostAttempted(upstream_request.upstream_host_); - RetryStatus retry_status = retry_state_->shouldRetry( - headers.get(), absl::optional(), [this]() -> void { doRetry(); }); // Capture upstream_host since setupRetry() in the following line will clear - // upstream_request_. - const auto upstream_host = upstream_request_->upstream_host_; + // upstream_request. 
+ const auto upstream_host = upstream_request.upstream_host_; + const RetryStatus retry_status = + retry_state_->shouldRetryHeaders(*headers, [this]() -> void { doRetry(); }); if (retry_status == RetryStatus::Yes && setupRetry(end_stream)) { Http::CodeStats& code_stats = httpContext().codeStats(); code_stats.chargeBasicResponseStat(cluster_->statsScope(), "retry.", @@ -709,7 +789,7 @@ void Filter::onUpstreamHeaders(const uint64_t response_code, Http::HeaderMapPtr& if (static_cast(response_code) == Http::Code::Found && route_entry_->internalRedirectAction() == InternalRedirectAction::Handle && - setupRedirect(*headers)) { + setupRedirect(*headers, upstream_request)) { return; // If the redirect could not be handled, fail open and let it pass to the // next downstream. @@ -719,7 +799,7 @@ void Filter::onUpstreamHeaders(const uint64_t response_code, Http::HeaderMapPtr& // premature response. if (DateUtil::timePointValid(downstream_request_complete_time_)) { Event::Dispatcher& dispatcher = callbacks_->dispatcher(); - MonotonicTime response_received_time = dispatcher.timeSystem().monotonicTime(); + MonotonicTime response_received_time = dispatcher.timeSource().monotonicTime(); std::chrono::milliseconds ms = std::chrono::duration_cast( response_received_time - downstream_request_complete_time_); if (!config_.suppress_envoy_headers_) { @@ -727,12 +807,12 @@ void Filter::onUpstreamHeaders(const uint64_t response_code, Http::HeaderMapPtr& } } - upstream_request_->upstream_canary_ = + upstream_request.upstream_canary_ = (headers->EnvoyUpstreamCanary() && headers->EnvoyUpstreamCanary()->value() == "true") || - upstream_request_->upstream_host_->canary(); - chargeUpstreamCode(response_code, *headers, upstream_request_->upstream_host_, false); + upstream_request.upstream_host_->canary(); + chargeUpstreamCode(response_code, *headers, upstream_request.upstream_host_, false); if (!Http::CodeUtility::is5xx(response_code)) { - handleNon5xxResponseHeaders(*headers, end_stream); + 
handleNon5xxResponseHeaders(*headers, upstream_request, end_stream); } // Append routing cookies @@ -747,35 +827,40 @@ void Filter::onUpstreamHeaders(const uint64_t response_code, Http::HeaderMapPtr& downstream_response_started_ = true; if (end_stream) { - onUpstreamComplete(); + onUpstreamComplete(upstream_request); } + callbacks_->streamInfo().setResponseCodeDetails( + StreamInfo::ResponseCodeDetails::get().ViaUpstream); callbacks_->encodeHeaders(std::move(headers), end_stream); } -void Filter::onUpstreamData(Buffer::Instance& data, bool end_stream) { +void Filter::onUpstreamData(Buffer::Instance& data, UpstreamRequest& upstream_request, + bool end_stream) { + ASSERT(upstream_requests_.size() == 1); if (end_stream) { // gRPC request termination without trailers is an error. - if (upstream_request_->grpc_rq_success_deferred_) { - upstream_request_->upstream_host_->stats().rq_error_.inc(); + if (upstream_request.grpc_rq_success_deferred_) { + upstream_request.upstream_host_->stats().rq_error_.inc(); } - onUpstreamComplete(); + onUpstreamComplete(upstream_request); } callbacks_->encodeData(data, end_stream); } -void Filter::onUpstreamTrailers(Http::HeaderMapPtr&& trailers) { - if (upstream_request_->grpc_rq_success_deferred_) { +void Filter::onUpstreamTrailers(Http::HeaderMapPtr&& trailers, UpstreamRequest& upstream_request) { + ASSERT(upstream_requests_.size() == 1); + if (upstream_request.grpc_rq_success_deferred_) { absl::optional grpc_status = Grpc::Common::getGrpcStatus(*trailers); if (grpc_status && !Http::CodeUtility::is5xx(Grpc::Utility::grpcToHttpStatus(grpc_status.value()))) { - upstream_request_->upstream_host_->stats().rq_success_.inc(); + upstream_request.upstream_host_->stats().rq_success_.inc(); } else { - upstream_request_->upstream_host_->stats().rq_error_.inc(); + upstream_request.upstream_host_->stats().rq_error_.inc(); } } - onUpstreamComplete(); + onUpstreamComplete(upstream_request); callbacks_->encodeTrailers(std::move(trailers)); } @@ -783,38 
+868,35 @@ void Filter::onUpstreamMetadata(Http::MetadataMapPtr&& metadata_map) { callbacks_->encodeMetadata(std::move(metadata_map)); } -void Filter::onUpstreamComplete() { +void Filter::onUpstreamComplete(UpstreamRequest& upstream_request) { if (!downstream_end_stream_) { - upstream_request_->resetStream(); + upstream_request.resetStream(); } if (config_.emit_dynamic_stats_ && !callbacks_->streamInfo().healthCheck() && DateUtil::timePointValid(downstream_request_complete_time_)) { Event::Dispatcher& dispatcher = callbacks_->dispatcher(); std::chrono::milliseconds response_time = std::chrono::duration_cast( - dispatcher.timeSystem().monotonicTime() - downstream_request_complete_time_); + dispatcher.timeSource().monotonicTime() - downstream_request_complete_time_); - upstream_request_->upstream_host_->outlierDetector().putResponseTime(response_time); + upstream_request.upstream_host_->outlierDetector().putResponseTime(response_time); const Http::HeaderEntry* internal_request_header = downstream_headers_->EnvoyInternalRequest(); const bool internal_request = internal_request_header && internal_request_header->value() == "true"; - // TODO(mattklein123): Remove copy when G string compat issues are fixed. - const std::string zone_name = config_.local_info_.zoneName(); - Http::CodeStats& code_stats = httpContext().codeStats(); Http::CodeStats::ResponseTimingInfo info{config_.scope_, cluster_->statsScope(), EMPTY_STRING, response_time, - upstream_request_->upstream_canary_, + upstream_request.upstream_canary_, internal_request, route_entry_->virtualHost().name(), request_vcluster_ ? 
request_vcluster_->name() : EMPTY_STRING, - zone_name, - upstreamZone(upstream_request_->upstream_host_)}; + config_.local_info_.zoneName(), + upstreamZone(upstream_request.upstream_host_)}; code_stats.chargeResponseTiming(info); @@ -823,12 +905,12 @@ void Filter::onUpstreamComplete() { cluster_->statsScope(), alt_stat_prefix_, response_time, - upstream_request_->upstream_canary_, + upstream_request.upstream_canary_, internal_request, EMPTY_STRING, EMPTY_STRING, - zone_name, - upstreamZone(upstream_request_->upstream_host_)}; + config_.local_info_.zoneName(), + upstreamZone(upstream_request.upstream_host_)}; code_stats.chargeResponseTiming(info); } @@ -847,17 +929,18 @@ bool Filter::setupRetry(bool end_stream) { return false; } + ASSERT(upstream_requests_.size() == 1); ENVOY_STREAM_LOG(debug, "performing retry", *callbacks_); if (!end_stream) { - upstream_request_->resetStream(); + upstream_requests_.front()->resetStream(); } - upstream_request_.reset(); - callbacks_->streamInfo().resetUpstreamTimings(); + upstream_requests_.front()->removeFromList(upstream_requests_); + return true; } -bool Filter::setupRedirect(const Http::HeaderMap& headers) { +bool Filter::setupRedirect(const Http::HeaderMap& headers, UpstreamRequest& upstream_request) { ENVOY_STREAM_LOG(debug, "attempting internal redirect", *callbacks_); const Http::HeaderEntry* location = headers.Location(); @@ -871,7 +954,7 @@ bool Filter::setupRedirect(const Http::HeaderMap& headers) { // completion here and check it in onDestroy. This is annoyingly complicated but is better than // needlessly resetting streams. attempting_internal_redirect_with_complete_stream_ = - upstream_request_->stream_info_.lastUpstreamRxByteReceived() && downstream_end_stream_; + upstream_request.upstream_timing_.last_upstream_rx_byte_received_ && downstream_end_stream_; // As with setupRetry, redirects are not supported for streaming requests yet. 
if (downstream_end_stream_ && @@ -906,34 +989,34 @@ void Filter::doRetry() { } ASSERT(response_timeout_ || timeout_.global_timeout_.count() == 0); - ASSERT(!upstream_request_); - upstream_request_ = std::make_unique(*this, *conn_pool); - upstream_request_->encodeHeaders(!callbacks_->decodingBuffer() && !downstream_trailers_); + UpstreamRequestPtr upstream_request = std::make_unique(*this, *conn_pool); + upstream_request->moveIntoList(std::move(upstream_request), upstream_requests_); + upstream_requests_.front()->encodeHeaders(!callbacks_->decodingBuffer() && !downstream_trailers_); // It's possible we got immediately reset. - if (upstream_request_) { + if (upstream_requests_.size() == 1) { if (callbacks_->decodingBuffer()) { // If we are doing a retry we need to make a copy. Buffer::OwnedImpl copy(*callbacks_->decodingBuffer()); - upstream_request_->encodeData(copy, !downstream_trailers_); + upstream_requests_.front()->encodeData(copy, !downstream_trailers_); } if (downstream_trailers_) { - upstream_request_->encodeTrailers(*downstream_trailers_); + upstream_requests_.front()->encodeTrailers(*downstream_trailers_); } } } Filter::UpstreamRequest::UpstreamRequest(Filter& parent, Http::ConnectionPool::Instance& pool) : parent_(parent), conn_pool_(pool), grpc_rq_success_deferred_(false), - stream_info_(pool.protocol(), parent_.callbacks_->dispatcher().timeSystem()), + stream_info_(pool.protocol(), parent_.callbacks_->dispatcher().timeSource()), calling_encode_headers_(false), upstream_canary_(false), encode_complete_(false), - encode_trailers_(false) { + encode_trailers_(false), create_per_try_timeout_on_request_complete_(false) { if (parent_.config_.start_child_span_) { span_ = parent_.callbacks_->activeSpan().spawnChild( parent_.callbacks_->tracingConfig(), "router " + parent.cluster_->name() + " egress", parent.timeSource().systemTime()); - span_->setTag(Tracing::Tags::get().COMPONENT, Tracing::Tags::get().PROXY); + span_->setTag(Tracing::Tags::get().Component, 
Tracing::Tags::get().Proxy); } stream_info_.healthCheck(parent_.callbacks_->streamInfo().healthCheck()); @@ -950,40 +1033,44 @@ Filter::UpstreamRequest::~UpstreamRequest() { } clearRequestEncoder(); + stream_info_.setUpstreamTiming(upstream_timing_); stream_info_.onRequestComplete(); for (const auto& upstream_log : parent_.config_.upstream_logs_) { - upstream_log->log(parent_.downstream_headers_, upstream_headers_, upstream_trailers_, - stream_info_); + upstream_log->log(parent_.downstream_headers_, upstream_headers_.get(), + upstream_trailers_.get(), stream_info_); } } void Filter::UpstreamRequest::decode100ContinueHeaders(Http::HeaderMapPtr&& headers) { ASSERT(100 == Http::Utility::getResponseStatus(*headers)); - parent_.onUpstream100ContinueHeaders(std::move(headers)); + parent_.onUpstream100ContinueHeaders(std::move(headers), *this); } void Filter::UpstreamRequest::decodeHeaders(Http::HeaderMapPtr&& headers, bool end_stream) { // TODO(rodaine): This is actually measuring after the headers are parsed and not the first byte. 
- stream_info_.onFirstUpstreamRxByteReceived(); - parent_.callbacks_->streamInfo().onFirstUpstreamRxByteReceived(); + upstream_timing_.onFirstUpstreamRxByteReceived(parent_.callbacks_->dispatcher().timeSource()); maybeEndDecode(end_stream); - upstream_headers_ = headers.get(); + if (!parent_.config_.upstream_logs_.empty()) { + upstream_headers_ = std::make_unique(*headers); + } const uint64_t response_code = Http::Utility::getResponseStatus(*headers); stream_info_.response_code_ = static_cast(response_code); - parent_.onUpstreamHeaders(response_code, std::move(headers), end_stream); + parent_.onUpstreamHeaders(response_code, std::move(headers), *this, end_stream); } void Filter::UpstreamRequest::decodeData(Buffer::Instance& data, bool end_stream) { maybeEndDecode(end_stream); stream_info_.addBytesReceived(data.length()); - parent_.onUpstreamData(data, end_stream); + parent_.onUpstreamData(data, *this, end_stream); } void Filter::UpstreamRequest::decodeTrailers(Http::HeaderMapPtr&& trailers) { maybeEndDecode(true); - upstream_trailers_ = trailers.get(); - parent_.onUpstreamTrailers(std::move(trailers)); + if (!parent_.config_.upstream_logs_.empty()) { + upstream_trailers_ = std::make_unique(*trailers); + } + parent_.onUpstreamTrailers(std::move(trailers), *this); } void Filter::UpstreamRequest::decodeMetadata(Http::MetadataMapPtr&& metadata_map) { @@ -992,8 +1079,7 @@ void Filter::UpstreamRequest::decodeMetadata(Http::MetadataMapPtr&& metadata_map void Filter::UpstreamRequest::maybeEndDecode(bool end_stream) { if (end_stream) { - stream_info_.onLastUpstreamRxByteReceived(); - parent_.callbacks_->streamInfo().onLastUpstreamRxByteReceived(); + upstream_timing_.onLastUpstreamRxByteReceived(parent_.callbacks_->dispatcher().timeSource()); } } @@ -1029,8 +1115,7 @@ void Filter::UpstreamRequest::encodeData(Buffer::Instance& data, bool end_stream stream_info_.addBytesSent(data.length()); request_encoder_->encodeData(data, end_stream); if (end_stream) { - 
stream_info_.onLastUpstreamTxByteSent(); - parent_.callbacks_->streamInfo().onLastUpstreamTxByteSent(); + upstream_timing_.onLastUpstreamTxByteSent(parent_.callbacks_->dispatcher().timeSource()); } } } @@ -1045,17 +1130,16 @@ void Filter::UpstreamRequest::encodeTrailers(const Http::HeaderMap& trailers) { } else { ENVOY_STREAM_LOG(trace, "proxying trailers", *parent_.callbacks_); request_encoder_->encodeTrailers(trailers); - stream_info_.onLastUpstreamTxByteSent(); - parent_.callbacks_->streamInfo().onLastUpstreamTxByteSent(); + upstream_timing_.onLastUpstreamTxByteSent(parent_.callbacks_->dispatcher().timeSource()); } } -void Filter::UpstreamRequest::onResetStream(Http::StreamResetReason reason) { +void Filter::UpstreamRequest::onResetStream(Http::StreamResetReason reason, + absl::string_view transport_failure_reason) { clearRequestEncoder(); if (!calling_encode_headers_) { stream_info_.setResponseFlag(parent_.streamResetReasonToResponseFlag(reason)); - parent_.onUpstreamReset(UpstreamResetType::Reset, - absl::optional(reason)); + parent_.onUpstreamReset(reason, transport_failure_reason, *this); } else { deferred_reset_reason_ = reason; } @@ -1096,9 +1180,7 @@ void Filter::UpstreamRequest::onPerTryTimeout() { } resetStream(); stream_info_.setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout); - parent_.onUpstreamReset( - UpstreamResetType::PerTryTimeout, - absl::optional(Http::StreamResetReason::LocalReset)); + parent_.onPerTryTimeout(*this); } else { ENVOY_STREAM_LOG(debug, "ignored upstream per try timeout due to already started downstream response", @@ -1107,6 +1189,7 @@ void Filter::UpstreamRequest::onPerTryTimeout() { } void Filter::UpstreamRequest::onPoolFailure(Http::ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, Upstream::HostDescriptionConstSharedPtr host) { Http::StreamResetReason reset_reason = Http::StreamResetReason::ConnectionFailure; switch (reason) { @@ -1120,7 +1203,7 @@ void 
Filter::UpstreamRequest::onPoolFailure(Http::ConnectionPool::PoolFailureRea // Mimic an upstream reset. onUpstreamHostSelected(host); - onResetStream(reset_reason); + onResetStream(reset_reason, transport_failure_reason); } void Filter::UpstreamRequest::onPoolReady(Http::StreamEncoder& request_encoder, @@ -1131,7 +1214,11 @@ void Filter::UpstreamRequest::onPoolReady(Http::StreamEncoder& request_encoder, onUpstreamHostSelected(host); request_encoder.getStream().addCallbacks(*this); - setupPerTryTimeout(); + if (parent_.downstream_end_stream_) { + setupPerTryTimeout(); + } else { + create_per_try_timeout_on_request_complete_ = true; + } conn_pool_stream_handle_ = nullptr; setRequestEncoder(request_encoder); @@ -1144,8 +1231,7 @@ void Filter::UpstreamRequest::onPoolReady(Http::StreamEncoder& request_encoder, span_->injectContext(*parent_.downstream_headers_); } - stream_info_.onFirstUpstreamTxByteSent(); - parent_.callbacks_->streamInfo().onFirstUpstreamTxByteSent(); + upstream_timing_.onFirstUpstreamTxByteSent(parent_.callbacks_->dispatcher().timeSource()); request_encoder.encodeHeaders(*parent_.downstream_headers_, !buffered_request_body_ && encode_complete_ && !encode_trailers_); calling_encode_headers_ = false; @@ -1156,7 +1242,7 @@ void Filter::UpstreamRequest::onPoolReady(Http::StreamEncoder& request_encoder, // specific example of a case where this happens is if we try to encode a total header size that // is too big in HTTP/2 (64K currently). 
if (deferred_reset_reason_) { - onResetStream(deferred_reset_reason_.value()); + onResetStream(deferred_reset_reason_.value(), absl::string_view()); } else { if (buffered_request_body_) { stream_info_.addBytesSent(buffered_request_body_->length()); @@ -1168,8 +1254,7 @@ void Filter::UpstreamRequest::onPoolReady(Http::StreamEncoder& request_encoder, } if (encode_complete_) { - stream_info_.onLastUpstreamTxByteSent(); - parent_.callbacks_->streamInfo().onLastUpstreamTxByteSent(); + upstream_timing_.onLastUpstreamTxByteSent(parent_.callbacks_->dispatcher().timeSource()); } } } @@ -1201,14 +1286,19 @@ void Filter::UpstreamRequest::clearRequestEncoder() { void Filter::UpstreamRequest::DownstreamWatermarkManager::onAboveWriteBufferHighWatermark() { ASSERT(parent_.request_encoder_); + ASSERT(parent_.parent_.upstream_requests_.size() == 1); // The downstream connection is overrun. Pause reads from upstream. + // If there are multiple calls to readDisable either the codec (H2) or the underlying + // Network::Connection (H1) will handle reference counting. parent_.parent_.cluster_->stats().upstream_flow_control_paused_reading_total_.inc(); parent_.request_encoder_->getStream().readDisable(true); } void Filter::UpstreamRequest::DownstreamWatermarkManager::onBelowWriteBufferLowWatermark() { ASSERT(parent_.request_encoder_); - // The downstream connection has buffer available. Resume reads from upstream. + ASSERT(parent_.parent_.upstream_requests_.size() == 1); + // One source of connection blockage has buffer available. Pass this on to the stream, which + // will resume reads if this was the last remaining high watermark. 
parent_.parent_.cluster_->stats().upstream_flow_control_resumed_reading_total_.inc(); parent_.request_encoder_->getStream().readDisable(false); } diff --git a/source/common/router/router.h b/source/common/router/router.h index 9fcafdad5b424..a2cb522f4b759 100644 --- a/source/common/router/router.h +++ b/source/common/router/router.h @@ -21,6 +21,7 @@ #include "common/buffer/watermark_buffer.h" #include "common/common/hash.h" #include "common/common/hex.h" +#include "common/common/linked_object.h" #include "common/common/logger.h" #include "common/config/well_known_names.h" #include "common/http/utility.h" @@ -40,7 +41,8 @@ namespace Router { COUNTER(no_cluster) \ COUNTER(rq_redirect) \ COUNTER(rq_direct_response) \ - COUNTER(rq_total) + COUNTER(rq_total) \ + COUNTER(rq_reset_after_downstream_response_started) // clang-format on /** @@ -112,7 +114,7 @@ class FilterConfig { context.runtime(), context.random(), std::move(shadow_writer), PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, dynamic_stats, true), config.start_child_span(), config.suppress_envoy_headers(), - context.api().timeSystem(), context.httpContext()) { + context.api().timeSource(), context.httpContext()) { for (const auto& upstream_log : config.upstream_log()) { upstream_logs_.push_back(AccessLog::AccessLogFactory::fromProto(upstream_log, context)); } @@ -237,6 +239,10 @@ class Filter : Logger::Loggable, return retry_state_->hostSelectionMaxAttempts(); } + Network::Socket::OptionsSharedPtr upstreamSocketOptions() const override { + return callbacks_->getUpstreamSocketOptions(); + } + /** * Set a computed cookie to be sent with the downstream headers. 
* @param key supplies the size of the cookie @@ -267,7 +273,8 @@ class Filter : Logger::Loggable, private: struct UpstreamRequest : public Http::StreamDecoder, public Http::StreamCallbacks, - public Http::ConnectionPool::Callbacks { + public Http::ConnectionPool::Callbacks, + public LinkedObject { UpstreamRequest(Filter& parent, Http::ConnectionPool::Instance& pool); ~UpstreamRequest(); @@ -294,21 +301,25 @@ class Filter : Logger::Loggable, void decodeMetadata(Http::MetadataMapPtr&& metadata_map) override; // Http::StreamCallbacks - void onResetStream(Http::StreamResetReason reason) override; + void onResetStream(Http::StreamResetReason reason, + absl::string_view transport_failure_reason) override; void onAboveWriteBufferHighWatermark() override { disableDataFromDownstream(); } void onBelowWriteBufferLowWatermark() override { enableDataFromDownstream(); } void disableDataFromDownstream() { + ASSERT(parent_.upstream_requests_.size() == 1); parent_.cluster_->stats().upstream_flow_control_backed_up_total_.inc(); parent_.callbacks_->onDecoderFilterAboveWriteBufferHighWatermark(); } void enableDataFromDownstream() { + ASSERT(parent_.upstream_requests_.size() == 1); parent_.cluster_->stats().upstream_flow_control_drained_total_.inc(); parent_.callbacks_->onDecoderFilterBelowWriteBufferLowWatermark(); } // Http::ConnectionPool::Callbacks void onPoolFailure(Http::ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, Upstream::HostDescriptionConstSharedPtr host) override; void onPoolReady(Http::StreamEncoder& request_encoder, Upstream::HostDescriptionConstSharedPtr host) override; @@ -340,19 +351,23 @@ class Filter : Logger::Loggable, DownstreamWatermarkManager downstream_watermark_manager_{*this}; Tracing::SpanPtr span_; StreamInfo::StreamInfoImpl stream_info_; - Http::HeaderMap* upstream_headers_{}; - Http::HeaderMap* upstream_trailers_{}; + StreamInfo::UpstreamTiming upstream_timing_; + // Copies of upstream headers/trailers. 
These are only set if upstream + // access logging is configured. + Http::HeaderMapPtr upstream_headers_; + Http::HeaderMapPtr upstream_trailers_; bool calling_encode_headers_ : 1; bool upstream_canary_ : 1; bool encode_complete_ : 1; bool encode_trailers_ : 1; + // Tracks whether we deferred a per try timeout because the downstream request + // had not been completed yet. + bool create_per_try_timeout_on_request_complete_ : 1; }; typedef std::unique_ptr UpstreamRequestPtr; - enum class UpstreamResetType { Reset, GlobalTimeout, PerTryTimeout }; - StreamInfo::ResponseFlag streamResetReasonToResponseFlag(Http::StreamResetReason reset_reason); static const std::string upstreamZone(Upstream::HostDescriptionConstSharedPtr upstream_host); @@ -369,23 +384,36 @@ class Filter : Logger::Loggable, Upstream::ResourcePriority priority) PURE; Http::ConnectionPool::Instance* getConnPool(); void maybeDoShadowing(); + bool maybeRetryReset(Http::StreamResetReason reset_reason, UpstreamRequest& upstream_request); + void onPerTryTimeout(UpstreamRequest& upstream_request); void onRequestComplete(); void onResponseTimeout(); - void onUpstream100ContinueHeaders(Http::HeaderMapPtr&& headers); - void onUpstreamHeaders(uint64_t response_code, Http::HeaderMapPtr&& headers, bool end_stream); - void onUpstreamData(Buffer::Instance& data, bool end_stream); - void onUpstreamTrailers(Http::HeaderMapPtr&& trailers); + void onUpstream100ContinueHeaders(Http::HeaderMapPtr&& headers, + UpstreamRequest& upstream_request); + // Handle an upstream request aborted due to a local timeout. + void onUpstreamTimeoutAbort(StreamInfo::ResponseFlag response_flag, absl::string_view details); + // Handle an "aborted" upstream request, meaning we didn't see response + // headers (e.g. due to a reset). Handles recording stats and responding + // downstream if appropriate. 
+ void onUpstreamAbort(Http::Code code, StreamInfo::ResponseFlag response_flag, + absl::string_view body, bool dropped, absl::string_view details); + void onUpstreamHeaders(uint64_t response_code, Http::HeaderMapPtr&& headers, + UpstreamRequest& upstream_request, bool end_stream); + void onUpstreamData(Buffer::Instance& data, UpstreamRequest& upstream_request, bool end_stream); + void onUpstreamTrailers(Http::HeaderMapPtr&& trailers, UpstreamRequest& upstream_request); void onUpstreamMetadata(Http::MetadataMapPtr&& metadata_map); - void onUpstreamComplete(); - void onUpstreamReset(UpstreamResetType type, - const absl::optional& reset_reason); + void onUpstreamComplete(UpstreamRequest& upstream_request); + void onUpstreamReset(Http::StreamResetReason reset_reason, absl::string_view transport_failure, + UpstreamRequest& upstream_request); void sendNoHealthyUpstreamResponse(); bool setupRetry(bool end_stream); - bool setupRedirect(const Http::HeaderMap& headers); + bool setupRedirect(const Http::HeaderMap& headers, UpstreamRequest& upstream_request); + void updateOutlierDetection(Http::Code code, UpstreamRequest& upstream_request); void doRetry(); // Called immediately after a non-5xx header is received from upstream, performs stats accounting // and handle difference between gRPC and non-gRPC requests. 
- void handleNon5xxResponseHeaders(const Http::HeaderMap& headers, bool end_stream); + void handleNon5xxResponseHeaders(const Http::HeaderMap& headers, + UpstreamRequest& upstream_request, bool end_stream); TimeSource& timeSource() { return config_.timeSource(); } Http::Context& httpContext() { return config_.http_context_; } @@ -399,7 +427,7 @@ class Filter : Logger::Loggable, Event::TimerPtr response_timeout_; FilterUtility::TimeoutData timeout_; Http::Code timeout_response_code_ = Http::Code::GatewayTimeout; - UpstreamRequestPtr upstream_request_; + std::list upstream_requests_; bool grpc_request_{}; Http::HeaderMap* downstream_headers_{}; Http::HeaderMap* downstream_trailers_{}; diff --git a/source/common/router/router_ratelimit.cc b/source/common/router/router_ratelimit.cc index f1f084265cd5a..f8827836367fa 100644 --- a/source/common/router/router_ratelimit.cc +++ b/source/common/router/router_ratelimit.cc @@ -40,7 +40,8 @@ bool RequestHeadersAction::populateDescriptor(const Router::RouteEntry&, return false; } - descriptor.entries_.push_back({descriptor_key_, header_value->value().c_str()}); + descriptor.entries_.push_back( + {descriptor_key_, std::string(header_value->value().getStringView())}); return true; } diff --git a/source/common/router/shadow_writer_impl.cc b/source/common/router/shadow_writer_impl.cc index 0d1f8a25ecef5..1597fbb8486b7 100644 --- a/source/common/router/shadow_writer_impl.cc +++ b/source/common/router/shadow_writer_impl.cc @@ -23,11 +23,12 @@ void ShadowWriterImpl::shadow(const std::string& cluster, Http::MessagePtr&& req ASSERT(!request->headers().Host()->value().empty()); // Switch authority to add a shadow postfix. This allows upstream logging to make more sense. 
- auto parts = StringUtil::splitToken(request->headers().Host()->value().c_str(), ":"); - ASSERT(parts.size() > 0 && parts.size() <= 2); + auto parts = StringUtil::splitToken(request->headers().Host()->value().getStringView(), ":"); + ASSERT(!parts.empty() && parts.size() <= 2); request->headers().Host()->value( - parts.size() == 2 ? absl::StrJoin(parts, "-shadow:") - : absl::StrCat(request->headers().Host()->value().c_str(), "-shadow")); + parts.size() == 2 + ? absl::StrJoin(parts, "-shadow:") + : absl::StrCat(request->headers().Host()->value().getStringView(), "-shadow")); // This is basically fire and forget. We don't handle cancelling. cm_.httpAsyncClientForCluster(cluster).send( std::move(request), *this, Http::AsyncClient::RequestOptions().setTimeout(timeout)); diff --git a/source/common/router/vhds.cc b/source/common/router/vhds.cc new file mode 100644 index 0000000000000..07bc3c2cbdf27 --- /dev/null +++ b/source/common/router/vhds.cc @@ -0,0 +1,74 @@ +#include "common/router/vhds.h" + +#include +#include +#include +#include + +#include "envoy/api/v2/rds.pb.validate.h" +#include "envoy/api/v2/route/route.pb.validate.h" + +#include "common/common/assert.h" +#include "common/common/fmt.h" +#include "common/config/utility.h" +#include "common/protobuf/utility.h" +#include "common/router/config_impl.h" + +namespace Envoy { +namespace Router { + +// Implements callbacks to handle DeltaDiscovery protocol for VirtualHostDiscoveryService +VhdsSubscription::VhdsSubscription(RouteConfigUpdatePtr& config_update_info, + Server::Configuration::FactoryContext& factory_context, + const std::string& stat_prefix, + std::unordered_set& route_config_providers, + SubscriptionFactoryFunction factory_function) + : config_update_info_(config_update_info), + init_target_(fmt::format("VhdsConfigSubscription {}", config_update_info_->routeConfigName()), + [this]() { subscription_->start({}, *this); }), + scope_(factory_context.scope().createScope(stat_prefix + "vhds." 
+ + config_update_info_->routeConfigName() + ".")), + stats_({ALL_VHDS_STATS(POOL_COUNTER(*scope_))}), + route_config_providers_(route_config_providers) { + Envoy::Config::Utility::checkLocalInfo("vhds", factory_context.localInfo()); + const auto& config_source = config_update_info_->routeConfiguration() + .vhds() + .config_source() + .api_config_source() + .api_type(); + if (config_source != envoy::api::v2::core::ApiConfigSource::DELTA_GRPC) { + throw EnvoyException("vhds: only 'DELTA_GRPC' is supported as an api_type."); + } + + subscription_ = factory_function( + config_update_info_->routeConfiguration().vhds().config_source(), factory_context.localInfo(), + factory_context.dispatcher(), factory_context.clusterManager(), factory_context.random(), + *scope_, "none", "envoy.api.v2.VirtualHostDiscoveryService.DeltaVirtualHosts", + Grpc::Common::typeUrl(envoy::api::v2::route::VirtualHost().GetDescriptor()->full_name()), + factory_context.api()); +} + +void VhdsSubscription::onConfigUpdateFailed(const EnvoyException*) { + // We need to allow server startup to continue, even if we have a bad + // config. 
+ init_target_.ready(); +} + +void VhdsSubscription::onConfigUpdate( + const Protobuf::RepeatedPtrField& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& version_info) { + if (config_update_info_->onVhdsUpdate(added_resources, removed_resources, version_info)) { + stats_.config_reload_.inc(); + ENVOY_LOG(debug, "vhds: loading new configuration: config_name={} hash={}", + config_update_info_->routeConfigName(), config_update_info_->configHash()); + for (auto* provider : route_config_providers_) { + provider->onConfigUpdate(); + } + } + + init_target_.ready(); +} + +} // namespace Router +} // namespace Envoy diff --git a/source/common/router/vhds.h b/source/common/router/vhds.h new file mode 100644 index 0000000000000..704d021ef5b3b --- /dev/null +++ b/source/common/router/vhds.h @@ -0,0 +1,82 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include "envoy/api/v2/rds.pb.h" +#include "envoy/api/v2/route/route.pb.h" +#include "envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.pb.h" +#include "envoy/config/subscription.h" +#include "envoy/http/codes.h" +#include "envoy/local_info/local_info.h" +#include "envoy/router/rds.h" +#include "envoy/router/route_config_update_receiver.h" +#include "envoy/server/filter_config.h" +#include "envoy/singleton/instance.h" +#include "envoy/stats/scope.h" +#include "envoy/thread_local/thread_local.h" + +#include "common/common/logger.h" +#include "common/config/subscription_factory.h" +#include "common/init/target_impl.h" +#include "common/protobuf/utility.h" + +namespace Envoy { +namespace Router { + +// clang-format off +#define ALL_VHDS_STATS(COUNTER) \ + COUNTER(config_reload) \ + COUNTER(update_empty) + +// clang-format on + +struct VhdsStats { + ALL_VHDS_STATS(GENERATE_COUNTER_STRUCT) +}; + +typedef std::unique_ptr (*SubscriptionFactoryFunction)( + const envoy::api::v2::core::ConfigSource&, const LocalInfo::LocalInfo&, 
Event::Dispatcher&, + Upstream::ClusterManager&, Envoy::Runtime::RandomGenerator&, Stats::Scope&, const std::string&, + const std::string&, absl::string_view, Api::Api&); + +class VhdsSubscription : Envoy::Config::SubscriptionCallbacks, + Logger::Loggable { +public: + VhdsSubscription(RouteConfigUpdatePtr& config_update_info, + Server::Configuration::FactoryContext& factory_context, + const std::string& stat_prefix, + std::unordered_set& route_config_providers, + SubscriptionFactoryFunction factory_function = + Envoy::Config::SubscriptionFactory::subscriptionFromConfigSource); + ~VhdsSubscription() override { init_target_.ready(); } + + // Config::SubscriptionCallbacks + // TODO(fredlas) deduplicate + void onConfigUpdate(const Protobuf::RepeatedPtrField&, + const std::string&) override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } + void onConfigUpdate(const Protobuf::RepeatedPtrField&, + const Protobuf::RepeatedPtrField&, const std::string&) override; + void onConfigUpdateFailed(const EnvoyException* e) override; + std::string resourceName(const ProtobufWkt::Any& resource) override { + return MessageUtil::anyConvert(resource).name(); + } + void registerInitTargetWithInitManager(Init::Manager& m) { m.add(init_target_); } + + RouteConfigUpdatePtr& config_update_info_; + std::unique_ptr subscription_; + Init::TargetImpl init_target_; + Stats::ScopePtr scope_; + VhdsStats stats_; + std::unordered_set& route_config_providers_; +}; + +using VhdsSubscriptionPtr = std::unique_ptr; + +} // namespace Router +} // namespace Envoy diff --git a/source/common/runtime/BUILD b/source/common/runtime/BUILD index 7fecd67d8a967..91874797b0242 100644 --- a/source/common/runtime/BUILD +++ b/source/common/runtime/BUILD @@ -10,8 +10,14 @@ envoy_package() envoy_cc_library( name = "runtime_lib", - srcs = ["runtime_impl.cc"], - hdrs = ["runtime_impl.h"], + srcs = [ + "runtime_features.cc", + "runtime_impl.cc", + ], + hdrs = [ + "runtime_features.h", + "runtime_impl.h", + ], external_deps = 
["ssl"], deps = [ "//include/envoy/event:dispatcher_interface", diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc new file mode 100644 index 0000000000000..7ea7af077bc58 --- /dev/null +++ b/source/common/runtime/runtime_features.cc @@ -0,0 +1,67 @@ +#include "common/runtime/runtime_features.h" + +namespace Envoy { +namespace Runtime { + +// Add additional features here to enable the new code paths by default. +// +// Per documentation in CONTRIBUTING.md it is expected that new high risk code paths be guarded +// by runtime feature guards, i.e. +// +// if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.my_feature_name")) { +// [new code path] +// } else { +// [old_code_path] +// } +// +// Runtime features are false by default, so the old code path is exercised. +// To make a runtime feature true by default, add it to the array below. +// New features should be true-by-default for an Envoy release cycle before the +// old code path is removed. +// +// If issues are found that require a runtime feature to be disabled, it should be reported +// ASAP by filing a bug on github. Overriding non-buggy code is strongly discouraged to avoid the +// problem of the bugs being found after the old code path has been removed. +constexpr const char* runtime_features[] = { + // Enabled + "envoy.reloadable_features.test_feature_true", +}; + +// This is a list of configuration fields which are disallowed by default in Envoy +// +// By default, use of proto fields marked as deprecated in their api/.../*.proto file will result +// in a logged warning, so that Envoy users have a warning that they are using deprecated fields. +// +// During the Envoy release cycle, the maintainer team runs a script which will upgrade currently +// deprecated features to be disallowed (adding them to the list below) at which point use of said +// feature will cause a hard-failure (ProtoValidationException) instead of a logged warning.
+// +// The release cycle after a feature has been marked disallowed, it is officially removable, and +// the maintainer team will run a script creating a tracking issue for proto and code clean up. +// +// TODO(alyssawilk) handle deprecation of reloadable_features and update the above comment. Ideally +// runtime override of a deprecated feature will log(warn) on runtime-load if not deprecated +// and hard-fail once it has been deprecated. + +constexpr const char* disallowed_features[] = { + // Acts as both a test entry for deprecated.proto and a marker for the Envoy + // deprecation scripts. + "envoy.deprecated_features.deprecated.proto:is_deprecated_fatal", + "envoy.deprecated_features.config_source.proto:UNSUPPORTED_REST_LEGACY", + "envoy.deprecated_features.ext_authz.proto:use_alpha", + "envoy.deprecated_features.route.proto:enabled", + "envoy.deprecated_features.fault.proto:type", + "envoy.deprecated_features.route.proto:runtime_key", +}; + +RuntimeFeatures::RuntimeFeatures() { + for (auto& feature : disallowed_features) { + disallowed_features_.insert(feature); + } + for (auto& feature : runtime_features) { + enabled_features_.insert(feature); + } +} + +} // namespace Runtime +} // namespace Envoy diff --git a/source/common/runtime/runtime_features.h b/source/common/runtime/runtime_features.h new file mode 100644 index 0000000000000..cac8726c3f7af --- /dev/null +++ b/source/common/runtime/runtime_features.h @@ -0,0 +1,39 @@ +#pragma once + +#include + +#include "common/singleton/const_singleton.h" + +#include "absl/container/flat_hash_set.h" + +namespace Envoy { +namespace Runtime { + +class RuntimeFeatures { +public: + RuntimeFeatures(); + + // This tracks proto configured features, to determine if a given deprecated + // feature is still allowed, or has been made fatal-by-default per the Envoy + // deprecation process. 
+ bool disallowedByDefault(absl::string_view feature) const { + return disallowed_features_.find(feature) != disallowed_features_.end(); + } + + // This tracks config-guarded code paths, to determine if a given + // runtime-guarded-code-path has the new code run by default or the old code. + bool enabledByDefault(absl::string_view feature) const { + return enabled_features_.find(feature) != enabled_features_.end(); + } + +private: + friend class RuntimeFeaturesPeer; + + absl::flat_hash_set disallowed_features_; + absl::flat_hash_set enabled_features_; +}; + +using RuntimeFeaturesDefaults = ConstSingleton; + +} // namespace Runtime +} // namespace Envoy diff --git a/source/common/runtime/runtime_impl.cc b/source/common/runtime/runtime_impl.cc index 54df51f36daec..2fe56fe65108d 100644 --- a/source/common/runtime/runtime_impl.cc +++ b/source/common/runtime/runtime_impl.cc @@ -14,12 +14,24 @@ #include "common/common/utility.h" #include "common/filesystem/directory.h" #include "common/protobuf/utility.h" +#include "common/runtime/runtime_features.h" +#include "absl/strings/match.h" #include "openssl/rand.h" namespace Envoy { namespace Runtime { +bool runtimeFeatureEnabled(absl::string_view feature) { + ASSERT(absl::StartsWith(feature, "envoy.reloadable_features")); + if (Runtime::LoaderSingleton::getExisting()) { + return Runtime::LoaderSingleton::getExisting()->snapshot().runtimeFeatureEnabled(feature); + } + ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::runtime), warn, + "Unable to use runtime singleton for feature {}", feature); + return RuntimeFeaturesDefaults::get().enabledByDefault(feature); +} + const size_t RandomGeneratorImpl::UUID_LENGTH = 36; uint64_t RandomGeneratorImpl::random() { @@ -144,6 +156,35 @@ std::string RandomGeneratorImpl::uuid() { return std::string(uuid, UUID_LENGTH); } +bool SnapshotImpl::deprecatedFeatureEnabled(const std::string& key) const { + bool allowed = false; + // If the value is not explicitly set as a 
runtime boolean, the default value is based on + // disallowedByDefault. + if (!getBoolean(key, allowed)) { + allowed = !RuntimeFeaturesDefaults::get().disallowedByDefault(key); + } + + if (!allowed) { + // If either disallowed by default or configured off, the feature is not enabled. + return false; + } + // The feature is allowed. It is assumed this check is called when the feature + // is about to be used, so increment the feature use stat. + stats_.deprecated_feature_use_.inc(); + return true; +} + +bool SnapshotImpl::runtimeFeatureEnabled(absl::string_view key) const { + bool enabled = false; + // If the value is not explicitly set as a runtime boolean, the default value is based on + // enabledByDefault. + if (!getBoolean(key, enabled)) { + enabled = RuntimeFeaturesDefaults::get().enabledByDefault(key); + } + + return enabled; +} + bool SnapshotImpl::featureEnabled(const std::string& key, uint64_t default_value, uint64_t random_value, uint64_t num_buckets) const { return random_value % num_buckets < std::min(getInteger(key, default_value), num_buckets); @@ -184,23 +225,27 @@ bool SnapshotImpl::featureEnabled(const std::string& key, const envoy::type::FractionalPercent& default_value, uint64_t random_value) const { const auto& entry = values_.find(key); - uint64_t numerator, denominator; + envoy::type::FractionalPercent percent; if (entry != values_.end() && entry->second.fractional_percent_value_.has_value()) { - numerator = entry->second.fractional_percent_value_->numerator(); - denominator = ProtobufPercentHelper::fractionalPercentDenominatorToInt( - entry->second.fractional_percent_value_->denominator()); + percent = entry->second.fractional_percent_value_.value(); } else if (entry != values_.end() && entry->second.uint_value_.has_value()) { - // The runtime value must have been specified as an integer rather than a fractional percent - // proto. To preserve legacy semantics, we'll assume this represents a percentage.
- numerator = entry->second.uint_value_.value(); - denominator = 100; + // Check for > 100 because the runtime value is assumed to be specified as + // an integer, and it also ensures that truncating the uint64_t runtime + // value into a uint32_t percent numerator later is safe + if (entry->second.uint_value_.value() > 100) { + return true; + } + + // The runtime value was specified as an integer rather than a fractional + // percent proto. To preserve legacy semantics, we treat it as a percentage + // (i.e. denominator of 100). + percent.set_numerator(entry->second.uint_value_.value()); + percent.set_denominator(envoy::type::FractionalPercent::HUNDRED); } else { - numerator = default_value.numerator(); - denominator = - ProtobufPercentHelper::fractionalPercentDenominatorToInt(default_value.denominator()); + percent = default_value; } - return random_value % denominator < numerator; + return ProtobufPercentHelper::evaluateFractionalPercent(percent, random_value); } uint64_t SnapshotImpl::getInteger(const std::string& key, uint64_t default_value) const { @@ -212,13 +257,22 @@ uint64_t SnapshotImpl::getInteger(const std::string& key, uint64_t default_value } } +bool SnapshotImpl::getBoolean(absl::string_view key, bool& value) const { + auto entry = values_.find(key); + if (entry != values_.end() && entry->second.bool_value_.has_value()) { + value = entry->second.bool_value_.value(); + return true; + } + return false; +} + const std::vector& SnapshotImpl::getLayers() const { return layers_; } SnapshotImpl::SnapshotImpl(RandomGenerator& generator, RuntimeStats& stats, std::vector&& layers) - : layers_{std::move(layers)}, generator_{generator} { + : layers_{std::move(layers)}, generator_{generator}, stats_{stats} { for (const auto& layer : layers_) { for (const auto& kv : layer->values()) { values_.erase(kv.first); @@ -239,9 +293,29 @@ SnapshotImpl::Entry SnapshotImpl::createEntry(const std::string& value) { return entry; } +SnapshotImpl::Entry 
SnapshotImpl::createEntry(const ProtobufWkt::Value& value) { + // This isn't the smartest way to do it; we're round-tripping via YAML, this should be optimized + // if runtime parsing becomes performance sensitive. + return createEntry(MessageUtil::getYamlStringFromMessage(value, false, false)); +} + +bool SnapshotImpl::parseEntryBooleanValue(Entry& entry) { + absl::string_view stripped = entry.raw_string_value_; + stripped = absl::StripAsciiWhitespace(stripped); + + if (absl::EqualsIgnoreCase(stripped, "true")) { + entry.bool_value_ = true; + return true; + } else if (absl::EqualsIgnoreCase(stripped, "false")) { + entry.bool_value_ = false; + return true; + } + return false; +} + bool SnapshotImpl::parseEntryUintValue(Entry& entry) { uint64_t converted_uint64; - if (StringUtil::atoul(entry.raw_string_value_.c_str(), converted_uint64)) { + if (absl::SimpleAtoi(entry.raw_string_value_, &converted_uint64)) { entry.uint_value_ = converted_uint64; return true; } @@ -283,7 +357,7 @@ void DiskLayer::walkDirectory(const std::string& path, const std::string& prefix Api::Api& api) { ENVOY_LOG(debug, "walking directory: {}", path); if (depth > MaxWalkDepth) { - throw EnvoyException(fmt::format("Walk recursion depth exceded {}", MaxWalkDepth)); + throw EnvoyException(fmt::format("Walk recursion depth exceeded {}", MaxWalkDepth)); } // Check if this is an obviously bad path. 
if (api.fileSystem().illegalPath(path)) { @@ -333,19 +407,58 @@ void DiskLayer::walkDirectory(const std::string& path, const std::string& prefix } } -LoaderImpl::LoaderImpl(RandomGenerator& generator, Stats::Store& store, - ThreadLocal::SlotAllocator& tls) - : LoaderImpl(DoNotLoadSnapshot{}, generator, store, tls) { - loadNewSnapshot(); +ProtoLayer::ProtoLayer(const ProtobufWkt::Struct& proto) : OverrideLayerImpl{"base"} { + for (const auto& f : proto.fields()) { + walkProtoValue(f.second, f.first); + } } -LoaderImpl::LoaderImpl(DoNotLoadSnapshot /* unused */, RandomGenerator& generator, +void ProtoLayer::walkProtoValue(const ProtobufWkt::Value& v, const std::string& prefix) { + switch (v.kind_case()) { + case ProtobufWkt::Value::KIND_NOT_SET: + case ProtobufWkt::Value::kListValue: + case ProtobufWkt::Value::kNullValue: + throw EnvoyException(fmt::format("Invalid runtime entry value for {}", prefix)); + break; + case ProtobufWkt::Value::kStringValue: + values_.emplace(prefix, SnapshotImpl::createEntry(v.string_value())); + break; + case ProtobufWkt::Value::kNumberValue: + case ProtobufWkt::Value::kBoolValue: + values_.emplace(prefix, SnapshotImpl::createEntry(v)); + break; + case ProtobufWkt::Value::kStructValue: { + const ProtobufWkt::Struct& s = v.struct_value(); + if (s.fields().empty() || s.fields().find("numerator") != s.fields().end() || + s.fields().find("denominator") != s.fields().end()) { + values_.emplace(prefix, SnapshotImpl::createEntry(v)); + break; + } + for (const auto& f : s.fields()) { + walkProtoValue(f.second, prefix + "." 
+ f.first); + } + break; + } + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +LoaderImpl::LoaderImpl(const ProtobufWkt::Struct& base, RandomGenerator& generator, Stats::Store& store, ThreadLocal::SlotAllocator& tls) - : generator_(generator), stats_(generateStats(store)), admin_layer_(stats_), + : LoaderImpl(DoNotLoadSnapshot{}, base, generator, store, tls) { + loadNewSnapshot(); +} + +LoaderImpl::LoaderImpl(DoNotLoadSnapshot /* unused */, const ProtobufWkt::Struct& base, + RandomGenerator& generator, Stats::Store& store, + ThreadLocal::SlotAllocator& tls) + : generator_(generator), stats_(generateStats(store)), admin_layer_(stats_), base_(base), tls_(tls.allocateSlot()) {} std::unique_ptr LoaderImpl::createNewSnapshot() { std::vector layers; + layers.emplace_back(std::make_unique(base_)); layers.emplace_back(std::make_unique(admin_layer_)); return std::make_unique(generator_, stats_, std::move(layers)); } @@ -364,13 +477,11 @@ void LoaderImpl::mergeValues(const std::unordered_map& loadNewSnapshot(); } -DiskBackedLoaderImpl::DiskBackedLoaderImpl(Event::Dispatcher& dispatcher, - ThreadLocal::SlotAllocator& tls, - const std::string& root_symlink_path, - const std::string& subdir, - const std::string& override_dir, Stats::Store& store, - RandomGenerator& generator, Api::Api& api) - : LoaderImpl(DoNotLoadSnapshot{}, generator, store, tls), +DiskBackedLoaderImpl::DiskBackedLoaderImpl( + Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator& tls, const ProtobufWkt::Struct& base, + const std::string& root_symlink_path, const std::string& subdir, + const std::string& override_dir, Stats::Store& store, RandomGenerator& generator, Api::Api& api) + : LoaderImpl(DoNotLoadSnapshot{}, base, generator, store, tls), watcher_(dispatcher.createFilesystemWatcher()), root_path_(root_symlink_path + "/" + subdir), override_path_(root_symlink_path + "/" + override_dir), api_(api) { watcher_->addWatch(root_symlink_path, Filesystem::Watcher::Events::MovedTo, @@ -388,6 +499,7 @@ 
RuntimeStats LoaderImpl::generateStats(Stats::Store& store) { std::unique_ptr DiskBackedLoaderImpl::createNewSnapshot() { std::vector layers; + layers.emplace_back(std::make_unique(base_)); try { layers.push_back(std::make_unique("root", root_path_, api_)); if (api_.fileSystem().directoryExists(override_path_)) { diff --git a/source/common/runtime/runtime_impl.h b/source/common/runtime/runtime_impl.h index 5fb8b85be7272..dca4e8a2f8537 100644 --- a/source/common/runtime/runtime_impl.h +++ b/source/common/runtime/runtime_impl.h @@ -17,12 +17,17 @@ #include "common/common/empty_string.h" #include "common/common/logger.h" #include "common/common/thread.h" +#include "common/singleton/threadsafe_singleton.h" #include "spdlog/spdlog.h" namespace Envoy { namespace Runtime { +bool runtimeFeatureEnabled(absl::string_view feature); + +using RuntimeSingleton = ThreadSafeSingleton; + /** * Implementation of RandomGenerator that uses per-thread RANLUX generators seeded with current * time. @@ -45,6 +50,7 @@ class RandomGeneratorImpl : public RandomGenerator { COUNTER(override_dir_not_exists) \ COUNTER(override_dir_exists) \ COUNTER(load_success) \ + COUNTER(deprecated_feature_use) \ GAUGE (num_keys) \ GAUGE (admin_overrides_active) // clang-format on @@ -67,6 +73,8 @@ class SnapshotImpl : public Snapshot, std::vector&& layers); // Runtime::Snapshot + bool deprecatedFeatureEnabled(const std::string& key) const override; + bool runtimeFeatureEnabled(absl::string_view key) const override; bool featureEnabled(const std::string& key, uint64_t default_value, uint64_t random_value, uint64_t num_buckets) const override; bool featureEnabled(const std::string& key, uint64_t default_value) const override; @@ -81,21 +89,31 @@ class SnapshotImpl : public Snapshot, const std::vector& getLayers() const override; static Entry createEntry(const std::string& value); + static Entry createEntry(const ProtobufWkt::Value& value); + + // Returns true and sets 'value' to the key if found. 
+ // Returns false if the key is not a boolean value. + bool getBoolean(absl::string_view key, bool& value) const; private: static void resolveEntryType(Entry& entry) { + if (parseEntryBooleanValue(entry)) { + return; + } if (parseEntryUintValue(entry)) { return; } parseEntryFractionalPercentValue(entry); } + static bool parseEntryBooleanValue(Entry& entry); static bool parseEntryUintValue(Entry& entry); static void parseEntryFractionalPercentValue(Entry& entry); const std::vector layers_; EntryMap values_; RandomGenerator& generator_; + RuntimeStats& stats_; }; /** @@ -153,6 +171,17 @@ class DiskLayer : public OverrideLayerImpl, Logger::Loggable { +public: + ProtoLayer(const ProtobufWkt::Struct& proto); + +private: + void walkProtoValue(const ProtobufWkt::Value& v, const std::string& prefix); +}; + /** * Implementation of Loader that provides Snapshots of values added via mergeValues(). * A single snapshot is shared among all threads and referenced by shared_ptr such that @@ -161,19 +190,20 @@ class DiskLayer : public OverrideLayerImpl, Logger::Loggable& values) override; protected: - // Identical the the public constructor but does not call loadSnapshot(). Subclasses must call + // Identical the public constructor but does not call loadSnapshot(). Subclasses must call // loadSnapshot() themselves to create the initial snapshot, since loadSnapshot calls the virtual // function createNewSnapshot() and is therefore unsuitable for use in a superclass constructor. 
struct DoNotLoadSnapshot {}; - LoaderImpl(DoNotLoadSnapshot /* unused */, RandomGenerator& generator, Stats::Store& stats, - ThreadLocal::SlotAllocator& tls); + LoaderImpl(DoNotLoadSnapshot /* unused */, const ProtobufWkt::Struct& base, + RandomGenerator& generator, Stats::Store& stats, ThreadLocal::SlotAllocator& tls); // Create a new Snapshot virtual std::unique_ptr createNewSnapshot(); @@ -183,6 +213,7 @@ class LoaderImpl : public Loader { RandomGenerator& generator_; RuntimeStats stats_; AdminLayer admin_layer_; + const ProtobufWkt::Struct base_; private: RuntimeStats generateStats(Stats::Store& store); @@ -197,9 +228,9 @@ class LoaderImpl : public Loader { class DiskBackedLoaderImpl : public LoaderImpl, Logger::Loggable { public: DiskBackedLoaderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator& tls, - const std::string& root_symlink_path, const std::string& subdir, - const std::string& override_dir, Stats::Store& store, - RandomGenerator& generator, Api::Api& api); + const ProtobufWkt::Struct& base, const std::string& root_symlink_path, + const std::string& subdir, const std::string& override_dir, + Stats::Store& store, RandomGenerator& generator, Api::Api& api); private: std::unique_ptr createNewSnapshot() override; diff --git a/source/common/runtime/uuid_util.cc b/source/common/runtime/uuid_util.cc index a4f18a86536e5..615b5c8ce46d3 100644 --- a/source/common/runtime/uuid_util.cc +++ b/source/common/runtime/uuid_util.cc @@ -13,7 +13,7 @@ bool UuidUtils::uuidModBy(const std::string& uuid, uint64_t& out, uint64_t mod) } uint64_t value; - if (!StringUtil::atoul(uuid.substr(0, 8).c_str(), value, 16)) { + if (!StringUtil::atoull(uuid.substr(0, 8).c_str(), value, 16)) { return false; } @@ -21,7 +21,7 @@ bool UuidUtils::uuidModBy(const std::string& uuid, uint64_t& out, uint64_t mod) return true; } -UuidTraceStatus UuidUtils::isTraceableUuid(const std::string& uuid) { +UuidTraceStatus UuidUtils::isTraceableUuid(absl::string_view uuid) { if 
(uuid.length() != Runtime::RandomGeneratorImpl::UUID_LENGTH) { return UuidTraceStatus::NoTrace; } diff --git a/source/common/runtime/uuid_util.h b/source/common/runtime/uuid_util.h index b3c85e8254b18..cf2450b4d5e84 100644 --- a/source/common/runtime/uuid_util.h +++ b/source/common/runtime/uuid_util.h @@ -2,6 +2,8 @@ #include +#include "absl/strings/string_view.h" + namespace Envoy { enum class UuidTraceStatus { NoTrace, Sampled, Client, Forced }; @@ -30,7 +32,7 @@ class UuidUtils { /** * @return status of the uuid, to differentiate reason for tracing, etc. */ - static UuidTraceStatus isTraceableUuid(const std::string& uuid); + static UuidTraceStatus isTraceableUuid(absl::string_view uuid); private: // Byte on this position has predefined value of 4 for UUID4. diff --git a/source/common/secret/BUILD b/source/common/secret/BUILD index 5a9f0f94ec7ce..3248ffb331ab8 100644 --- a/source/common/secret/BUILD +++ b/source/common/secret/BUILD @@ -42,7 +42,7 @@ envoy_cc_library( deps = [ "//include/envoy/config:subscription_interface", "//include/envoy/event:dispatcher_interface", - "//include/envoy/init:init_interface", + "//include/envoy/init:manager_interface", "//include/envoy/local_info:local_info_interface", "//include/envoy/runtime:runtime_interface", "//include/envoy/secret:secret_provider_interface", @@ -52,6 +52,7 @@ envoy_cc_library( "//source/common/common:cleanup_lib", "//source/common/config:resources_lib", "//source/common/config:subscription_factory_lib", + "//source/common/init:target_lib", "//source/common/protobuf:utility_lib", "//source/common/ssl:certificate_validation_context_config_impl_lib", "//source/common/ssl:tls_certificate_config_impl_lib", diff --git a/source/common/secret/sds_api.cc b/source/common/secret/sds_api.cc index a805123a79283..de62381c29f74 100644 --- a/source/common/secret/sds_api.cc +++ b/source/common/secret/sds_api.cc @@ -17,7 +17,8 @@ SdsApi::SdsApi(const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispat const 
envoy::api::v2::core::ConfigSource& sds_config, const std::string& sds_config_name, std::function destructor_cb, Api::Api& api) - : local_info_(local_info), dispatcher_(dispatcher), random_(random), stats_(stats), + : init_target_(fmt::format("SdsApi {}", sds_config_name), [this] { initialize(); }), + local_info_(local_info), dispatcher_(dispatcher), random_(random), stats_(stats), cluster_manager_(cluster_manager), sds_config_(sds_config), sds_config_name_(sds_config_name), secret_hash_(0), clean_up_(destructor_cb), api_(api) { Config::Utility::checkLocalInfo("sds", local_info_); @@ -25,22 +26,11 @@ SdsApi::SdsApi(const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispat // can be chained together to behave as one init_manager. In that way, we let // two listeners which share same SdsApi to register at separate init managers, and // each init manager has a chance to initialize its targets. - init_manager.registerTarget(*this); + init_manager.add(init_target_); } -void SdsApi::initialize(std::function callback) { - initialize_callback_ = callback; - - subscription_ = Envoy::Config::SubscriptionFactory::subscriptionFromConfigSource< - envoy::api::v2::auth::Secret>( - sds_config_, local_info_, dispatcher_, cluster_manager_, random_, stats_, - "envoy.service.discovery.v2.SecretDiscoveryService.FetchSecrets", - "envoy.service.discovery.v2.SecretDiscoveryService.StreamSecrets", api_); - - subscription_->start({sds_config_name_}, *this); -} - -void SdsApi::onConfigUpdate(const ResourceVector& resources, const std::string&) { +void SdsApi::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + const std::string&) { if (resources.empty()) { throw EnvoyException( fmt::format("Missing SDS resources for {} in onConfigUpdate()", sds_config_name_)); @@ -48,7 +38,7 @@ void SdsApi::onConfigUpdate(const ResourceVector& resources, const std::string&) if (resources.size() != 1) { throw EnvoyException(fmt::format("Unexpected SDS secrets length: {}", resources.size())); 
} - const auto& secret = resources[0]; + auto secret = MessageUtil::anyConvert(resources[0]); MessageUtil::validate(secret); // Wrap sds_config_name_ in string_view to deal with proto string/std::string incompatibility @@ -66,19 +56,21 @@ void SdsApi::onConfigUpdate(const ResourceVector& resources, const std::string&) update_callback_manager_.runCallbacks(); } - runInitializeCallbackIfAny(); + init_target_.ready(); } void SdsApi::onConfigUpdateFailed(const EnvoyException*) { // We need to allow server startup to continue, even if we have a bad config. - runInitializeCallbackIfAny(); + init_target_.ready(); } -void SdsApi::runInitializeCallbackIfAny() { - if (initialize_callback_) { - initialize_callback_(); - initialize_callback_ = nullptr; - } +void SdsApi::initialize() { + subscription_ = Envoy::Config::SubscriptionFactory::subscriptionFromConfigSource( + sds_config_, local_info_, dispatcher_, cluster_manager_, random_, stats_, + "envoy.service.discovery.v2.SecretDiscoveryService.FetchSecrets", + "envoy.service.discovery.v2.SecretDiscoveryService.StreamSecrets", + Grpc::Common::typeUrl(envoy::api::v2::auth::Secret().GetDescriptor()->full_name()), api_); + subscription_->start({sds_config_name_}, *this); } } // namespace Secret diff --git a/source/common/secret/sds_api.h b/source/common/secret/sds_api.h index b19df9d54023b..13d7790ed157c 100644 --- a/source/common/secret/sds_api.h +++ b/source/common/secret/sds_api.h @@ -7,7 +7,7 @@ #include "envoy/api/v2/core/config_source.pb.h" #include "envoy/config/subscription.h" #include "envoy/event/dispatcher.h" -#include "envoy/init/init.h" +#include "envoy/init/manager.h" #include "envoy/local_info/local_info.h" #include "envoy/runtime/runtime.h" #include "envoy/secret/secret_callbacks.h" @@ -18,6 +18,7 @@ #include "common/common/callback_impl.h" #include "common/common/cleanup.h" +#include "common/init/target_impl.h" #include "common/ssl/certificate_validation_context_config_impl.h" #include 
"common/ssl/tls_certificate_config_impl.h" @@ -27,8 +28,7 @@ namespace Secret { /** * SDS API implementation that fetches secrets from SDS server via Subscription. */ -class SdsApi : public Init::Target, - public Config::SubscriptionCallbacks { +class SdsApi : public Config::SubscriptionCallbacks { public: SdsApi(const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, Runtime::RandomGenerator& random, Stats::Store& stats, @@ -36,11 +36,14 @@ class SdsApi : public Init::Target, const envoy::api::v2::core::ConfigSource& sds_config, const std::string& sds_config_name, std::function destructor_cb, Api::Api& api); - // Init::Target - void initialize(std::function callback) override; - // Config::SubscriptionCallbacks - void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) override; + // TODO(fredlas) deduplicate + void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + const std::string& version_info) override; + void onConfigUpdate(const Protobuf::RepeatedPtrField&, + const Protobuf::RepeatedPtrField&, const std::string&) override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } void onConfigUpdateFailed(const EnvoyException* e) override; std::string resourceName(const ProtobufWkt::Any& resource) override { return MessageUtil::anyConvert(resource).name(); @@ -53,8 +56,8 @@ class SdsApi : public Init::Target, Common::CallbackManager<> update_callback_manager_; private: - void runInitializeCallbackIfAny(); - + void initialize(); + Init::TargetImpl init_target_; const LocalInfo::LocalInfo& local_info_; Event::Dispatcher& dispatcher_; Runtime::RandomGenerator& random_; @@ -62,8 +65,7 @@ class SdsApi : public Init::Target, Upstream::ClusterManager& cluster_manager_; const envoy::api::v2::core::ConfigSource sds_config_; - std::unique_ptr> subscription_; - std::function initialize_callback_; + std::unique_ptr subscription_; const std::string sds_config_name_; uint64_t secret_hash_; diff --git 
a/source/common/singleton/threadsafe_singleton.h b/source/common/singleton/threadsafe_singleton.h index fbd3af0a28c39..b1b6ba76e149f 100644 --- a/source/common/singleton/threadsafe_singleton.h +++ b/source/common/singleton/threadsafe_singleton.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "absl/base/call_once.h" namespace Envoy { @@ -41,4 +43,42 @@ template absl::once_flag ThreadSafeSingleton::create_once_; template T* ThreadSafeSingleton::instance_ = nullptr; +// An instance of a singleton class which has the same thread safety properties +// as ThreadSafeSingleton, but must be created via initialize prior to access. +// +// As with ThreadSafeSingleton the use of this class is generally discouraged. +template class InjectableSingleton { +public: + static T& get() { + RELEASE_ASSERT(loader_ != nullptr, "InjectableSingleton used prior to initialization"); + return *loader_; + } + + static T* getExisting() { return loader_; } + + static void initialize(T* value) { + RELEASE_ASSERT(value != nullptr, "InjectableSingleton initialized with non-null value."); + RELEASE_ASSERT(loader_ == nullptr, "InjectableSingleton initialized multiple times."); + loader_ = value; + } + static void clear() { loader_ = nullptr; } + +protected: + static T* loader_; +}; + +template T* InjectableSingleton::loader_ = nullptr; + +template class ScopedInjectableLoader { +public: + ScopedInjectableLoader(std::unique_ptr&& instance) { + instance_ = std::move(instance); + InjectableSingleton::initialize(instance_.get()); + } + ~ScopedInjectableLoader() { InjectableSingleton::clear(); } + +private: + std::unique_ptr instance_; +}; + } // namespace Envoy diff --git a/source/common/stats/BUILD b/source/common/stats/BUILD index 85052c2361946..b766709c0d5b6 100644 --- a/source/common/stats/BUILD +++ b/source/common/stats/BUILD @@ -13,6 +13,7 @@ envoy_cc_library( srcs = ["heap_stat_data.cc"], hdrs = ["heap_stat_data.h"], deps = [ + ":metric_impl_lib", ":stat_data_allocator_lib", 
"//source/common/common:assert_lib", "//source/common/common:hash_lib", @@ -42,9 +43,11 @@ envoy_cc_library( srcs = ["isolated_store_impl.cc"], hdrs = ["isolated_store_impl.h"], deps = [ + ":fake_symbol_table_lib", ":histogram_lib", + ":scope_prefixer_lib", ":stats_lib", - ":stats_options_lib", + ":store_impl_lib", "//include/envoy/stats:stats_macros", "//source/common/stats:heap_stat_data_lib", ], @@ -52,24 +55,32 @@ envoy_cc_library( envoy_cc_library( name = "metric_impl_lib", + srcs = ["metric_impl.cc"], hdrs = ["metric_impl.h"], deps = [ + ":symbol_table_lib", "//include/envoy/stats:stats_interface", "//source/common/common:assert_lib", ], ) envoy_cc_library( - name = "raw_stat_data_lib", - srcs = ["raw_stat_data.cc"], - hdrs = ["raw_stat_data.h"], + name = "store_impl_lib", + hdrs = ["store_impl.h"], deps = [ - ":stat_data_allocator_lib", + ":symbol_table_lib", + "//include/envoy/stats:stats_interface", + ], +) + +envoy_cc_library( + name = "scope_prefixer_lib", + srcs = ["scope_prefixer.cc"], + hdrs = ["scope_prefixer.h"], + deps = [ + ":symbol_table_lib", + ":utility_lib", "//include/envoy/stats:stats_interface", - "//source/common/common:assert_lib", - "//source/common/common:block_memory_hash_set_lib", - "//source/common/common:hash_lib", - "//source/common/common:thread_lib", ], ) @@ -95,14 +106,23 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "stat_merger_lib", + srcs = ["stat_merger.cc"], + hdrs = ["stat_merger.h"], + deps = [ + ":symbol_table_lib", + "//include/envoy/stats:stats_interface", + "//source/common/protobuf", + ], +) + envoy_cc_library( name = "stats_lib", deps = [ ":histogram_lib", ":metric_impl_lib", - ":raw_stat_data_lib", ":source_impl_lib", - ":stats_options_lib", ":symbol_table_lib", ":tag_extractor_lib", ":utility_lib", @@ -130,6 +150,7 @@ envoy_cc_library( "//include/envoy/stats:symbol_table_interface", "//source/common/common:assert_lib", "//source/common/common:logger_lib", + "//source/common/common:stack_array", 
"//source/common/common:thread_lib", "//source/common/common:utility_lib", ], @@ -141,14 +162,6 @@ envoy_cc_library( deps = [":symbol_table_lib"], ) -envoy_cc_library( - name = "stats_options_lib", - hdrs = ["stats_options_impl.h"], - deps = [ - "//include/envoy/stats:stats_interface", - ], -) - envoy_cc_library( name = "tag_extractor_lib", srcs = ["tag_extractor_impl.cc"], @@ -191,6 +204,7 @@ envoy_cc_library( hdrs = ["thread_local_store.h"], deps = [ ":heap_stat_data_lib", + ":scope_prefixer_lib", ":stats_lib", ":stats_matcher_lib", ":tag_producer_lib", @@ -202,4 +216,5 @@ envoy_cc_library( name = "utility_lib", srcs = ["utility.cc"], hdrs = ["utility.h"], + deps = [":symbol_table_lib"], ) diff --git a/source/common/stats/fake_symbol_table_impl.h b/source/common/stats/fake_symbol_table_impl.h index 6c7e2f37bdf86..f26d7513ea11e 100644 --- a/source/common/stats/fake_symbol_table_impl.h +++ b/source/common/stats/fake_symbol_table_impl.h @@ -44,12 +44,57 @@ namespace Stats { * that backs each StatName, so there is no sharing or memory savings, but also * no state associated with the SymbolTable, and thus no locks needed. * - * TODO(jmarantz): delete this class once SymbolTable is fully deployed in the + * TODO(#6307): delete this class once SymbolTable is fully deployed in the * Envoy codebase. */ class FakeSymbolTableImpl : public SymbolTable { public: - SymbolEncoding encode(absl::string_view name) override { return encodeHelper(name); } + // SymbolTable + void populateList(const absl::string_view* names, uint32_t num_names, + StatNameList& list) override { + // This implementation of populateList is similar to + // SymbolTableImpl::populateList. This variant is more efficient for + // FakeSymbolTableImpl, because it avoid "encoding" each name in names. 
The + // strings are laid out abutting each other with 2-byte length prefixes, so + // encoding isn't needed, and doing a dummy encoding step would cost one + // memory allocation per element, adding significant overhead as measured by + // thread_local_store_speed_test. + + // We encode the number of names in a single byte, thus there must be less + // than 256 of them. + RELEASE_ASSERT(num_names < 256, "Maximum number elements in a StatNameList exceeded"); + + // First encode all the names. The '1' here represents the number of + // names. The num_names * StatNameSizeEncodingBytes reserves space for the + // lengths of each name. + size_t total_size_bytes = 1 + num_names * StatNameSizeEncodingBytes; + + for (uint32_t i = 0; i < num_names; ++i) { + total_size_bytes += names[i].size(); + } + + // Now allocate the exact number of bytes required and move the encodings + // into storage. + auto storage = std::make_unique(total_size_bytes); + uint8_t* p = &storage[0]; + *p++ = num_names; + for (uint32_t i = 0; i < num_names; ++i) { + auto& name = names[i]; + size_t sz = name.size(); + p = SymbolTableImpl::writeLengthReturningNext(sz, p); + if (!name.empty()) { + memcpy(p, name.data(), sz * sizeof(uint8_t)); + p += sz; + } + } + + // This assertion double-checks the arithmetic where we computed + // total_size_bytes. After appending all the encoded data into the + // allocated byte array, we should wind up with a pointer difference of + // total_size_bytes from the beginning of the allocation. 
+ ASSERT(p == &storage[0] + total_size_bytes); + list.moveStorageIntoList(std::move(storage)); + } std::string toString(const StatName& stat_name) const override { return std::string(toStringView(stat_name)); @@ -60,36 +105,35 @@ class FakeSymbolTableImpl : public SymbolTable { } void free(const StatName&) override {} void incRefCount(const StatName&) override {} + StoragePtr encode(absl::string_view name) override { return encodeHelper(name); } SymbolTable::StoragePtr join(const std::vector& names) const override { std::vector strings; for (StatName name : names) { - absl::string_view str = toStringView(name); - if (!str.empty()) { - strings.push_back(str); + if (!name.empty()) { + strings.push_back(toStringView(name)); } } - return stringToStorage(absl::StrJoin(strings, ".")); + return encodeHelper(absl::StrJoin(strings, ".")); } #ifndef ENVOY_CONFIG_COVERAGE void debugPrint() const override {} #endif -private: - SymbolEncoding encodeHelper(absl::string_view name) const { - SymbolEncoding encoding; - encoding.addStringForFakeSymbolTable(name); - return encoding; + void callWithStringView(StatName stat_name, + const std::function& fn) const override { + fn(toStringView(stat_name)); } +private: absl::string_view toStringView(const StatName& stat_name) const { return {reinterpret_cast(stat_name.data()), stat_name.dataSize()}; } - SymbolTable::StoragePtr stringToStorage(absl::string_view name) const { - SymbolEncoding encoding = encodeHelper(name); - auto bytes = std::make_unique(encoding.bytesRequired()); - encoding.moveToStorage(bytes.get()); + StoragePtr encodeHelper(absl::string_view name) const { + auto bytes = std::make_unique(name.size() + StatNameSizeEncodingBytes); + uint8_t* buffer = SymbolTableImpl::writeLengthReturningNext(name.size(), bytes.get()); + memcpy(buffer, name.data(), name.size()); return bytes; } }; diff --git a/source/common/stats/heap_stat_data.cc b/source/common/stats/heap_stat_data.cc index 7177ea19087c7..41494ed85641b 100644 --- 
a/source/common/stats/heap_stat_data.cc +++ b/source/common/stats/heap_stat_data.cc @@ -1,37 +1,44 @@ #include "common/stats/heap_stat_data.h" #include "common/common/lock_guard.h" +#include "common/common/logger.h" #include "common/common/thread.h" #include "common/common/utility.h" namespace Envoy { namespace Stats { -HeapStatData::HeapStatData(absl::string_view key) { - StringUtil::strlcpy(name_, key.data(), key.size() + 1); -} +HeapStatDataAllocator::~HeapStatDataAllocator() { ASSERT(stats_.empty()); } -HeapStatDataAllocator::HeapStatDataAllocator() {} +HeapStatData* HeapStatData::alloc(StatName stat_name, SymbolTable& symbol_table) { + void* memory = ::malloc(sizeof(HeapStatData) + stat_name.size()); + ASSERT(memory); + // TODO(fredlas) call StatMerger::verifyCombineLogicSpecified() here? + symbol_table.incRefCount(stat_name); + return new (memory) HeapStatData(stat_name); +} -HeapStatDataAllocator::~HeapStatDataAllocator() { ASSERT(stats_.empty()); } +void HeapStatData::free(SymbolTable& symbol_table) { + symbol_table.free(statName()); + this->~HeapStatData(); + ::free(this); // matches malloc() call above. +} -HeapStatData* HeapStatDataAllocator::alloc(absl::string_view name) { - // Any expected truncation of name is done at the callsite. No truncation is - // required to use this allocator. Note that data must be freed by calling - // its free() method, and not by destruction, thus the more complex use of - // unique_ptr. 
- std::unique_ptr> data( - HeapStatData::alloc(name), [](HeapStatData* d) { d->free(); }); +HeapStatData& HeapStatDataAllocator::alloc(StatName name) { + using HeapStatDataFreeFn = std::function; + std::unique_ptr data_ptr( + HeapStatData::alloc(name, symbolTable()), + [this](HeapStatData* d) { d->free(symbolTable()); }); Thread::ReleasableLockGuard lock(mutex_); - auto ret = stats_.insert(data.get()); + auto ret = stats_.insert(data_ptr.get()); HeapStatData* existing_data = *ret.first; lock.release(); if (ret.second) { - return data.release(); + return *data_ptr.release(); } ++existing_data->ref_count_; - return existing_data; + return *existing_data; } void HeapStatDataAllocator::free(HeapStatData& data) { @@ -46,19 +53,17 @@ void HeapStatDataAllocator::free(HeapStatData& data) { ASSERT(key_removed == 1); } - data.free(); -} - -HeapStatData* HeapStatData::alloc(absl::string_view name) { - void* memory = ::malloc(sizeof(HeapStatData) + name.size() + 1); - ASSERT(memory); - return new (memory) HeapStatData(name); + data.free(symbolTable()); } -void HeapStatData::free() { - this->~HeapStatData(); - ::free(this); // matches malloc() call above. 
+#ifndef ENVOY_CONFIG_COVERAGE +void HeapStatDataAllocator::debugPrint() { + Thread::LockGuard lock(mutex_); + for (HeapStatData* heap_stat_data : stats_) { + ENVOY_LOG_MISC(info, "{}", symbolTable().toString(heap_stat_data->statName())); + } } +#endif template class StatDataAllocatorImpl; diff --git a/source/common/stats/heap_stat_data.h b/source/common/stats/heap_stat_data.h index dc14303ec6260..a0abe880cefc1 100644 --- a/source/common/stats/heap_stat_data.h +++ b/source/common/stats/heap_stat_data.h @@ -4,10 +4,15 @@ #include #include +#include "envoy/stats/stats.h" +#include "envoy/stats/symbol_table.h" + #include "common/common/hash.h" #include "common/common/thread.h" #include "common/common/thread_annotations.h" +#include "common/stats/metric_impl.h" #include "common/stats/stat_data_allocator_impl.h" +#include "common/stats/symbol_table_impl.h" #include "absl/container/flat_hash_set.h" @@ -19,71 +24,77 @@ namespace Stats { * so that it can be allocated efficiently from the heap on demand. */ struct HeapStatData { - /** - * @returns absl::string_view the name as a string_view. - */ - absl::string_view key() const { return name_; } +private: + explicit HeapStatData(StatName stat_name) { stat_name.copyToStorage(symbol_storage_); } + +public: + static HeapStatData* alloc(StatName stat_name, SymbolTable& symbol_table); - /** - * @returns std::string the name as a const char*. 
- */ - const char* name() const { return name_; } + void free(SymbolTable& symbol_table); + StatName statName() const { return StatName(symbol_storage_); } - static HeapStatData* alloc(absl::string_view name); - void free(); + bool operator==(const HeapStatData& rhs) const { return statName() == rhs.statName(); } + uint64_t hash() const { return statName().hash(); } std::atomic value_{0}; std::atomic pending_increment_{0}; std::atomic flags_{0}; std::atomic ref_count_{1}; - char name_[]; + SymbolTable::Storage symbol_storage_; +}; -private: - /** - * You cannot construct/destruct a HeapStatData directly with new/delete as - * it's variable-size. Use alloc()/free() methods above. - */ - explicit HeapStatData(absl::string_view name); - ~HeapStatData() {} +template class HeapStat : public Stat { +public: + HeapStat(HeapStatData& data, StatDataAllocatorImpl& alloc, + absl::string_view tag_extracted_name, const std::vector& tags) + : Stat(data, alloc, tag_extracted_name, tags) {} + + StatName statName() const override { return this->data_.statName(); } }; -/** - * Implementation of StatDataAllocator using a pure heap-based strategy, so that - * Envoy implementations that do not require hot-restart can use less memory. 
- */ class HeapStatDataAllocator : public StatDataAllocatorImpl { public: - HeapStatDataAllocator(); - ~HeapStatDataAllocator(); + HeapStatDataAllocator(SymbolTable& symbol_table) : StatDataAllocatorImpl(symbol_table) {} + ~HeapStatDataAllocator() override; - // StatDataAllocatorImpl - HeapStatData* alloc(absl::string_view name) override; + HeapStatData& alloc(StatName name); void free(HeapStatData& data) override; // StatDataAllocator - bool requiresBoundedStatNameSize() const override { return false; } + CounterSharedPtr makeCounter(StatName name, absl::string_view tag_extracted_name, + const std::vector& tags) override { + return std::make_shared>>(alloc(name), *this, + tag_extracted_name, tags); + } + + GaugeSharedPtr makeGauge(StatName name, absl::string_view tag_extracted_name, + const std::vector& tags) override { + return std::make_shared>>(alloc(name), *this, + tag_extracted_name, tags); + } + +#ifndef ENVOY_CONFIG_COVERAGE + void debugPrint(); +#endif private: struct HeapStatHash { - size_t operator()(const HeapStatData* a) const { return HashUtil::xxHash64(a->key()); } + size_t operator()(const HeapStatData* a) const { return a->hash(); } }; struct HeapStatCompare { - bool operator()(const HeapStatData* a, const HeapStatData* b) const { - return (a->key() == b->key()); - } + bool operator()(const HeapStatData* a, const HeapStatData* b) const { return *a == *b; } }; - // TODO(jmarantz): See https://github.com/envoyproxy/envoy/pull/3927 and - // https://github.com/envoyproxy/envoy/issues/3585, which can help reorganize - // the heap stats using a ref-counted symbol table to compress the stat strings. - using StatSet = absl::flat_hash_set; - // An unordered set of HeapStatData pointers which keys off the key() - // field in each object. This necessitates a custom comparator and hasher. + // field in each object. This necessitates a custom comparator and hasher, which key off of the + // StatNamePtr's own StatNamePtrHash and StatNamePtrCompare operators. 
+ using StatSet = absl::flat_hash_set; StatSet stats_ GUARDED_BY(mutex_); - // A mutex is needed here to protect the stats_ object from both alloc() and free() operations. - // Although alloc() operations are called under existing locking, free() operations are made from - // the destructors of the individual stat objects, which are not protected by locks. + + // A mutex is needed here to protect both the stats_ object from both + // alloc() and free() operations. Although alloc() operations are called under existing locking, + // free() operations are made from the destructors of the individual stat objects, which are not + // protected by locks. Thread::MutexBasicLockable mutex_; }; diff --git a/source/common/stats/histogram_impl.h b/source/common/stats/histogram_impl.h index ef11744ac7163..5fceb00687ff0 100644 --- a/source/common/stats/histogram_impl.h +++ b/source/common/stats/histogram_impl.h @@ -37,13 +37,13 @@ class HistogramStatisticsImpl : public HistogramStatistics, NonCopyable { const std::vector& computedQuantiles() const override { return computed_quantiles_; } const std::vector& supportedBuckets() const override; const std::vector& computedBuckets() const override { return computed_buckets_; } - double sampleCount() const override { return sample_count_; } + uint64_t sampleCount() const override { return sample_count_; } double sampleSum() const override { return sample_sum_; } private: std::vector computed_quantiles_; std::vector computed_buckets_; - double sample_count_; + uint64_t sample_count_; double sample_sum_; }; @@ -52,40 +52,56 @@ class HistogramStatisticsImpl : public HistogramStatistics, NonCopyable { */ class HistogramImpl : public Histogram, public MetricImpl { public: - HistogramImpl(const std::string& name, Store& parent, std::string&& tag_extracted_name, - std::vector&& tags) - : MetricImpl(std::move(tag_extracted_name), std::move(tags)), parent_(parent), name_(name) {} - - // Stats:;Metric - std::string name() const override { return 
name_; } - const char* nameCStr() const override { return name_.c_str(); } + HistogramImpl(StatName name, Store& parent, const std::string& tag_extracted_name, + const std::vector& tags) + : MetricImpl(tag_extracted_name, tags, parent.symbolTable()), + name_(name, parent.symbolTable()), parent_(parent) {} + ~HistogramImpl() { + // We must explicitly free the StatName here using the SymbolTable reference + // we access via parent_. A pure RAII alternative would be to use + // StatNameManagedStorage rather than StatNameStorage, which will cost a total + // of 16 bytes per stat, counting the extra SymbolTable& reference here, + // plus the extra SymbolTable& reference in MetricImpl. + name_.free(symbolTable()); + + // We must explicitly free the StatName here in order to supply the + // SymbolTable reference. An RAII alternative would be to store a + // reference to the SymbolTable in MetricImpl, which would cost 8 bytes + // per stat. + MetricImpl::clear(); + } // Stats::Histogram void recordValue(uint64_t value) override { parent_.deliverHistogramToSinks(*this, value); } bool used() const override { return true; } + StatName statName() const override { return name_.statName(); } + const SymbolTable& symbolTable() const override { return parent_.symbolTable(); } + SymbolTable& symbolTable() override { return parent_.symbolTable(); } private: + StatNameStorage name_; + // This is used for delivering the histogram data to sinks. Store& parent_; - - const std::string name_; }; /** * Null histogram implementation. * No-ops on all calls and requires no underlying metric or data. 
*/ -class NullHistogramImpl : public Histogram { +class NullHistogramImpl : public Histogram, NullMetricImpl { public: - NullHistogramImpl() {} - ~NullHistogramImpl() {} - std::string name() const override { return ""; } - const char* nameCStr() const override { return ""; } - const std::string& tagExtractedName() const override { CONSTRUCT_ON_FIRST_USE(std::string, ""); } - const std::vector& tags() const override { CONSTRUCT_ON_FIRST_USE(std::vector, {}); } + explicit NullHistogramImpl(SymbolTable& symbol_table) : NullMetricImpl(symbol_table) {} + ~NullHistogramImpl() { + // MetricImpl must be explicitly cleared() before destruction, otherwise it + // will not be able to access the SymbolTable& to free the symbols. An RAII + // alternative would be to store the SymbolTable reference in the + // MetricImpl, costing 8 bytes per stat. + MetricImpl::clear(); + } + void recordValue(uint64_t) override {} - bool used() const override { return false; } }; } // namespace Stats diff --git a/source/common/stats/isolated_store_impl.cc b/source/common/stats/isolated_store_impl.cc index d85d38ef03213..905ff98db06d0 100644 --- a/source/common/stats/isolated_store_impl.cc +++ b/source/common/stats/isolated_store_impl.cc @@ -6,49 +6,38 @@ #include #include "common/common/utility.h" +#include "common/stats/fake_symbol_table_impl.h" #include "common/stats/histogram_impl.h" +#include "common/stats/scope_prefixer.h" #include "common/stats/utility.h" namespace Envoy { namespace Stats { IsolatedStoreImpl::IsolatedStoreImpl() - : counters_([this](const std::string& name) -> CounterSharedPtr { - std::string tag_extracted_name = name; - std::vector tags; - return alloc_.makeCounter(name, std::move(tag_extracted_name), std::move(tags)); + : IsolatedStoreImpl(std::make_unique()) {} + +IsolatedStoreImpl::IsolatedStoreImpl(std::unique_ptr&& symbol_table) + : IsolatedStoreImpl(*symbol_table) { + symbol_table_storage_ = std::move(symbol_table); +} + 
+IsolatedStoreImpl::IsolatedStoreImpl(SymbolTable& symbol_table) + : StoreImpl(symbol_table), alloc_(symbol_table), + counters_([this](StatName name) -> CounterSharedPtr { + return alloc_.makeCounter(name, alloc_.symbolTable().toString(name), std::vector()); + }), + gauges_([this](StatName name) -> GaugeSharedPtr { + return alloc_.makeGauge(name, alloc_.symbolTable().toString(name), std::vector()); }), - gauges_([this](const std::string& name) -> GaugeSharedPtr { - std::string tag_extracted_name = name; - std::vector tags; - return alloc_.makeGauge(name, std::move(tag_extracted_name), std::move(tags)); + histograms_([this](StatName name) -> HistogramSharedPtr { + return std::make_shared(name, *this, alloc_.symbolTable().toString(name), + std::vector()); }), - histograms_([this](const std::string& name) -> HistogramSharedPtr { - return std::make_shared(name, *this, std::string(name), std::vector()); - }) {} - -struct IsolatedScopeImpl : public Scope { - IsolatedScopeImpl(IsolatedStoreImpl& parent, const std::string& prefix) - : parent_(parent), prefix_(Utility::sanitizeStatsName(prefix)) {} - - // Stats::Scope - ScopePtr createScope(const std::string& name) override { - return ScopePtr{new IsolatedScopeImpl(parent_, prefix_ + name)}; - } - void deliverHistogramToSinks(const Histogram&, uint64_t) override {} - Counter& counter(const std::string& name) override { return parent_.counter(prefix_ + name); } - Gauge& gauge(const std::string& name) override { return parent_.gauge(prefix_ + name); } - Histogram& histogram(const std::string& name) override { - return parent_.histogram(prefix_ + name); - } - const Stats::StatsOptions& statsOptions() const override { return parent_.statsOptions(); } - - IsolatedStoreImpl& parent_; - const std::string prefix_; -}; + null_gauge_(symbol_table) {} ScopePtr IsolatedStoreImpl::createScope(const std::string& name) { - return ScopePtr{new IsolatedScopeImpl(*this, name)}; + return std::make_unique(name, *this); } } // namespace Stats 
diff --git a/source/common/stats/isolated_store_impl.h b/source/common/stats/isolated_store_impl.h index 7765fd50e3e72..8e720a6ad8e3e 100644 --- a/source/common/stats/isolated_store_impl.h +++ b/source/common/stats/isolated_store_impl.h @@ -6,14 +6,16 @@ #include #include "envoy/stats/stats.h" -#include "envoy/stats/stats_options.h" #include "envoy/stats/store.h" #include "common/common/utility.h" #include "common/stats/heap_stat_data.h" -#include "common/stats/stats_options_impl.h" +#include "common/stats/store_impl.h" +#include "common/stats/symbol_table_impl.h" #include "common/stats/utility.h" +#include "absl/container/flat_hash_map.h" + namespace Envoy { namespace Stats { @@ -22,18 +24,18 @@ namespace Stats { */ template class IsolatedStatsCache { public: - typedef std::function(const std::string& name)> Allocator; + using Allocator = std::function(StatName name)>; IsolatedStatsCache(Allocator alloc) : alloc_(alloc) {} - Base& get(const std::string& name) { + Base& get(StatName name) { auto stat = stats_.find(name); if (stat != stats_.end()) { return *stat->second; } std::shared_ptr new_stat = alloc_(name); - stats_.emplace(name, new_stat); + stats_.emplace(new_stat->statName(), new_stat); return *new_stat; } @@ -48,24 +50,25 @@ template class IsolatedStatsCache { } private: - std::unordered_map> stats_; + StatNameHashMap> stats_; Allocator alloc_; }; -class IsolatedStoreImpl : public Store { +class IsolatedStoreImpl : public StoreImpl { public: IsolatedStoreImpl(); + explicit IsolatedStoreImpl(SymbolTable& symbol_table); // Stats::Scope - Counter& counter(const std::string& name) override { return counters_.get(name); } + Counter& counterFromStatName(StatName name) override { return counters_.get(name); } ScopePtr createScope(const std::string& name) override; void deliverHistogramToSinks(const Histogram&, uint64_t) override {} - Gauge& gauge(const std::string& name) override { return gauges_.get(name); } - Histogram& histogram(const std::string& name) 
override { + Gauge& gaugeFromStatName(StatName name) override { return gauges_.get(name); } + NullGaugeImpl& nullGauge(const std::string&) override { return null_gauge_; } + Histogram& histogramFromStatName(StatName name) override { Histogram& histogram = histograms_.get(name); return histogram; } - const Stats::StatsOptions& statsOptions() const override { return stats_options_; } // Stats::Store std::vector counters() const override { return counters_.toVector(); } @@ -74,12 +77,28 @@ class IsolatedStoreImpl : public Store { return std::vector{}; } + Counter& counter(const std::string& name) override { + StatNameManagedStorage storage(name, symbolTable()); + return counterFromStatName(storage.statName()); + } + Gauge& gauge(const std::string& name) override { + StatNameManagedStorage storage(name, symbolTable()); + return gaugeFromStatName(storage.statName()); + } + Histogram& histogram(const std::string& name) override { + StatNameManagedStorage storage(name, symbolTable()); + return histogramFromStatName(storage.statName()); + } + private: + IsolatedStoreImpl(std::unique_ptr&& symbol_table); + + std::unique_ptr symbol_table_storage_; HeapStatDataAllocator alloc_; IsolatedStatsCache counters_; IsolatedStatsCache gauges_; IsolatedStatsCache histograms_; - const StatsOptionsImpl stats_options_; + NullGaugeImpl null_gauge_; }; } // namespace Stats diff --git a/source/common/stats/metric_impl.cc b/source/common/stats/metric_impl.cc new file mode 100644 index 0000000000000..ba45f0311bbad --- /dev/null +++ b/source/common/stats/metric_impl.cc @@ -0,0 +1,95 @@ +#include "common/stats/metric_impl.h" + +#include "envoy/stats/tag.h" + +#include "common/stats/symbol_table_impl.h" + +namespace Envoy { +namespace Stats { + +MetricImpl::~MetricImpl() { + // The storage must be cleaned by a subclass of MetricImpl in its + // destructor, because the symbol-table is owned by the subclass. + // Simply call MetricImpl::clear() in the subclass dtor. 
+ ASSERT(!stat_names_.populated()); +} + +MetricImpl::MetricImpl(absl::string_view tag_extracted_name, const std::vector& tags, + SymbolTable& symbol_table) { + // Encode all the names and tags into transient storage so we can count the + // required bytes. 1 is added to account for the tag_extracted_name, and we + // multiply the number of tags by 2 to account for the name and value of each + // tag. + const uint32_t num_names = 1 + 2 * tags.size(); + STACK_ARRAY(names, absl::string_view, num_names); + names[0] = tag_extracted_name; + int index = 0; + for (auto& tag : tags) { + names[++index] = tag.name_; + names[++index] = tag.value_; + } + symbol_table.populateList(names.begin(), num_names, stat_names_); +} + +void MetricImpl::clear() { stat_names_.clear(symbolTable()); } + +std::string MetricImpl::tagExtractedName() const { + return symbolTable().toString(tagExtractedStatName()); +} + +StatName MetricImpl::tagExtractedStatName() const { + StatName stat_name; + stat_names_.iterate([&stat_name](StatName s) -> bool { + stat_name = s; + return false; // Returning 'false' stops the iteration. + }); + return stat_name; +} + +void MetricImpl::iterateTagStatNames(const TagStatNameIterFn& fn) const { + enum { TagExtractedName, TagName, TagValue } state = TagExtractedName; + StatName tag_name; + + // StatNameList maintains a linear ordered collection of StatNames, and we + // are mapping that into a tag-extracted name (the first element), followed + // by alternating TagName and TagValue. So we use a little state machine + // as we iterate through the stat_names_. + stat_names_.iterate([&state, &tag_name, &fn](StatName stat_name) -> bool { + switch (state) { + case TagExtractedName: + state = TagName; + break; + case TagName: + tag_name = stat_name; + state = TagValue; + break; + case TagValue: + state = TagName; + if (!fn(tag_name, stat_name)) { + return false; // early exit. 
+ } + break; + } + return true; + }); + ASSERT(state != TagValue); +} + +void MetricImpl::iterateTags(const TagIterFn& fn) const { + const SymbolTable& symbol_table = symbolTable(); + iterateTagStatNames([&fn, &symbol_table](StatName name, StatName value) -> bool { + return fn(Tag{symbol_table.toString(name), symbol_table.toString(value)}); + }); +} + +std::vector MetricImpl::tags() const { + std::vector tags; + iterateTags([&tags](const Tag& tag) -> bool { + tags.emplace_back(tag); + return true; + }); + return tags; +} + +} // namespace Stats +} // namespace Envoy diff --git a/source/common/stats/metric_impl.h b/source/common/stats/metric_impl.h index 3e65e0d79d865..48c67766c8890 100644 --- a/source/common/stats/metric_impl.h +++ b/source/common/stats/metric_impl.h @@ -3,10 +3,12 @@ #include #include +#include "envoy/stats/stat_data_allocator.h" #include "envoy/stats/stats.h" #include "envoy/stats/tag.h" #include "common/common/assert.h" +#include "common/stats/symbol_table_impl.h" namespace Envoy { namespace Stats { @@ -20,23 +22,36 @@ namespace Stats { */ class MetricImpl : public virtual Metric { public: - MetricImpl(std::string&& tag_extracted_name, std::vector&& tags) - : tag_extracted_name_(std::move(tag_extracted_name)), tags_(std::move(tags)) {} + MetricImpl(absl::string_view tag_extracted_name, const std::vector& tags, + SymbolTable& symbol_table); + ~MetricImpl(); - const std::string& tagExtractedName() const override { return tag_extracted_name_; } - const std::vector& tags() const override { return tags_; } + std::string name() const override { return symbolTable().toString(statName()); } + std::string tagExtractedName() const override; + std::vector tags() const override; + StatName tagExtractedStatName() const override; + void iterateTagStatNames(const TagStatNameIterFn& fn) const override; + void iterateTags(const TagIterFn& fn) const override; protected: - /** - * Flags used by all stats types to figure out whether they have been used. 
- */ - struct Flags { - static const uint8_t Used = 0x1; - }; + void clear(); private: - const std::string tag_extracted_name_; - const std::vector tags_; + StatNameList stat_names_; +}; + +class NullMetricImpl : public MetricImpl { +public: + explicit NullMetricImpl(SymbolTable& symbol_table) + : MetricImpl("", std::vector(), symbol_table), stat_name_storage_("", symbol_table) {} + + const SymbolTable& symbolTable() const override { return stat_name_storage_.symbolTable(); } + SymbolTable& symbolTable() override { return stat_name_storage_.symbolTable(); } + bool used() const override { return false; } + StatName statName() const override { return stat_name_storage_.statName(); } + +private: + StatNameManagedStorage stat_name_storage_; }; } // namespace Stats diff --git a/source/common/stats/raw_stat_data.cc b/source/common/stats/raw_stat_data.cc deleted file mode 100644 index 7ef23654bb7bb..0000000000000 --- a/source/common/stats/raw_stat_data.cc +++ /dev/null @@ -1,86 +0,0 @@ -#include "common/stats/raw_stat_data.h" - -#include - -#include -#include -#include - -#include "common/common/lock_guard.h" - -namespace Envoy { -namespace Stats { - -namespace { - -// Round val up to the next multiple of the natural alignment. -// Note: this implementation only works because 8 is a power of 2. -uint64_t roundUpMultipleNaturalAlignment(uint64_t val) { - const uint64_t multiple = alignof(RawStatData); - static_assert(multiple == 1 || multiple == 2 || multiple == 4 || multiple == 8 || multiple == 16, - "multiple must be a power of 2 for this algorithm to work"); - return (val + multiple - 1) & ~(multiple - 1); -} - -} // namespace - -// Normally the compiler would do this, but because name_ is a flexible-array-length -// element, the compiler can't. RawStatData is put into an array in HotRestartImpl, so -// it's important that each element starts on the required alignment for the type. 
-uint64_t RawStatData::structSize(uint64_t name_size) { - return roundUpMultipleNaturalAlignment(sizeof(RawStatData) + name_size + 1); -} - -uint64_t RawStatData::structSizeWithOptions(const StatsOptions& stats_options) { - return structSize(stats_options.maxNameLength()); -} - -void RawStatData::initialize(absl::string_view key, const StatsOptions& stats_options) { - ASSERT(!initialized()); - ASSERT(key.size() <= stats_options.maxNameLength()); - ref_count_ = 1; - memcpy(name_, key.data(), key.size()); - name_[key.size()] = '\0'; -} - -Stats::RawStatData* RawStatDataAllocator::alloc(absl::string_view name) { - // Try to find the existing slot in shared memory, otherwise allocate a new one. - Thread::LockGuard lock(mutex_); - if (name.length() > options_.maxNameLength()) { - ENVOY_LOG_MISC( - warn, - "Statistic '{}' is too long with {} characters, it will be truncated to {} characters", - name, name.size(), options_.maxNameLength()); - name = name.substr(0, options_.maxNameLength()); - } - auto value_created = stats_set_.insert(name); - Stats::RawStatData* data = value_created.first; - if (data == nullptr) { - return nullptr; - } - // For new entries (value-created.second==true), BlockMemoryHashSet calls Value::initialize() - // automatically, but on recycled entries (value-created.second==false) we need to bump the - // ref-count. - if (!value_created.second) { - ++data->ref_count_; - } - return data; -} - -void RawStatDataAllocator::free(Stats::RawStatData& data) { - // We must hold the lock since the reference decrement can race with an initialize above. 
- Thread::LockGuard lock(mutex_); - ASSERT(data.ref_count_ > 0); - --data.ref_count_; - if (data.ref_count_ > 0) { - return; - } - bool key_removed = stats_set_.remove(data.key()); - ASSERT(key_removed); - memset(static_cast(&data), 0, Stats::RawStatData::structSizeWithOptions(options_)); -} - -template class StatDataAllocatorImpl; - -} // namespace Stats -} // namespace Envoy diff --git a/source/common/stats/raw_stat_data.h b/source/common/stats/raw_stat_data.h deleted file mode 100644 index 882a35549a976..0000000000000 --- a/source/common/stats/raw_stat_data.h +++ /dev/null @@ -1,115 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "envoy/stats/stat_data_allocator.h" -#include "envoy/stats/stats_options.h" - -#include "common/common/assert.h" -#include "common/common/block_memory_hash_set.h" -#include "common/common/hash.h" -#include "common/common/thread.h" -#include "common/stats/stat_data_allocator_impl.h" - -#include "absl/strings/string_view.h" - -namespace Envoy { -namespace Stats { - -/** - * This structure is the backing memory for both CounterImpl and GaugeImpl. It is designed so that - * it can be allocated from shared memory if needed. - * - * @note Due to name_ being variable size, sizeof(RawStatData) probably isn't useful. Use - * RawStatData::structSize() or RawStatData::structSizeWithOptions() instead. - */ -struct RawStatData { - - /** - * Due to the flexible-array-length of name_, c-style allocation - * and initialization are necessary. - */ - RawStatData() = delete; - ~RawStatData() = delete; - - /** - * @return uint64_t the size of this struct, accounting for the length of - * name_ and padding for alignment. - */ - static uint64_t structSize(uint64_t name_size); - - /** - * Wrapper for structSize, taking a StatsOptions struct. Required by - * BlockMemoryHashSet, which has the context to supply StatsOptions. 
- */ - static uint64_t structSizeWithOptions(const StatsOptions& stats_options); - - /** - * Initializes this object to have the specified key, a refcount of 1, and all - * other values zero. Required for the HeapRawStatDataAllocator, which does - * not expect stat name truncation. We pass in the number of bytes allocated - * in order to assert the copy is safe inline. - * - * @param key the key - * @param stats_options the stats options - */ - void initialize(absl::string_view key, const StatsOptions& stats_options); - - /** - * @return uint64_t a hash of the key. This is required by BlockMemoryHashSet. - */ - static uint64_t hash(absl::string_view key) { return HashUtil::xxHash64(key); } - - /** - * @return true if object is in use. - */ - bool initialized() { return name_[0] != '\0'; } - - /** - * @return absl::string_view the name as a string_view. - */ - absl::string_view key() const { return absl::string_view(name_); } - - /** - * @return const char* the name. - */ - const char* name() const { return name_; } - - std::atomic value_; - std::atomic pending_increment_; - std::atomic flags_; - std::atomic ref_count_; - std::atomic unused_; - char name_[]; -}; - -using RawStatDataSet = BlockMemoryHashSet; - -class RawStatDataAllocator : public StatDataAllocatorImpl { -public: - RawStatDataAllocator(Thread::BasicLockable& mutex, RawStatDataSet& stats_set, - const StatsOptions& options) - : mutex_(mutex), stats_set_(stats_set), options_(options) {} - - // StatDataAllocator - bool requiresBoundedStatNameSize() const override { return true; } - Stats::RawStatData* alloc(absl::string_view name) override; - void free(Stats::RawStatData& data) override; - -private: - Thread::BasicLockable& mutex_; - RawStatDataSet& stats_set_ GUARDED_BY(mutex_); - const StatsOptions& options_; -}; - -} // namespace Stats -} // namespace Envoy diff --git a/source/common/stats/scope_prefixer.cc b/source/common/stats/scope_prefixer.cc new file mode 100644 index 0000000000000..521696059e4ad 
--- /dev/null +++ b/source/common/stats/scope_prefixer.cc @@ -0,0 +1,52 @@ +#include "common/stats/scope_prefixer.h" + +#include "envoy/stats/scope.h" + +#include "common/stats/symbol_table_impl.h" +#include "common/stats/utility.h" + +namespace Envoy { +namespace Stats { + +ScopePrefixer::ScopePrefixer(absl::string_view prefix, Scope& scope) + : scope_(scope), prefix_(Utility::sanitizeStatsName(prefix), symbolTable()) {} + +ScopePrefixer::ScopePrefixer(StatName prefix, Scope& scope) + : scope_(scope), prefix_(prefix, symbolTable()) {} + +ScopePrefixer::~ScopePrefixer() { prefix_.free(symbolTable()); } + +ScopePtr ScopePrefixer::createScopeFromStatName(StatName name) { + SymbolTable::StoragePtr joined = symbolTable().join({prefix_.statName(), name}); + return std::make_unique(StatName(joined.get()), scope_); +} + +ScopePtr ScopePrefixer::createScope(const std::string& name) { + StatNameManagedStorage stat_name_storage(Utility::sanitizeStatsName(name), symbolTable()); + return createScopeFromStatName(stat_name_storage.statName()); +} + +Counter& ScopePrefixer::counterFromStatName(StatName name) { + Stats::SymbolTable::StoragePtr stat_name_storage = + scope_.symbolTable().join({prefix_.statName(), name}); + return scope_.counterFromStatName(StatName(stat_name_storage.get())); +} + +Gauge& ScopePrefixer::gaugeFromStatName(StatName name) { + Stats::SymbolTable::StoragePtr stat_name_storage = + scope_.symbolTable().join({prefix_.statName(), name}); + return scope_.gaugeFromStatName(StatName(stat_name_storage.get())); +} + +Histogram& ScopePrefixer::histogramFromStatName(StatName name) { + Stats::SymbolTable::StoragePtr stat_name_storage = + scope_.symbolTable().join({prefix_.statName(), name}); + return scope_.histogramFromStatName(StatName(stat_name_storage.get())); +} + +void ScopePrefixer::deliverHistogramToSinks(const Histogram& histograms, uint64_t val) { + scope_.deliverHistogramToSinks(histograms, val); +} + +} // namespace Stats +} // namespace Envoy diff --git 
a/source/common/stats/scope_prefixer.h b/source/common/stats/scope_prefixer.h new file mode 100644 index 0000000000000..66c61072c7f42 --- /dev/null +++ b/source/common/stats/scope_prefixer.h @@ -0,0 +1,49 @@ +#include "envoy/stats/scope.h" + +#include "common/stats/symbol_table_impl.h" + +namespace Envoy { +namespace Stats { + +// Implements a Scope that delegates to a passed-in scope, prefixing all names +// prior to creation. +class ScopePrefixer : public Scope { +public: + ScopePrefixer(absl::string_view prefix, Scope& scope); + ScopePrefixer(StatName prefix, Scope& scope); + ~ScopePrefixer() override; + + ScopePtr createScopeFromStatName(StatName name); + + // Scope + ScopePtr createScope(const std::string& name) override; + Counter& counterFromStatName(StatName name) override; + Gauge& gaugeFromStatName(StatName name) override; + Histogram& histogramFromStatName(StatName name) override; + void deliverHistogramToSinks(const Histogram& histograms, uint64_t val) override; + + Counter& counter(const std::string& name) override { + StatNameManagedStorage storage(name, symbolTable()); + return counterFromStatName(storage.statName()); + } + Gauge& gauge(const std::string& name) override { + StatNameManagedStorage storage(name, symbolTable()); + return gaugeFromStatName(storage.statName()); + } + Histogram& histogram(const std::string& name) override { + StatNameManagedStorage storage(name, symbolTable()); + return histogramFromStatName(storage.statName()); + } + + const SymbolTable& symbolTable() const override { return scope_.symbolTable(); } + virtual SymbolTable& symbolTable() override { return scope_.symbolTable(); } + + NullGaugeImpl& nullGauge(const std::string& str) override { return scope_.nullGauge(str); } + +private: + Scope& scope_; + StatNameStorage prefix_; +}; + +} // namespace Stats +} // namespace Envoy diff --git a/source/common/stats/stat_data_allocator_impl.h b/source/common/stats/stat_data_allocator_impl.h index 81df5f4269576..47b0858fe0a8f 100644 
--- a/source/common/stats/stat_data_allocator_impl.h +++ b/source/common/stats/stat_data_allocator_impl.h @@ -5,6 +5,7 @@ #include "envoy/stats/stat_data_allocator.h" #include "envoy/stats/stats.h" +#include "envoy/stats/symbol_table.h" #include "common/common/assert.h" #include "common/stats/metric_impl.h" @@ -22,27 +23,11 @@ namespace Stats { // hot restart stat continuity, and heap allocation for more efficient RAM usage // for when hot-restart is not required. // -// Also note that RawStatData needs to live in a shared memory block, and it's -// possible, but not obvious, that a vptr would be usable across processes. In -// any case, RawStatData is allocated from a shared-memory block rather than via -// new, so the usual C++ compiler assistance for setting up vptrs will not be -// available. This could be resolved with placed new, or another nesting level. +// TODO(fredlas) the above paragraph is obsolete; it's now only heap. So, this +// interface can hopefully be collapsed down a bit. template class StatDataAllocatorImpl : public StatDataAllocator { public: - // StatDataAllocator - CounterSharedPtr makeCounter(absl::string_view name, std::string&& tag_extracted_name, - std::vector&& tags) override; - GaugeSharedPtr makeGauge(absl::string_view name, std::string&& tag_extracted_name, - std::vector&& tags) override; - - /** - * @param name the full name of the stat. - * @return StatData* a data block for a given stat name or nullptr if there is no more memory - * available for stats. The allocator should return a reference counted data location - * by name if one already exists with the same name. This is used for intra-process - * scope swapping as well as inter-process hot restart. - */ - virtual StatData* alloc(absl::string_view name) PURE; + explicit StatDataAllocatorImpl(SymbolTable& symbol_table) : symbol_table_(symbol_table) {} /** * Free a raw stat data block. 
The allocator should handle reference counting and only truly @@ -50,6 +35,15 @@ template class StatDataAllocatorImpl : public StatDataAllocator * @param data the data returned by alloc(). */ virtual void free(StatData& data) PURE; + + SymbolTable& symbolTable() override { return symbol_table_; } + const SymbolTable& symbolTable() const override { return symbol_table_; } + +private: + // SymbolTable encodes stat names and decodes them back into strings. This does not + // get guarded by a mutex, since it has its own internal mutex to guarantee + // thread safety. + SymbolTable& symbol_table_; }; /** @@ -62,13 +56,17 @@ template class StatDataAllocatorImpl : public StatDataAllocator template class CounterImpl : public Counter, public MetricImpl { public: CounterImpl(StatData& data, StatDataAllocatorImpl& alloc, - std::string&& tag_extracted_name, std::vector&& tags) - : MetricImpl(std::move(tag_extracted_name), std::move(tags)), data_(data), alloc_(alloc) {} - ~CounterImpl() { alloc_.free(data_); } - - // Stats::Metric - std::string name() const override { return std::string(data_.name()); } - const char* nameCStr() const override { return data_.name(); } + absl::string_view tag_extracted_name, const std::vector& tags) + : MetricImpl(tag_extracted_name, tags, alloc.symbolTable()), data_(data), alloc_(alloc) {} + ~CounterImpl() override { + alloc_.free(data_); + + // MetricImpl must be explicitly cleared() before destruction, otherwise it + // will not be able to access the SymbolTable& to free the symbols. An RAII + // alternative would be to store the SymbolTable reference in the + // MetricImpl, costing 8 bytes per stat.
+ MetricImpl::clear(); + } // Stats::Counter void add(uint64_t amount) override { @@ -83,7 +81,10 @@ template class CounterImpl : public Counter, public MetricImpl bool used() const override { return data_.flags_ & Flags::Used; } uint64_t value() const override { return data_.value_; } -private: + const SymbolTable& symbolTable() const override { return alloc_.symbolTable(); } + SymbolTable& symbolTable() override { return alloc_.symbolTable(); } + +protected: StatData& data_; StatDataAllocatorImpl& alloc_; }; @@ -92,19 +93,21 @@ template class CounterImpl : public Counter, public MetricImpl * Null counter implementation. * No-ops on all calls and requires no underlying metric or data. */ -class NullCounterImpl : public Counter { +class NullCounterImpl : public Counter, NullMetricImpl { public: - NullCounterImpl() {} - ~NullCounterImpl() {} - std::string name() const override { return ""; } - const char* nameCStr() const override { return ""; } - const std::string& tagExtractedName() const override { CONSTRUCT_ON_FIRST_USE(std::string, ""); } - const std::vector& tags() const override { CONSTRUCT_ON_FIRST_USE(std::vector, {}); } + explicit NullCounterImpl(SymbolTable& symbol_table) : NullMetricImpl(symbol_table) {} + ~NullCounterImpl() override { + // MetricImpl must be explicitly cleared() before destruction, otherwise it + // will not be able to access the SymbolTable& to free the symbols. An RAII + // alternative would be to store the SymbolTable reference in the + // MetricImpl, costing 8 bytes per stat. 
+ MetricImpl::clear(); + } + void add(uint64_t) override {} void inc() override {} uint64_t latch() override { return 0; } void reset() override {} - bool used() const override { return false; } uint64_t value() const override { return 0; } }; @@ -114,34 +117,58 @@ class NullCounterImpl : public Counter { template class GaugeImpl : public Gauge, public MetricImpl { public: GaugeImpl(StatData& data, StatDataAllocatorImpl& alloc, - std::string&& tag_extracted_name, std::vector&& tags) - : MetricImpl(std::move(tag_extracted_name), std::move(tags)), data_(data), alloc_(alloc) {} - ~GaugeImpl() { alloc_.free(data_); } - - // Stats::Metric - std::string name() const override { return std::string(data_.name()); } - const char* nameCStr() const override { return data_.name(); } + absl::string_view tag_extracted_name, const std::vector& tags) + : MetricImpl(tag_extracted_name, tags, alloc.symbolTable()), data_(data), alloc_(alloc) {} + ~GaugeImpl() override { + alloc_.free(data_); + + // MetricImpl must be explicitly cleared() before destruction, otherwise it + // will not be able to access the SymbolTable& to free the symbols. An RAII + // alternative would be to store the SymbolTable reference in the + // MetricImpl, costing 8 bytes per stat. 
+ MetricImpl::clear(); + } // Stats::Gauge - virtual void add(uint64_t amount) override { + void add(uint64_t amount) override { data_.value_ += amount; data_.flags_ |= Flags::Used; } - virtual void dec() override { sub(1); } - virtual void inc() override { add(1); } - virtual void set(uint64_t value) override { + void dec() override { sub(1); } + void inc() override { add(1); } + void set(uint64_t value) override { data_.value_ = value; data_.flags_ |= Flags::Used; } - virtual void sub(uint64_t amount) override { + void sub(uint64_t amount) override { ASSERT(data_.value_ >= amount); - ASSERT(used()); + ASSERT(used() || amount == 0); data_.value_ -= amount; } - virtual uint64_t value() const override { return data_.value_; } + uint64_t value() const override { return data_.value_; } bool used() const override { return data_.flags_ & Flags::Used; } + // Returns true if values should be added, false if no import. + absl::optional cachedShouldImport() const override { + if ((data_.flags_ & Flags::LogicCached) == 0) { + return absl::nullopt; + } + return (data_.flags_ & Flags::LogicAccumulate) != 0; + } + + void setShouldImport(bool should_import) override { + if (should_import) { + data_.flags_ |= Flags::LogicAccumulate; + } else { + data_.flags_ |= Flags::LogicNeverImport; + } + } + private: + const SymbolTable& symbolTable() const override { return alloc_.symbolTable(); } + SymbolTable& symbolTable() override { return alloc_.symbolTable(); } + +protected: StatData& data_; StatDataAllocatorImpl& alloc_; }; @@ -150,46 +177,26 @@ template class GaugeImpl : public Gauge, public MetricImpl { * Null gauge implementation. * No-ops on all calls and requires no underlying metric or data. 
*/ -class NullGaugeImpl : public Gauge { +class NullGaugeImpl : public Gauge, NullMetricImpl { public: - NullGaugeImpl() {} - ~NullGaugeImpl() {} - std::string name() const override { return ""; } - const char* nameCStr() const override { return ""; } - const std::string& tagExtractedName() const override { CONSTRUCT_ON_FIRST_USE(std::string, ""); } - const std::vector& tags() const override { CONSTRUCT_ON_FIRST_USE(std::vector, {}); } + explicit NullGaugeImpl(SymbolTable& symbol_table) : NullMetricImpl(symbol_table) {} + ~NullGaugeImpl() override { + // MetricImpl must be explicitly cleared() before destruction, otherwise it + // will not be able to access the SymbolTable& to free the symbols. An RAII + // alternative would be to store the SymbolTable reference in the + // MetricImpl, costing 8 bytes per stat. + MetricImpl::clear(); + } + void add(uint64_t) override {} void inc() override {} void dec() override {} void set(uint64_t) override {} void sub(uint64_t) override {} - bool used() const override { return false; } uint64_t value() const override { return 0; } + absl::optional cachedShouldImport() const override { return absl::nullopt; } + void setShouldImport(bool) override {} }; -template -CounterSharedPtr StatDataAllocatorImpl::makeCounter(absl::string_view name, - std::string&& tag_extracted_name, - std::vector&& tags) { - StatData* data = alloc(name); - if (data == nullptr) { - return nullptr; - } - return std::make_shared>(*data, *this, std::move(tag_extracted_name), - std::move(tags)); -} - -template -GaugeSharedPtr StatDataAllocatorImpl::makeGauge(absl::string_view name, - std::string&& tag_extracted_name, - std::vector&& tags) { - StatData* data = alloc(name); - if (data == nullptr) { - return nullptr; - } - return std::make_shared>(*data, *this, std::move(tag_extracted_name), - std::move(tags)); -} - } // namespace Stats } // namespace Envoy diff --git a/source/common/stats/stat_merger.cc b/source/common/stats/stat_merger.cc new file mode 100644 
index 0000000000000..b111fed68d0ba --- /dev/null +++ b/source/common/stats/stat_merger.cc @@ -0,0 +1,89 @@ +#include "common/stats/stat_merger.h" + +#include + +namespace Envoy { +namespace Stats { + +StatMerger::StatMerger(Stats::Store& target_store) : temp_scope_(target_store.createScope("")) {} + +bool StatMerger::shouldImport(Gauge& gauge, const std::string& gauge_name) { + absl::optional should_import = gauge.cachedShouldImport(); + if (should_import.has_value()) { + return should_import.value(); + } + + // Gauge name *substrings*, and special logic to use for combining those gauges' values. + static const auto* nonstandard_combine_logic = new std::vector{ + // Any .version is either a static property of the binary, or an opaque identifier for + // resources that are not passed across hot restart. + std::regex(".*\\.version$"), + // Once the child is up and reporting stats, its own control plane state and liveness is what + // we're interested in. + std::regex(".*\\.control_plane.connected_state$"), + std::regex("^server.live$"), + // Properties that should reasonably have some continuity across hot restart. The parent's + // last value should be a relatively accurate starting point, and then the child can update + // from there when appropriate. (All of these exceptional stats used with set() rather than + // add()/sub(), so the child's new value will in fact overwrite.) + std::regex("^cluster_manager.active_clusters$"), + std::regex("^cluster_manager.warming_clusters$"), + std::regex("^cluster\\..*\\.membership_.*$"), + std::regex("^cluster\\..*\\.max_host_weight$"), + std::regex(".*\\.total_principals$"), + std::regex("^listener_manager.total_listeners_active$"), + std::regex("^overload\\..*\\.pressure$"), + // Due to the fd passing, the parent's view of whether its listeners are in transitive states + // is not useful. 
+ std::regex("^listener_manager.total_listeners_draining$"), + std::regex("^listener_manager.total_listeners_warming$"), + // Static properties known at startup. + std::regex("^server.concurrency$"), + std::regex("^server.hot_restart_epoch$"), + std::regex("^runtime.admin_overrides_active$"), + std::regex("^runtime.num_keys$"), + }; + for (const auto& exception : *nonstandard_combine_logic) { + std::smatch match; + if (std::regex_match(gauge_name, match, exception)) { + gauge.setShouldImport(false); + return false; + } + } + gauge.setShouldImport(true); + return true; +} + +void StatMerger::mergeCounters(const Protobuf::Map& counter_deltas) { + for (const auto& counter : counter_deltas) { + temp_scope_->counter(counter.first).add(counter.second); + } +} + +void StatMerger::mergeGauges(const Protobuf::Map& gauges) { + for (const auto& gauge : gauges) { + auto& gauge_ref = temp_scope_->gauge(gauge.first); + uint64_t& parent_value_ref = parent_gauge_values_[gauge_ref.statName()]; + uint64_t old_parent_value = parent_value_ref; + uint64_t new_parent_value = gauge.second; + parent_value_ref = new_parent_value; + + if (!StatMerger::shouldImport(gauge_ref, gauge.first)) { + continue; + } + if (new_parent_value > old_parent_value) { + gauge_ref.add(new_parent_value - old_parent_value); + } else { + gauge_ref.sub(old_parent_value - new_parent_value); + } + } +} + +void StatMerger::mergeStats(const Protobuf::Map& counter_deltas, + const Protobuf::Map& gauges) { + mergeCounters(counter_deltas); + mergeGauges(gauges); +} + +} // namespace Stats +} // namespace Envoy diff --git a/source/common/stats/stat_merger.h b/source/common/stats/stat_merger.h new file mode 100644 index 0000000000000..3558e312fd5ac --- /dev/null +++ b/source/common/stats/stat_merger.h @@ -0,0 +1,61 @@ +#pragma once + +#include "envoy/stats/store.h" + +#include "common/protobuf/protobuf.h" +#include "common/stats/symbol_table_impl.h" + +#include "absl/container/flat_hash_map.h" + +namespace Envoy { 
+namespace Stats { + +// Responsible for the sensible merging of two instances of the same stat from two different +// (typically hot restart parent+child) Envoy processes. +class StatMerger { +public: + StatMerger(Stats::Store& target_store); + + // Merge the values of stats_proto into stats_store. Counters are always straightforward + // addition, while gauges default to addition but have exceptions. + void mergeStats(const Protobuf::Map& counter_deltas, + const Protobuf::Map& gauges); + + // TODO(fredlas) add void verifyCombineLogicSpecified(absl::string_view gauge_name), to + // be called at gauge allocation, to ensure (with an ASSERT) that anyone adding a new stat + // will be forced to come across this code and explicitly specify combination logic. + // + // OR, + // switch from the combination logic table to requiring the stat macro declarations themselves + // to indicate the logic. + + // Returns true if the parent's value can be added in, false if we should do nothing. + static bool shouldImport(Gauge& gauge, const std::string& gauge_name); + +private: + void mergeCounters(const Protobuf::Map& counter_deltas); + void mergeGauges(const Protobuf::Map& gauges); + StatNameHashMap parent_gauge_values_; + // A stats Scope for our in-the-merging-process counters to live in. Scopes conceptually hold + // shared_ptrs to the stats that live in them, with the question of which stats are living in a + // given scope determined by which stat names have been accessed via that scope. E.g., if you + // access a stat named "some.shared" directly through the ordinary store, and then access a + // stat named "shared" in a scope configured with the prefix "some.", there is now a single + // stat named some.shared pointed to by both. As another example, if you access the stat + // "single" in the "some" scope, there will be a stat named "some.single" pointed to by just + // that scope. 
Now, if you delete the scope, some.shared will stick around, but some.single + // will be destroyed. + // + // All of that is relevant here because it is used to get a certain desired behavior. + // Specifically, stats must be kept up to date with values from the parent throughout hot + // restart, but once the restart completes, they must be dropped without a trace if the child has + // not taken action (independent of the hot restart stat merging) that would lead to them getting + // created in the store. By storing these stats in a scope (with an empty prefix), we can + // preserve all stats throughout the hot restart. Then, when the restart completes, dropping + // the scope will drop exactly those stats whose names have not already been accessed through + // another store/scope. + ScopePtr temp_scope_; +}; + +} // namespace Stats +} // namespace Envoy diff --git a/source/common/stats/stats_options_impl.h b/source/common/stats/stats_options_impl.h deleted file mode 100644 index 01b2e563b1a0f..0000000000000 --- a/source/common/stats/stats_options_impl.h +++ /dev/null @@ -1,32 +0,0 @@ -#pragma once - -#include - -#include "envoy/stats/stats_options.h" - -namespace Envoy { -namespace Stats { - -// The max name length is based on current set of stats. -// As of now, the longest stat is -// cluster..outlier_detection.ejections_consecutive_5xx -// which is 52 characters long without the cluster name. -// The max stat name length is 127 (default). So, in order to give room -// for growth to both the envoy generated stat characters -// (e.g., outlier_detection...) and user supplied names (e.g., cluster name), -// we set the max user supplied name length to 60, and the max internally -// generated stat suffixes to 67 (15 more characters to grow). 
-// If you want to increase the max user supplied name length, use the compiler -// option ENVOY_DEFAULT_MAX_OBJ_NAME_LENGTH or the CLI option -// max-obj-name-len -struct StatsOptionsImpl : public StatsOptions { - size_t maxNameLength() const override { return max_obj_name_length_ + max_stat_suffix_length_; } - size_t maxObjNameLength() const override { return max_obj_name_length_; } - size_t maxStatSuffixLength() const override { return max_stat_suffix_length_; } - - size_t max_obj_name_length_ = 60; - size_t max_stat_suffix_length_ = 67; -}; - -} // namespace Stats -} // namespace Envoy diff --git a/source/common/stats/store_impl.h b/source/common/stats/store_impl.h new file mode 100644 index 0000000000000..94b2db6b06e1c --- /dev/null +++ b/source/common/stats/store_impl.h @@ -0,0 +1,36 @@ +#pragma once + +#include "envoy/stats/stats.h" +#include "envoy/stats/store.h" + +#include "common/stats/symbol_table_impl.h" + +namespace Envoy { +namespace Stats { + +/** + * Implements common parts of the Store API needed by multiple derivations of Store. 
+ */ +class StoreImpl : public Store { +public: + explicit StoreImpl(SymbolTable& symbol_table) : symbol_table_(symbol_table) {} + + Counter& counterFromStatName(StatName name) override { + return counter(symbol_table_.toString(name)); + } + + Gauge& gaugeFromStatName(StatName name) override { return gauge(symbol_table_.toString(name)); } + + Histogram& histogramFromStatName(StatName name) override { + return histogram(symbol_table_.toString(name)); + } + + SymbolTable& symbolTable() override { return symbol_table_; } + const SymbolTable& symbolTable() const override { return symbol_table_; } + +private: + SymbolTable& symbol_table_; +}; + +} // namespace Stats +} // namespace Envoy diff --git a/source/common/stats/symbol_table_impl.cc b/source/common/stats/symbol_table_impl.cc index 82a217ba0a493..9b85687a66bcf 100644 --- a/source/common/stats/symbol_table_impl.cc +++ b/source/common/stats/symbol_table_impl.cc @@ -30,7 +30,7 @@ void StatName::debugPrint() { for (uint64_t i = 0; i < nbytes; ++i) { absl::StrAppend(&msg, " ", static_cast(data()[i])); } - SymbolVec encoding = SymbolEncoding::decodeSymbols(data(), dataSize()); + SymbolVec encoding = SymbolTableImpl::Encoding::decodeSymbols(data(), dataSize()); absl::StrAppend(&msg, ", numSymbols=", encoding.size(), ":"); for (Symbol symbol : encoding) { absl::StrAppend(&msg, " ", symbol); @@ -40,9 +40,13 @@ void StatName::debugPrint() { } #endif -SymbolEncoding::~SymbolEncoding() { ASSERT(vec_.empty()); } +SymbolTableImpl::Encoding::~Encoding() { + // Verifies that moveToStorage() was called on this encoding. Failure + // to call moveToStorage() will result in leaks symbols. + ASSERT(vec_.empty()); +} -void SymbolEncoding::addSymbol(Symbol symbol) { +void SymbolTableImpl::Encoding::addSymbol(Symbol symbol) { // UTF-8-like encoding where a value 127 or less gets written as a single // byte. For higher values we write the low-order 7 bits with a 1 in // the high-order bit. 
Then we right-shift 7 bits and keep adding more bytes @@ -60,14 +64,8 @@ void SymbolEncoding::addSymbol(Symbol symbol) { } while (symbol != 0); } -void SymbolEncoding::addStringForFakeSymbolTable(absl::string_view str) { - if (!str.empty()) { - vec_.resize(str.size()); - memcpy(&vec_[0], str.data(), str.size()); - } -} - -SymbolVec SymbolEncoding::decodeSymbols(const SymbolTable::Storage array, uint64_t size) { +SymbolVec SymbolTableImpl::Encoding::decodeSymbols(const SymbolTable::Storage array, + uint64_t size) { SymbolVec symbol_vec; Symbol symbol = 0; for (uint32_t shift = 0; size > 0; --size, ++array) { @@ -88,19 +86,9 @@ SymbolVec SymbolEncoding::decodeSymbols(const SymbolTable::Storage array, uint64 return symbol_vec; } -// Saves the specified length into the byte array, returning the next byte. -// There is no guarantee that bytes will be aligned, so we can't cast to a -// uint16_t* and assign, but must individually copy the bytes. -static inline uint8_t* saveLengthToBytesReturningNext(uint64_t length, uint8_t* bytes) { - ASSERT(length < StatNameMaxSize); - *bytes++ = length & 0xff; - *bytes++ = length >> 8; - return bytes; -} - -uint64_t SymbolEncoding::moveToStorage(SymbolTable::Storage symbol_array) { - uint64_t sz = size(); - symbol_array = saveLengthToBytesReturningNext(sz, symbol_array); +uint64_t SymbolTableImpl::Encoding::moveToStorage(SymbolTable::Storage symbol_array) { + uint64_t sz = dataBytesRequired(); + symbol_array = writeLengthReturningNext(sz, symbol_array); if (sz != 0) { memcpy(symbol_array, vec_.data(), sz * sizeof(uint8_t)); } @@ -123,11 +111,9 @@ SymbolTableImpl::~SymbolTableImpl() { // TODO(ambuc): There is a possible performance optimization here for avoiding // the encoding of IPs / numbers if they appear in stat names. We don't want to // waste time symbolizing an integer as an integer, if we can help it. 
-SymbolEncoding SymbolTableImpl::encode(const absl::string_view name) { - SymbolEncoding encoding; - +void SymbolTableImpl::addTokensToEncoding(const absl::string_view name, Encoding& encoding) { if (name.empty()) { - return encoding; + return; } // We want to hold the lock for the minimum amount of time, so we do the @@ -149,7 +135,6 @@ SymbolEncoding SymbolTableImpl::encode(const absl::string_view name) { for (Symbol symbol : symbols) { encoding.addSymbol(symbol); } - return encoding; } uint64_t SymbolTableImpl::numSymbols() const { @@ -159,7 +144,12 @@ uint64_t SymbolTableImpl::numSymbols() const { } std::string SymbolTableImpl::toString(const StatName& stat_name) const { - return decodeSymbolVec(SymbolEncoding::decodeSymbols(stat_name.data(), stat_name.dataSize())); + return decodeSymbolVec(Encoding::decodeSymbols(stat_name.data(), stat_name.dataSize())); +} + +void SymbolTableImpl::callWithStringView(StatName stat_name, + const std::function& fn) const { + fn(toString(stat_name)); } std::string SymbolTableImpl::decodeSymbolVec(const SymbolVec& symbols) const { @@ -177,7 +167,7 @@ std::string SymbolTableImpl::decodeSymbolVec(const SymbolVec& symbols) const { void SymbolTableImpl::incRefCount(const StatName& stat_name) { // Before taking the lock, decode the array of symbols from the SymbolTable::Storage. - SymbolVec symbols = SymbolEncoding::decodeSymbols(stat_name.data(), stat_name.dataSize()); + SymbolVec symbols = Encoding::decodeSymbols(stat_name.data(), stat_name.dataSize()); Thread::LockGuard lock(lock_); for (Symbol symbol : symbols) { @@ -193,7 +183,7 @@ void SymbolTableImpl::incRefCount(const StatName& stat_name) { void SymbolTableImpl::free(const StatName& stat_name) { // Before taking the lock, decode the array of symbols from the SymbolTable::Storage. 
- SymbolVec symbols = SymbolEncoding::decodeSymbols(stat_name.data(), stat_name.dataSize()); + SymbolVec symbols = Encoding::decodeSymbols(stat_name.data(), stat_name.dataSize()); Thread::LockGuard lock(lock_); for (Symbol symbol : symbols) { @@ -203,7 +193,7 @@ void SymbolTableImpl::free(const StatName& stat_name) { auto encode_search = encode_map_.find(*decode_search->second); ASSERT(encode_search != encode_map_.end()); - // If that was the last remaining client usage of the symbol, erase the the + // If that was the last remaining client usage of the symbol, erase the // current mappings and add the now-unused symbol to the reuse pool. // // The "if (--EXPR.ref_count_)" pattern speeds up BM_CreateRace by 20% in @@ -265,8 +255,8 @@ bool SymbolTableImpl::lessThan(const StatName& a, const StatName& b) const { // If this becomes a performance bottleneck (e.g. during sorting), we could // provide an iterator-like interface for incrementally decoding the symbols // without allocating memory. - SymbolVec av = SymbolEncoding::decodeSymbols(a.data(), a.dataSize()); - SymbolVec bv = SymbolEncoding::decodeSymbols(b.data(), b.dataSize()); + SymbolVec av = Encoding::decodeSymbols(a.data(), a.dataSize()); + SymbolVec bv = Encoding::decodeSymbols(b.data(), b.dataSize()); // Calling fromSymbol requires holding the lock, as it needs read-access to // the maps that are written when adding new symbols. 
@@ -296,15 +286,20 @@ void SymbolTableImpl::debugPrint() const { } #endif -StatNameStorage::StatNameStorage(absl::string_view name, SymbolTable& table) { - SymbolEncoding encoding = table.encode(name); - bytes_ = std::make_unique(encoding.bytesRequired()); - encoding.moveToStorage(bytes_.get()); +SymbolTable::StoragePtr SymbolTableImpl::encode(absl::string_view name) { + Encoding encoding; + addTokensToEncoding(name, encoding); + auto bytes = std::make_unique(encoding.bytesRequired()); + encoding.moveToStorage(bytes.get()); + return bytes; } +StatNameStorage::StatNameStorage(absl::string_view name, SymbolTable& table) + : bytes_(table.encode(name)) {} + StatNameStorage::StatNameStorage(StatName src, SymbolTable& table) { uint64_t size = src.size(); - bytes_ = std::make_unique(size); + bytes_ = std::make_unique(size); src.copyToStorage(bytes_.get()); table.incRefCount(statName()); } @@ -322,13 +317,55 @@ void StatNameStorage::free(SymbolTable& table) { bytes_.reset(); } +void StatNamePool::clear() { + for (StatNameStorage& storage : storage_vector_) { + storage.free(symbol_table_); + } + storage_vector_.clear(); +} + +StatName StatNamePool::add(absl::string_view str) { + storage_vector_.push_back(Stats::StatNameStorage(str, symbol_table_)); + return StatName(storage_vector_.back().bytes()); +} + +StatNameStorageSet::~StatNameStorageSet() { + // free() must be called before destructing StatNameStorageSet to decrement + // references to all symbols. + ASSERT(hash_set_.empty()); +} + +void StatNameStorageSet::free(SymbolTable& symbol_table) { + // We must free() all symbols referenced in the set, otherwise the symbols + // will leak when the flat_hash_map superclass is destructed. They cannot + // self-destruct without an explicit free() as each individual StatNameStorage + // object does not have a reference to the symbol table, which would waste 8 + // bytes per stat-name. 
The easiest way to safely free all the contents of the + // symbol table set is to use flat_hash_map::extract(), which removes and + // returns an element from the set without destructing the element + // immediately. This gives us a chance to call free() on each one before they + // are destroyed. + // + // There's a performance risk here, if removing elements via + // flat_hash_set::begin() is inefficient to use in a loop like this. One can + // imagine a hash-table implementation where the performance of this + // usage-model would be poor. However, tests with 100k elements appeared to + // run quickly when compiled for optimization, so at present this is not a + // performance issue. + + while (!hash_set_.empty()) { + auto storage = hash_set_.extract(hash_set_.begin()); + storage.value().free(symbol_table); + } +} + SymbolTable::StoragePtr SymbolTableImpl::join(const std::vector& stat_names) const { uint64_t num_bytes = 0; for (StatName stat_name : stat_names) { num_bytes += stat_name.dataSize(); } - auto bytes = std::make_unique(num_bytes + StatNameSizeEncodingBytes); - uint8_t* p = saveLengthToBytesReturningNext(num_bytes, bytes.get()); + auto bytes = std::make_unique(num_bytes + StatNameSizeEncodingBytes); + uint8_t* p = writeLengthReturningNext(num_bytes, bytes.get()); for (StatName stat_name : stat_names) { num_bytes = stat_name.dataSize(); memcpy(p, stat_name.data(), num_bytes); @@ -337,34 +374,39 @@ SymbolTable::StoragePtr SymbolTableImpl::join(const std::vector& stat_ return bytes; } -StatNameList::~StatNameList() { ASSERT(!populated()); } - -void StatNameList::populate(const std::vector& names, - SymbolTable& symbol_table) { - RELEASE_ASSERT(names.size() < 256, "Maximum number elements in a StatNameList exceeded"); +void SymbolTableImpl::populateList(const absl::string_view* names, uint32_t num_names, + StatNameList& list) { + RELEASE_ASSERT(num_names < 256, "Maximum number elements in a StatNameList exceeded"); // First encode all the names. 
size_t total_size_bytes = 1; /* one byte for holding the number of names */ - std::vector encodings; - encodings.resize(names.size()); - int index = 0; - for (auto& name : names) { - SymbolEncoding encoding = symbol_table.encode(name); + + STACK_ARRAY(encodings, Encoding, num_names); + for (uint32_t i = 0; i < num_names; ++i) { + Encoding& encoding = encodings[i]; + addTokensToEncoding(names[i], encoding); total_size_bytes += encoding.bytesRequired(); - encodings[index++].swap(encoding); } // Now allocate the exact number of bytes required and move the encodings // into storage. - storage_ = std::make_unique(total_size_bytes); - uint8_t* p = &storage_[0]; - *p++ = encodings.size(); + auto storage = std::make_unique(total_size_bytes); + uint8_t* p = &storage[0]; + *p++ = num_names; for (auto& encoding : encodings) { p += encoding.moveToStorage(p); } - ASSERT(p == &storage_[0] + total_size_bytes); + + // This assertion double-checks the arithmetic where we computed + // total_size_bytes. After appending all the encoded data into the + // allocated byte array, we should wind up with a pointer difference of + // total_size_bytes from the beginning of the allocation. 
+ ASSERT(p == &storage[0] + total_size_bytes); + list.moveStorageIntoList(std::move(storage)); } +StatNameList::~StatNameList() { ASSERT(!populated()); } + void StatNameList::iterate(const std::function& f) const { uint8_t* p = &storage_[0]; uint32_t num_elements = *p++; diff --git a/source/common/stats/symbol_table_impl.h b/source/common/stats/symbol_table_impl.h index f7c33ebd1d93f..366963f790c77 100644 --- a/source/common/stats/symbol_table_impl.h +++ b/source/common/stats/symbol_table_impl.h @@ -15,6 +15,7 @@ #include "common/common/hash.h" #include "common/common/lock_guard.h" #include "common/common/non_copyable.h" +#include "common/common/stack_array.h" #include "common/common/thread.h" #include "common/common/utility.h" @@ -37,67 +38,6 @@ constexpr uint64_t StatNameMaxSize = 1 << (8 * StatNameSizeEncodingBytes); // 65 /** Transient representations of a vector of 32-bit symbols */ using SymbolVec = std::vector; -/** - * Represents an 8-bit encoding of a vector of symbols, used as a transient - * representation during encoding and prior to retained allocation. - */ -class SymbolEncoding { -public: - /** - * Before destructing SymbolEncoding, you must call moveToStorage. This - * transfers ownership, and in particular, the responsibility to call - * SymbolTable::clear() on all referenced symbols. If we ever wanted - * to be able to destruct a SymbolEncoding without transferring it - * we could add a clear(SymbolTable&) method. - */ - ~SymbolEncoding(); - - /** - * Encodes a token into the vec. - * - * @param symbol the symbol to encode. - */ - void addSymbol(Symbol symbol); - - /** - * Encodes an entire string into the vec, on behalf of FakeSymbolTableImpl. - * TODO(jmarantz): delete this method when FakeSymbolTableImpl is deleted. - * - * @param str The string to encode. - */ - void addStringForFakeSymbolTable(absl::string_view str); - - /** - * Decodes a uint8_t array into a SymbolVec. 
- */ - static SymbolVec decodeSymbols(const SymbolTable::Storage array, uint64_t size); - - /** - * Returns the number of bytes required to represent StatName as a uint8_t - * array, including the encoded size. - */ - uint64_t bytesRequired() const { return size() + StatNameSizeEncodingBytes; } - - /** - * Returns the number of uint8_t entries we collected while adding symbols. - */ - uint64_t size() const { return vec_.size(); } - - /** - * Moves the contents of the vector into an allocated array. The array - * must have been allocated with bytesRequired() bytes. - * - * @param array destination memory to receive the encoded bytes. - * @return uint64_t the number of bytes transferred. - */ - uint64_t moveToStorage(SymbolTable::Storage array); - - void swap(SymbolEncoding& src) { vec_.swap(src.vec_); } - -private: - std::vector vec_; -}; - /** * SymbolTableImpl manages a namespace optimized for stats, which are typically * composed of arrays of "."-separated tokens, with a significant overlap @@ -130,22 +70,95 @@ class SymbolEncoding { */ class SymbolTableImpl : public SymbolTable { public: + /** + * Intermediate representation for a stat-name. This helps store multiple + * names in a single packed allocation. First we encode each desired name, + * then sum their sizes for the single packed allocation. This is used to + * store MetricImpl's tags and tagExtractedName. + */ + class Encoding { + public: + /** + * Before destructing SymbolEncoding, you must call moveToStorage. This + * transfers ownership, and in particular, the responsibility to call + * SymbolTable::clear() on all referenced symbols. If we ever wanted + * to be able to destruct a SymbolEncoding without transferring it + * we could add a clear(SymbolTable&) method. + */ + ~Encoding(); + + /** + * Encodes a token into the vec. + * + * @param symbol the symbol to encode. + */ + void addSymbol(Symbol symbol); + + /** + * Decodes a uint8_t array into a SymbolVec. 
+ */ + static SymbolVec decodeSymbols(const SymbolTable::Storage array, uint64_t size); + + /** + * Returns the number of bytes required to represent StatName as a uint8_t + * array, including the encoded size. + */ + uint64_t bytesRequired() const { return dataBytesRequired() + StatNameSizeEncodingBytes; } + + /** + * @return the number of uint8_t entries we collected while adding symbols. + */ + uint64_t dataBytesRequired() const { return vec_.size(); } + + /** + * Moves the contents of the vector into an allocated array. The array + * must have been allocated with bytesRequired() bytes. + * + * @param array destination memory to receive the encoded bytes. + * @return uint64_t the number of bytes transferred. + */ + uint64_t moveToStorage(SymbolTable::Storage array); + + private: + std::vector vec_; + }; + SymbolTableImpl(); ~SymbolTableImpl() override; // SymbolTable std::string toString(const StatName& stat_name) const override; - SymbolEncoding encode(absl::string_view name) override; uint64_t numSymbols() const override; bool lessThan(const StatName& a, const StatName& b) const override; void free(const StatName& stat_name) override; void incRefCount(const StatName& stat_name) override; - SymbolTable::StoragePtr join(const std::vector& stat_names) const override; + StoragePtr join(const std::vector& stat_names) const override; + void populateList(const absl::string_view* names, uint32_t num_names, + StatNameList& list) override; + StoragePtr encode(absl::string_view name) override; + void callWithStringView(StatName stat_name, + const std::function& fn) const override; #ifndef ENVOY_CONFIG_COVERAGE void debugPrint() const override; #endif + /** + * Saves the specified length into the byte array, returning the next byte. + * There is no guarantee that bytes will be aligned, so we can't cast to a + * uint16_t* and assign, but must individually copy the bytes. + * + * @param length the length in bytes to write. Must be < StatNameMaxSize. 
+ * @param bytes the pointer into which to write the length. + * @return the pointer to the next byte for writing the data. + */ + static inline uint8_t* writeLengthReturningNext(uint64_t length, uint8_t* bytes) { + ASSERT(length < StatNameMaxSize); + *bytes++ = length & 0xff; + *bytes++ = length >> 8; + return bytes; + } + private: friend class StatName; friend class StatNameTest; @@ -157,7 +170,7 @@ class SymbolTableImpl : public SymbolTable { uint32_t ref_count_; }; - // This must be called during both encode() and free(). + // This must be held during both encode() and free(). mutable Thread::MutexBasicLockable lock_; /** @@ -187,9 +200,20 @@ class SymbolTableImpl : public SymbolTable { */ absl::string_view fromSymbol(Symbol symbol) const EXCLUSIVE_LOCKS_REQUIRED(lock_); - // Stages a new symbol for use. To be called after a successful insertion. + /** + * Stages a new symbol for use. To be called after a successful insertion. + */ void newSymbol(); + /** + * Tokenizes name, finds or allocates symbols for each token, and adds them + * to encoding. + * + * @param name The name to tokenize. + * @param encoding The encoding to write to. + */ + void addTokensToEncoding(absl::string_view name, Encoding& encoding); + Symbol monotonicCounter() { Thread::LockGuard lock(lock_); return monotonic_counter_; @@ -227,7 +251,7 @@ class SymbolTableImpl : public SymbolTable { * will fire to guard against symbol-table leaks. * * Thus this class is inconvenient to directly use as temp storage for building - * a StatName from a string. Instead it should be used via StatNameTempStorage. + * a StatName from a string. Instead it should be used via StatNameManagedStorage. 
*/ class StatNameStorage { public: @@ -263,6 +287,8 @@ class StatNameStorage { */ inline StatName statName() const; + uint8_t* bytes() { return bytes_.get(); } + private: SymbolTable::StoragePtr bytes_; }; @@ -329,10 +355,15 @@ class StatName { #endif /** - * @return uint8_t* A pointer to the first byte of data (skipping over size bytes). + * @return A pointer to the first byte of data (skipping over size bytes). */ const uint8_t* data() const { return size_and_data_ + StatNameSizeEncodingBytes; } + /** + * @return whether this is empty. + */ + bool empty() const { return size_and_data_ == nullptr || dataSize() == 0; } + private: const uint8_t* size_and_data_; }; @@ -343,25 +374,72 @@ StatName StatNameStorage::statName() const { return StatName(bytes_.get()); } * Contains the backing store for a StatName and enough context so it can * self-delete through RAII. This works by augmenting StatNameStorage with a * reference to the SymbolTable&, so it has an extra 8 bytes of footprint. It - * is intended to be used in tests or as a scoped temp in a function, rather - * than stored in a larger structure such as a map, where the redundant copies - * of the SymbolTable& would be costly in aggregate. + * is intended to be used in cases where simplicity of implementation is more + * important than byte-savings, for example: + * - outside the stats system + * - in tests + * - as a scoped temp in a function + * Due to the extra 8 bytes per instance, scalability should be taken into + * account before using this as (say) a value or key in a map. In those + * scenarios, it would be better to store the SymbolTable reference once + * for the entire map. + * + * In the stat structures, we generally use StatNameStorage to avoid the + * per-stat overhead. */ -class StatNameTempStorage : public StatNameStorage { +class StatNameManagedStorage : public StatNameStorage { public: // Basic constructor for when you have a name as a string, and need to // generate symbols for it. 
- StatNameTempStorage(absl::string_view name, SymbolTable& table) + StatNameManagedStorage(absl::string_view name, SymbolTable& table) : StatNameStorage(name, table), symbol_table_(table) {} // Obtains new backing storage for an already existing StatName. - StatNameTempStorage(StatName src, SymbolTable& table) + StatNameManagedStorage(StatName src, SymbolTable& table) : StatNameStorage(src, table), symbol_table_(table) {} - ~StatNameTempStorage() { free(symbol_table_); } + ~StatNameManagedStorage() { free(symbol_table_); } + + SymbolTable& symbolTable() { return symbol_table_; } + const SymbolTable& symbolTable() const { return symbol_table_; } + +private: + SymbolTable& symbol_table_; +}; + +/** + * Maintains storage for a collection of StatName objects. Like + * StatNameManagedStorage, this has an RAII usage model, taking + * care of decrementing ref-counts in the SymbolTable for all + * contained StatNames on destruction or on clear(); + * + * Example usage: + * StatNamePool pool(symbol_table); + * StatName name1 = pool.add("name1"); + * StatName name2 = pool.add("name2"); + */ +class StatNamePool { +public: + explicit StatNamePool(SymbolTable& symbol_table) : symbol_table_(symbol_table) {} + ~StatNamePool() { clear(); } + + /** + * Removes all StatNames from the pool. + */ + void clear(); + + /** + * @param name the name to add the container. + * @return the StatName held in the container for this name. + */ + StatName add(absl::string_view name); private: + // We keep the stat names in a vector of StatNameStorage, storing the + // SymbolTable reference separately. This saves 8 bytes per StatName, + // at the cost of having a destructor that calls clear(). SymbolTable& symbol_table_; + std::vector storage_vector_; }; // Represents an ordered container of StatNames. The encoding for each StatName @@ -376,16 +454,6 @@ class StatNameList { public: ~StatNameList(); - /** - * Populates the StatNameList from a list of encodings. 
This is not done at - * construction time to enable StatNameList to be instantiated directly in - * a class that doesn't have a live SymbolTable when it is constructed. - * - * @param encodings The list names to encode. - * @param symbol_table The symbol table in which to encode the names. - */ - void populate(const std::vector& encodings, SymbolTable& symbol_table); - /** * @return true if populate() has been called on this list. */ @@ -411,7 +479,28 @@ class StatNameList { void clear(SymbolTable& symbol_table); private: - std::unique_ptr storage_; + friend class FakeSymbolTableImpl; + friend class SymbolTableImpl; + + /** + * Moves the specified storage into the list. The storage format is an + * array of bytes, organized like this: + * + * [0] The number of elements in the list (must be < 256). + * [1] low order 8 bits of the number of symbols in the first element. + * [2] high order 8 bits of the number of symbols in the first element. + * [3...] the symbols in the first element. + * ... + * + * + * For FakeSymbolTableImpl, each symbol is a single char, casted into a + * uint8_t. For SymbolTableImpl, each symbol is 1 or more bytes, in a + * variable-length encoding. See SymbolTableImpl::Encoding::addSymbol for + * details. + */ + void moveStorageIntoList(SymbolTable::StoragePtr&& storage) { storage_ = std::move(storage); } + + SymbolTable::StoragePtr storage_; }; // Helper class for constructing hash-tables with StatName keys. @@ -441,5 +530,85 @@ struct StatNameLessThan { const SymbolTable& symbol_table_; }; +struct HeterogeneousStatNameHash { + // Specifying is_transparent indicates to the library infrastructure that + // type-conversions should not be applied when calling find(), but instead + // pass the actual types of the contained and searched-for objects directly to + // these functors. 
See + // https://en.cppreference.com/w/cpp/utility/functional/less_void for an + // official reference, and https://abseil.io/tips/144 for a description of + // using it in the context of absl. + using is_transparent = void; + + size_t operator()(StatName a) const { return a.hash(); } + size_t operator()(const StatNameStorage& a) const { return a.statName().hash(); } +}; + +struct HeterogeneousStatNameEqual { + // See description for HeterogeneousStatNameHash::is_transparent. + using is_transparent = void; + + size_t operator()(StatName a, StatName b) const { return a == b; } + size_t operator()(const StatNameStorage& a, const StatNameStorage& b) const { + return a.statName() == b.statName(); + } + size_t operator()(StatName a, const StatNameStorage& b) const { return a == b.statName(); } + size_t operator()(const StatNameStorage& a, StatName b) const { return a.statName() == b; } +}; + +// Encapsulates a set. We use containment here rather than a +// 'using' alias because we need to ensure that when the set is destructed, +// StatNameStorage::free(symbol_table) is called on each entry. It is a little +// easier at the call-sites in thread_local_store.cc to implement this an +// explicit free() method, analogous to StatNameStorage::free(), compared to +// storing a SymbolTable reference in the class and doing the free in the +// destructor, like StatNameManagedStorage. +class StatNameStorageSet { +public: + using HashSet = + absl::flat_hash_set; + using iterator = HashSet::iterator; + + ~StatNameStorageSet(); + + /** + * Releases all symbols held in this set. Must be called prior to destruction. + * + * @param symbol_table The symbol table that owns the symbols. + */ + void free(SymbolTable& symbol_table); + + /** + * @param storage The StatNameStorage to add to the set. + */ + std::pair insert(StatNameStorage&& storage) { + return hash_set_.insert(std::move(storage)); + } + + /** + * @param stat_name The stat_name to find. 
+ * @return the iterator pointing to the stat_name, or end() if not found. + */ + iterator find(StatName stat_name) { return hash_set_.find(stat_name); } + + /** + * @return the end-marker. + */ + iterator end() { return hash_set_.end(); } + + /** + * @param set the storage set to swap with. + */ + void swap(StatNameStorageSet& set) { hash_set_.swap(set.hash_set_); } + + /** + * @return the number of elements in the set. + */ + size_t size() const { return hash_set_.size(); } + +private: + HashSet hash_set_; +}; + } // namespace Stats } // namespace Envoy diff --git a/source/common/stats/tag_extractor_impl.cc b/source/common/stats/tag_extractor_impl.cc index 092e66f0edd8b..acf440fa695e5 100644 --- a/source/common/stats/tag_extractor_impl.cc +++ b/source/common/stats/tag_extractor_impl.cc @@ -62,11 +62,11 @@ TagExtractorPtr TagExtractorImpl::createTagExtractor(const std::string& name, return TagExtractorPtr{new TagExtractorImpl(name, regex, substr)}; } -bool TagExtractorImpl::substrMismatch(const std::string& stat_name) const { - return !substr_.empty() && stat_name.find(substr_) == std::string::npos; +bool TagExtractorImpl::substrMismatch(absl::string_view stat_name) const { + return !substr_.empty() && stat_name.find(substr_) == absl::string_view::npos; } -bool TagExtractorImpl::extractTag(const std::string& stat_name, std::vector& tags, +bool TagExtractorImpl::extractTag(absl::string_view stat_name, std::vector& tags, IntervalSet& remove_characters) const { PERF_OPERATION(perf); @@ -75,9 +75,11 @@ bool TagExtractorImpl::extractTag(const std::string& stat_name, std::vector return false; } - std::smatch match; + std::match_results match; // The regex must match and contain one or more subexpressions (all after the first are ignored). - if (std::regex_search(stat_name, match, regex_) && match.size() > 1) { + if (std::regex_search(stat_name.begin(), stat_name.end(), match, + regex_) && + match.size() > 1) { // remove_subexpr is the first submatch. 
It represents the portion of the string to be removed. const auto& remove_subexpr = match[1]; diff --git a/source/common/stats/tag_extractor_impl.h b/source/common/stats/tag_extractor_impl.h index c72765a75234c..138f25b6af7c0 100644 --- a/source/common/stats/tag_extractor_impl.h +++ b/source/common/stats/tag_extractor_impl.h @@ -28,7 +28,7 @@ class TagExtractorImpl : public TagExtractor { TagExtractorImpl(const std::string& name, const std::string& regex, const std::string& substr = ""); std::string name() const override { return name_; } - bool extractTag(const std::string& tag_extracted_name, std::vector& tags, + bool extractTag(absl::string_view tag_extracted_name, std::vector& tags, IntervalSet& remove_characters) const override; absl::string_view prefixToken() const override { return prefix_; } @@ -37,7 +37,7 @@ class TagExtractorImpl : public TagExtractor { * @return bool indicates whether tag extraction should be skipped for this stat_name due * to a substring mismatch. */ - bool substrMismatch(const std::string& stat_name) const; + bool substrMismatch(absl::string_view stat_name) const; private: /** diff --git a/source/common/stats/tag_producer_impl.cc b/source/common/stats/tag_producer_impl.cc index d23d609c8a44a..f03a4b6553e3e 100644 --- a/source/common/stats/tag_producer_impl.cc +++ b/source/common/stats/tag_producer_impl.cc @@ -63,12 +63,12 @@ void TagProducerImpl::addExtractor(TagExtractorPtr extractor) { } void TagProducerImpl::forEachExtractorMatching( - const std::string& stat_name, std::function f) const { + absl::string_view stat_name, std::function f) const { IntervalSetImpl remove_characters; for (const TagExtractorPtr& tag_extractor : tag_extractors_without_prefix_) { f(tag_extractor); } - const std::string::size_type dot = stat_name.find('.'); + const absl::string_view::size_type dot = stat_name.find('.'); if (dot != std::string::npos) { const absl::string_view token = absl::string_view(stat_name.data(), dot); const auto iter = 
tag_extractor_prefix_map_.find(token); @@ -80,7 +80,7 @@ void TagProducerImpl::forEachExtractorMatching( } } -std::string TagProducerImpl::produceTags(const std::string& metric_name, +std::string TagProducerImpl::produceTags(absl::string_view metric_name, std::vector& tags) const { tags.insert(tags.end(), default_tags_.begin(), default_tags_.end()); IntervalSetImpl remove_characters; diff --git a/source/common/stats/tag_producer_impl.h b/source/common/stats/tag_producer_impl.h index 1614bcb28299e..505cb71929aac 100644 --- a/source/common/stats/tag_producer_impl.h +++ b/source/common/stats/tag_producer_impl.h @@ -37,7 +37,7 @@ class TagProducerImpl : public TagProducer { * @param metric_name std::string a name of Stats::Metric (Counter, Gauge, Histogram). * @param tags std::vector a set of Stats::Tag. */ - std::string produceTags(const std::string& metric_name, std::vector& tags) const override; + std::string produceTags(absl::string_view metric_name, std::vector& tags) const override; private: friend class DefaultTagRegexTester; @@ -89,7 +89,7 @@ class TagProducerImpl : public TagProducer { * @param stat_name const std::string& the stat name. * @param f std::function function to call for each extractor. 
*/ - void forEachExtractorMatching(const std::string& stat_name, + void forEachExtractorMatching(absl::string_view stat_name, std::function f) const; std::vector tag_extractors_without_prefix_; diff --git a/source/common/stats/thread_local_store.cc b/source/common/stats/thread_local_store.cc index 2c72dbf94b983..03d799509c756 100644 --- a/source/common/stats/thread_local_store.cc +++ b/source/common/stats/thread_local_store.cc @@ -10,9 +10,9 @@ #include "envoy/stats/sink.h" #include "envoy/stats/stat_data_allocator.h" #include "envoy/stats/stats.h" -#include "envoy/stats/stats_options.h" #include "common/common/lock_guard.h" +#include "common/stats/scope_prefixer.h" #include "common/stats/stats_matcher_impl.h" #include "common/stats/tag_producer_impl.h" @@ -21,21 +21,28 @@ namespace Envoy { namespace Stats { -ThreadLocalStoreImpl::ThreadLocalStoreImpl(const StatsOptions& stats_options, - StatDataAllocator& alloc) - : stats_options_(stats_options), alloc_(alloc), default_scope_(createScope("")), +ThreadLocalStoreImpl::ThreadLocalStoreImpl(StatDataAllocator& alloc) + : alloc_(alloc), default_scope_(createScope("")), tag_producer_(std::make_unique()), - stats_matcher_(std::make_unique()), - num_last_resort_stats_(default_scope_->counter("stats.overflow")), source_(*this) {} + stats_matcher_(std::make_unique()), heap_allocator_(alloc.symbolTable()), + source_(*this), null_counter_(alloc.symbolTable()), null_gauge_(alloc.symbolTable()), + null_histogram_(alloc.symbolTable()) {} ThreadLocalStoreImpl::~ThreadLocalStoreImpl() { - ASSERT(shutting_down_); + ASSERT(shutting_down_ || !threading_ever_initialized_); default_scope_.reset(); ASSERT(scopes_.empty()); + for (StatNameStorageSet* rejected_stats : rejected_stats_purgatory_) { + rejected_stats->free(symbolTable()); + delete rejected_stats; + } } void ThreadLocalStoreImpl::setStatsMatcher(StatsMatcherPtr&& stats_matcher) { stats_matcher_ = std::move(stats_matcher); + if (stats_matcher_->acceptsAll()) { + return; + } // 
The Filesystem and potentially other stat-registering objects are // constructed prior to the stat-matcher, and those add stats @@ -51,31 +58,41 @@ void ThreadLocalStoreImpl::setStatsMatcher(StatsMatcherPtr&& stats_matcher) { template void ThreadLocalStoreImpl::removeRejectedStats(StatMapClass& map, StatListClass& list) { - std::vector remove_list; + std::vector remove_list; for (auto& stat : map) { if (rejects(stat.first)) { remove_list.push_back(stat.first); } } - for (const char* stat_name : remove_list) { - auto p = map.find(stat_name); - ASSERT(p != map.end()); - list.push_back(p->second); // Save SharedPtr to the list to avoid invalidating refs to stat. - map.erase(p); + for (StatName stat_name : remove_list) { + auto iter = map.find(stat_name); + ASSERT(iter != map.end()); + list.push_back(iter->second); // Save SharedPtr to the list to avoid invalidating refs to stat. + map.erase(iter); } } -bool ThreadLocalStoreImpl::rejects(const std::string& name) const { +bool ThreadLocalStoreImpl::rejects(StatName stat_name) const { + // Don't both elaborating the StatName there are no pattern-based + // exclusions;/inclusions. + if (stats_matcher_->acceptsAll()) { + return false; + } + // TODO(ambuc): If stats_matcher_ depends on regexes, this operation (on the // hot path) could become prohibitively expensive. Revisit this usage in the // future. - return stats_matcher_->rejects(name); + // + // Also note that the elaboration of the stat-name into a string is expensive, + // so I think it might be better to move the matcher test until after caching, + // unless its acceptsAll/rejectsAll. + return stats_matcher_->rejectsAll() || stats_matcher_->rejects(symbolTable().toString(stat_name)); } std::vector ThreadLocalStoreImpl::counters() const { // Handle de-dup due to overlapping scopes. 
std::vector ret; - CharStarHashSet names; + StatNameHashSet names; Thread::LockGuard lock(lock_); for (ScopeImpl* scope : scopes_) { for (auto& counter : scope->central_cache_.counters_) { @@ -89,16 +106,16 @@ std::vector ThreadLocalStoreImpl::counters() const { } ScopePtr ThreadLocalStoreImpl::createScope(const std::string& name) { - std::unique_ptr new_scope(new ScopeImpl(*this, name)); + auto new_scope = std::make_unique(*this, name); Thread::LockGuard lock(lock_); scopes_.emplace(new_scope.get()); - return std::move(new_scope); + return new_scope; } std::vector ThreadLocalStoreImpl::gauges() const { // Handle de-dup due to overlapping scopes. std::vector ret; - CharStarHashSet names; + StatNameHashSet names; Thread::LockGuard lock(lock_); for (ScopeImpl* scope : scopes_) { for (auto& gauge : scope->central_cache_.gauges_) { @@ -131,6 +148,7 @@ std::vector ThreadLocalStoreImpl::histograms() const { void ThreadLocalStoreImpl::initializeThreading(Event::Dispatcher& main_thread_dispatcher, ThreadLocal::Instance& tls) { + threading_ever_initialized_ = true; main_thread_dispatcher_ = &main_thread_dispatcher; tls_ = tls.allocateSlot(); tls_->set([](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { @@ -175,64 +193,138 @@ void ThreadLocalStoreImpl::mergeInternal(PostMergeCb merge_complete_cb) { } void ThreadLocalStoreImpl::releaseScopeCrossThread(ScopeImpl* scope) { - Thread::LockGuard lock(lock_); + Thread::ReleasableLockGuard lock(lock_); ASSERT(scopes_.count(scope) == 1); scopes_.erase(scope); + // This is called directly from the ScopeImpl destructor, but we can't delay + // the destruction of scope->central_cache_.central_cache_.rejected_stats_ + // to wait for all the TLS rejected_stats_ caches to be destructed, as those + // reference elements of SharedStatNameStorageSet. So simply swap out the set + // contents into a local that we can hold onto until the TLS cache is cleared + // of all references. 
+ // + // We use a raw pointer here as it's easier to capture it in in the lambda. + auto rejected_stats = new StatNameStorageSet; + rejected_stats->swap(scope->central_cache_.rejected_stats_); + // This can happen from any thread. We post() back to the main thread which will initiate the // cache flush operation. if (!shutting_down_ && main_thread_dispatcher_) { - main_thread_dispatcher_->post( - [this, scope_id = scope->scope_id_]() -> void { clearScopeFromCaches(scope_id); }); + const uint64_t scope_id = scope->scope_id_; + + // We must delay the cleanup of the rejected stats storage until all the + // thread-local caches are cleared. This happens by post(), and it's + // possible that post() will not run, such as when an exception is thrown + // during startup. To avoid leaking memory and thus failing tests when + // this occurs, we hold the rejected stats in 'purgatory', so they can + // be cleared out in the ThreadLocalStoreImpl destructor. We'd prefer + // to release the memory immediately, however, in which case we remove + // the rejected stats set from purgatory. 
+ rejected_stats_purgatory_.insert(rejected_stats); + auto clean_central_cache = [this, rejected_stats]() { + { + Thread::LockGuard lock(lock_); + rejected_stats_purgatory_.erase(rejected_stats); + } + rejected_stats->free(symbolTable()); + delete rejected_stats; + }; + lock.release(); + main_thread_dispatcher_->post([this, clean_central_cache, scope_id]() { + clearScopeFromCaches(scope_id, clean_central_cache); + }); + } else { + rejected_stats->free(symbolTable()); + delete rejected_stats; } } -std::string ThreadLocalStoreImpl::getTagsForName(const std::string& name, - std::vector& tags) const { - return tag_producer_->produceTags(name, tags); -} - -void ThreadLocalStoreImpl::clearScopeFromCaches(uint64_t scope_id) { +void ThreadLocalStoreImpl::clearScopeFromCaches(uint64_t scope_id, + const Event::PostCb& clean_central_cache) { // If we are shutting down we no longer perform cache flushes as workers may be shutting down // at the same time. if (!shutting_down_) { // Perform a cache flush on all threads. tls_->runOnAllThreads( - [this, scope_id]() -> void { tls_->getTyped().scope_cache_.erase(scope_id); }); + [this, scope_id]() -> void { tls_->getTyped().scope_cache_.erase(scope_id); }, + clean_central_cache); } } -absl::string_view ThreadLocalStoreImpl::truncateStatNameIfNeeded(absl::string_view name) { - // If the main allocator requires stat name truncation, do so now, though any - // warnings will be printed only if the truncated stat requires a new - // allocation. 
- if (alloc_.requiresBoundedStatNameSize()) { - const uint64_t max_length = stats_options_.maxNameLength(); - name = name.substr(0, max_length); - } - return name; +std::atomic ThreadLocalStoreImpl::ScopeImpl::next_scope_id_; + +ThreadLocalStoreImpl::ScopeImpl::ScopeImpl(ThreadLocalStoreImpl& parent, const std::string& prefix) + : scope_id_(next_scope_id_++), parent_(parent), + prefix_(Utility::sanitizeStatsName(prefix), parent.symbolTable()) {} + +ThreadLocalStoreImpl::ScopeImpl::~ScopeImpl() { + parent_.releaseScopeCrossThread(this); + prefix_.free(symbolTable()); } -std::atomic ThreadLocalStoreImpl::ScopeImpl::next_scope_id_; +// Manages the truncation and tag-extration of stat names. Tag extraction occurs +// on the original, untruncated name so the extraction can complete properly, +// even if the tag values are partially truncated. +class TagExtraction { +public: + TagExtraction(ThreadLocalStoreImpl& tls, StatName name) { + tls.symbolTable().callWithStringView(name, [this, &tls](absl::string_view name_str) { + tag_extracted_name_ = tls.tagProducer().produceTags(name_str, tags_); + }); + } + + const std::vector& tags() { return tags_; } + const std::string& tagExtractedName() { return tag_extracted_name_; } + +private: + std::vector tags_; + std::string tag_extracted_name_; +}; + +bool ThreadLocalStoreImpl::checkAndRememberRejection(StatName name, + StatNameStorageSet& central_rejected_stats, + StatNameHashSet* tls_rejected_stats) { + if (stats_matcher_->acceptsAll()) { + return false; + } -ThreadLocalStoreImpl::ScopeImpl::~ScopeImpl() { parent_.releaseScopeCrossThread(this); } + auto iter = central_rejected_stats.find(name); + const StatNameStorage* rejected_name = nullptr; + if (iter != central_rejected_stats.end()) { + rejected_name = &(*iter); + } else { + if (rejects(name)) { + auto insertion = central_rejected_stats.insert(StatNameStorage(name, symbolTable())); + const StatNameStorage& rejected_name_ref = *(insertion.first); + rejected_name = 
&rejected_name_ref; + } + } + if (rejected_name != nullptr) { + if (tls_rejected_stats != nullptr) { + tls_rejected_stats->insert(rejected_name->statName()); + } + return true; + } + return false; +} template StatType& ThreadLocalStoreImpl::ScopeImpl::safeMakeStat( - const std::string& name, StatMap>& central_cache_map, - MakeStatFn make_stat, StatMap>* tls_cache) { + StatName name, StatMap>& central_cache_map, + StatNameStorageSet& central_rejected_stats, MakeStatFn make_stat, + StatMap>* tls_cache, StatNameHashSet* tls_rejected_stats, + StatType& null_stat) { - const char* stat_key = name.c_str(); - std::unique_ptr truncation_buffer; - absl::string_view truncated_name = parent_.truncateStatNameIfNeeded(name); - if (truncated_name.size() < name.size()) { - truncation_buffer = std::make_unique(std::string(truncated_name)); - stat_key = truncation_buffer->c_str(); // must be nul-terminated. + // We do name-rejections on the full name, prior to truncation. + if (tls_rejected_stats != nullptr && + tls_rejected_stats->find(name) != tls_rejected_stats->end()) { + return null_stat; } // If we have a valid cache entry, return it. if (tls_cache) { - auto pos = tls_cache->find(stat_key); + auto pos = tls_cache->find(name); if (pos != tls_cache->end()) { return *pos->second; } @@ -241,50 +333,36 @@ StatType& ThreadLocalStoreImpl::ScopeImpl::safeMakeStat( // We must now look in the central store so we must be locked. We grab a reference to the // central store location. It might contain nothing. In this case, we allocate a new stat. 
Thread::LockGuard lock(parent_.lock_); - auto p = central_cache_map.find(stat_key); + auto iter = central_cache_map.find(name); std::shared_ptr* central_ref = nullptr; - if (p != central_cache_map.end()) { - central_ref = &(p->second); + if (iter != central_cache_map.end()) { + central_ref = &(iter->second); + } else if (parent_.checkAndRememberRejection(name, central_rejected_stats, tls_rejected_stats)) { + // Note that again we do the name-rejection lookup on the untruncated name. + return null_stat; } else { - // If we had to truncate, warn now that we've missed all caches. - if (truncation_buffer != nullptr) { - ENVOY_LOG_MISC( - warn, - "Statistic '{}' is too long with {} characters, it will be truncated to {} characters", - name, name.size(), truncation_buffer->size()); - } - - std::vector tags; - - // Tag extraction occurs on the original, untruncated name so the extraction - // can complete properly, even if the tag values are partially truncated. - std::string tag_extracted_name = parent_.getTagsForName(name, tags); + TagExtraction extraction(parent_, name); std::shared_ptr stat = - make_stat(parent_.alloc_, truncated_name, std::move(tag_extracted_name), std::move(tags)); - if (stat == nullptr) { - // TODO(jmarantz): If make_stat fails, the actual move does not actually occur - // for tag_extracted_name and tags, so there is no use-after-move problem. - // In order to increase the readability of the code, refactoring is done here. - parent_.num_last_resort_stats_.inc(); - stat = make_stat(parent_.heap_allocator_, truncated_name, - std::move(tag_extracted_name), // NOLINT(bugprone-use-after-move) - std::move(tags)); // NOLINT(bugprone-use-after-move) - ASSERT(stat != nullptr); - } - central_ref = ¢ral_cache_map[stat->nameCStr()]; + make_stat(parent_.alloc_, name, extraction.tagExtractedName(), extraction.tags()); + ASSERT(stat != nullptr); + central_ref = ¢ral_cache_map[stat->statName()]; *central_ref = stat; } // If we have a TLS cache, insert the stat. 
if (tls_cache) { - tls_cache->insert(std::make_pair((*central_ref)->nameCStr(), *central_ref)); + tls_cache->insert(std::make_pair((*central_ref)->statName(), *central_ref)); } // Finally we return the reference. return **central_ref; } -Counter& ThreadLocalStoreImpl::ScopeImpl::counter(const std::string& name) { +Counter& ThreadLocalStoreImpl::ScopeImpl::counterFromStatName(StatName name) { + if (parent_.rejectsAll()) { + return parent_.null_counter_; + } + // Determine the final name based on the prefix and the passed name. // // Note that we can do map.find(final_name.c_str()), but we cannot do @@ -294,25 +372,26 @@ Counter& ThreadLocalStoreImpl::ScopeImpl::counter(const std::string& name) { // after we construct the stat we can insert it into the required maps. This // strategy costs an extra hash lookup for each miss, but saves time // re-copying the string and significant memory overhead. - std::string final_name = prefix_ + name; - if (parent_.rejects(final_name)) { - return null_counter_; - } + Stats::SymbolTable::StoragePtr final_name = symbolTable().join({prefix_.statName(), name}); + StatName final_stat_name(final_name.get()); // We now find the TLS cache. This might remain null if we don't have TLS // initialized currently. 
StatMap* tls_cache = nullptr; + StatNameHashSet* tls_rejected_stats = nullptr; if (!parent_.shutting_down_ && parent_.tls_) { - tls_cache = &parent_.tls_->getTyped().scope_cache_[this->scope_id_].counters_; + TlsCacheEntry& entry = parent_.tls_->getTyped().scope_cache_[this->scope_id_]; + tls_cache = &entry.counters_; + tls_rejected_stats = &entry.rejected_stats_; } return safeMakeStat( - final_name, central_cache_.counters_, - [](StatDataAllocator& allocator, absl::string_view name, std::string&& tag_extracted_name, - std::vector&& tags) -> CounterSharedPtr { - return allocator.makeCounter(name, std::move(tag_extracted_name), std::move(tags)); + final_stat_name, central_cache_.counters_, central_cache_.rejected_stats_, + [](StatDataAllocator& allocator, StatName name, absl::string_view tag_extracted_name, + const std::vector& tags) -> CounterSharedPtr { + return allocator.makeCounter(name, tag_extracted_name, tags); }, - tls_cache); + tls_cache, tls_rejected_stats, parent_.null_counter_); } void ThreadLocalStoreImpl::ScopeImpl::deliverHistogramToSinks(const Histogram& histogram, @@ -331,7 +410,11 @@ void ThreadLocalStoreImpl::ScopeImpl::deliverHistogramToSinks(const Histogram& h } } -Gauge& ThreadLocalStoreImpl::ScopeImpl::gauge(const std::string& name) { +Gauge& ThreadLocalStoreImpl::ScopeImpl::gaugeFromStatName(StatName name) { + if (parent_.rejectsAll()) { + return parent_.null_gauge_; + } + // See comments in counter(). There is no super clean way (via templates or otherwise) to // share this code so I'm leaving it largely duplicated for now. // @@ -340,26 +423,31 @@ Gauge& ThreadLocalStoreImpl::ScopeImpl::gauge(const std::string& name) { // a temporary, and address sanitization errors would follow. Instead we must // do a find() first, using that if it succeeds. If it fails, then after we // construct the stat we can insert it into the required maps. 
- std::string final_name = prefix_ + name; - if (parent_.rejects(final_name)) { - return null_gauge_; - } + Stats::SymbolTable::StoragePtr final_name = symbolTable().join({prefix_.statName(), name}); + StatName final_stat_name(final_name.get()); StatMap* tls_cache = nullptr; + StatNameHashSet* tls_rejected_stats = nullptr; if (!parent_.shutting_down_ && parent_.tls_) { - tls_cache = &parent_.tls_->getTyped().scope_cache_[this->scope_id_].gauges_; + TlsCacheEntry& entry = parent_.tls_->getTyped().scope_cache_[this->scope_id_]; + tls_cache = &entry.gauges_; + tls_rejected_stats = &entry.rejected_stats_; } return safeMakeStat( - final_name, central_cache_.gauges_, - [](StatDataAllocator& allocator, absl::string_view name, std::string&& tag_extracted_name, - std::vector&& tags) -> GaugeSharedPtr { - return allocator.makeGauge(name, std::move(tag_extracted_name), std::move(tags)); + final_stat_name, central_cache_.gauges_, central_cache_.rejected_stats_, + [](StatDataAllocator& allocator, StatName name, absl::string_view tag_extracted_name, + const std::vector& tags) -> GaugeSharedPtr { + return allocator.makeGauge(name, tag_extracted_name, tags); }, - tls_cache); + tls_cache, tls_rejected_stats, parent_.null_gauge_); } -Histogram& ThreadLocalStoreImpl::ScopeImpl::histogram(const std::string& name) { +Histogram& ThreadLocalStoreImpl::ScopeImpl::histogramFromStatName(StatName name) { + if (parent_.rejectsAll()) { + return parent_.null_histogram_; + } + // See comments in counter(). There is no super clean way (via templates or otherwise) to // share this code so I'm leaving it largely duplicated for now. // @@ -368,81 +456,91 @@ Histogram& ThreadLocalStoreImpl::ScopeImpl::histogram(const std::string& name) { // a temporary, and address sanitization errors would follow. Instead we must // do a find() first, using that if it succeeds. If it fails, then after we // construct the stat we can insert it into the required maps. 
- std::string final_name = prefix_ + name; - if (parent_.rejects(final_name)) { - return null_histogram_; - } + Stats::SymbolTable::StoragePtr final_name = symbolTable().join({prefix_.statName(), name}); + StatName final_stat_name(final_name.get()); StatMap* tls_cache = nullptr; + StatNameHashSet* tls_rejected_stats = nullptr; if (!parent_.shutting_down_ && parent_.tls_) { - tls_cache = - &parent_.tls_->getTyped().scope_cache_[this->scope_id_].parent_histograms_; - auto p = tls_cache->find(final_name.c_str()); - if (p != tls_cache->end()) { - return *p->second; + TlsCacheEntry& entry = parent_.tls_->getTyped().scope_cache_[this->scope_id_]; + tls_cache = &entry.parent_histograms_; + auto iter = tls_cache->find(final_stat_name); + if (iter != tls_cache->end()) { + return *iter->second; + } + tls_rejected_stats = &entry.rejected_stats_; + if (tls_rejected_stats->find(final_stat_name) != tls_rejected_stats->end()) { + return parent_.null_histogram_; } } Thread::LockGuard lock(parent_.lock_); - auto p = central_cache_.histograms_.find(final_name.c_str()); + auto iter = central_cache_.histograms_.find(final_stat_name); ParentHistogramImplSharedPtr* central_ref = nullptr; - if (p != central_cache_.histograms_.end()) { - central_ref = &p->second; + if (iter != central_cache_.histograms_.end()) { + central_ref = &iter->second; + } else if (parent_.checkAndRememberRejection(final_stat_name, central_cache_.rejected_stats_, + tls_rejected_stats)) { + return parent_.null_histogram_; } else { - std::vector tags; - std::string tag_extracted_name = parent_.getTagsForName(final_name, tags); + TagExtraction extraction(parent_, final_stat_name); auto stat = std::make_shared( - final_name, parent_, *this, std::move(tag_extracted_name), std::move(tags)); - central_ref = ¢ral_cache_.histograms_[stat->nameCStr()]; + final_stat_name, parent_, *this, extraction.tagExtractedName(), extraction.tags()); + central_ref = ¢ral_cache_.histograms_[stat->statName()]; *central_ref = stat; } if 
(tls_cache != nullptr) { - tls_cache->insert(std::make_pair((*central_ref)->nameCStr(), *central_ref)); + tls_cache->insert(std::make_pair((*central_ref)->statName(), *central_ref)); } return **central_ref; } -Histogram& ThreadLocalStoreImpl::ScopeImpl::tlsHistogram(const std::string& name, +Histogram& ThreadLocalStoreImpl::ScopeImpl::tlsHistogram(StatName name, ParentHistogramImpl& parent) { - if (parent_.rejects(name)) { - return null_histogram_; - } + // tlsHistogram() is generally not called for a histogram that is rejected by + // the matcher, so no further rejection-checking is needed at this level. + // TlsHistogram inherits its reject/accept status from ParentHistogram. - // See comments in counter() which explains the logic here. + // See comments in counterFromStatName() which explains the logic here. StatMap* tls_cache = nullptr; if (!parent_.shutting_down_ && parent_.tls_) { tls_cache = &parent_.tls_->getTyped().scope_cache_[this->scope_id_].histograms_; - auto p = tls_cache->find(name.c_str()); - if (p != tls_cache->end()) { - return *p->second; + auto iter = tls_cache->find(name); + if (iter != tls_cache->end()) { + return *iter->second; } } std::vector tags; - std::string tag_extracted_name = parent_.getTagsForName(name, tags); - TlsHistogramSharedPtr hist_tls_ptr = std::make_shared( - name, std::move(tag_extracted_name), std::move(tags)); + std::string tag_extracted_name = + parent_.tagProducer().produceTags(symbolTable().toString(name), tags); + TlsHistogramSharedPtr hist_tls_ptr = + std::make_shared(name, tag_extracted_name, tags, symbolTable()); parent.addTlsHistogram(hist_tls_ptr); if (tls_cache) { - tls_cache->insert(std::make_pair(hist_tls_ptr->nameCStr(), hist_tls_ptr)); + tls_cache->insert(std::make_pair(hist_tls_ptr->statName(), hist_tls_ptr)); } return *hist_tls_ptr; } -ThreadLocalHistogramImpl::ThreadLocalHistogramImpl(const std::string& name, - std::string&& tag_extracted_name, - std::vector&& tags) - : 
MetricImpl(std::move(tag_extracted_name), std::move(tags)), current_active_(0), flags_(0), - created_thread_id_(std::this_thread::get_id()), name_(name) { +ThreadLocalHistogramImpl::ThreadLocalHistogramImpl(StatName name, + absl::string_view tag_extracted_name, + const std::vector& tags, + SymbolTable& symbol_table) + : MetricImpl(tag_extracted_name, tags, symbol_table), current_active_(0), flags_(0), + created_thread_id_(std::this_thread::get_id()), name_(name, symbol_table), + symbol_table_(symbol_table) { histograms_[0] = hist_alloc(); histograms_[1] = hist_alloc(); } ThreadLocalHistogramImpl::~ThreadLocalHistogramImpl() { + MetricImpl::clear(); + name_.free(symbolTable()); hist_free(histograms_[0]); hist_free(histograms_[1]); } @@ -459,21 +557,23 @@ void ThreadLocalHistogramImpl::merge(histogram_t* target) { hist_clear(*other_histogram); } -ParentHistogramImpl::ParentHistogramImpl(const std::string& name, Store& parent, - TlsScope& tls_scope, std::string&& tag_extracted_name, - std::vector&& tags) - : MetricImpl(std::move(tag_extracted_name), std::move(tags)), parent_(parent), +ParentHistogramImpl::ParentHistogramImpl(StatName name, Store& parent, TlsScope& tls_scope, + absl::string_view tag_extracted_name, + const std::vector& tags) + : MetricImpl(tag_extracted_name, tags, parent.symbolTable()), parent_(parent), tls_scope_(tls_scope), interval_histogram_(hist_alloc()), cumulative_histogram_(hist_alloc()), interval_statistics_(interval_histogram_), cumulative_statistics_(cumulative_histogram_), - merged_(false), name_(name) {} + merged_(false), name_(name, parent.symbolTable()) {} ParentHistogramImpl::~ParentHistogramImpl() { + MetricImpl::clear(); + name_.free(symbolTable()); hist_free(interval_histogram_); hist_free(cumulative_histogram_); } void ParentHistogramImpl::recordValue(uint64_t value) { - Histogram& tls_histogram = tls_scope_.tlsHistogram(name(), *this); + Histogram& tls_histogram = tls_scope_.tlsHistogram(statName(), *this); 
tls_histogram.recordValue(value); parent_.deliverHistogramToSinks(*this, value); } diff --git a/source/common/stats/thread_local_store.h b/source/common/stats/thread_local_store.h index dea59e20ea8b4..4e318bb949fb0 100644 --- a/source/common/stats/thread_local_store.h +++ b/source/common/stats/thread_local_store.h @@ -12,6 +12,7 @@ #include "common/stats/heap_stat_data.h" #include "common/stats/histogram_impl.h" #include "common/stats/source_impl.h" +#include "common/stats/symbol_table_impl.h" #include "common/stats/utility.h" #include "absl/container/flat_hash_map.h" @@ -28,9 +29,9 @@ namespace Stats { */ class ThreadLocalHistogramImpl : public Histogram, public MetricImpl { public: - ThreadLocalHistogramImpl(const std::string& name, std::string&& tag_extracted_name, - std::vector&& tags); - ~ThreadLocalHistogramImpl(); + ThreadLocalHistogramImpl(StatName name, absl::string_view tag_extracted_name, + const std::vector& tags, SymbolTable& symbol_table); + ~ThreadLocalHistogramImpl() override; void merge(histogram_t* target); @@ -49,8 +50,9 @@ class ThreadLocalHistogramImpl : public Histogram, public MetricImpl { bool used() const override { return flags_ & Flags::Used; } // Stats::Metric - std::string name() const override { return name_; } - const char* nameCStr() const override { return name_.c_str(); } + StatName statName() const override { return name_.statName(); } + SymbolTable& symbolTable() override { return symbol_table_; } + const SymbolTable& symbolTable() const override { return symbol_table_; } private: uint64_t otherHistogramIndex() const { return 1 - current_active_; } @@ -58,10 +60,11 @@ class ThreadLocalHistogramImpl : public Histogram, public MetricImpl { histogram_t* histograms_[2]; std::atomic flags_; std::thread::id created_thread_id_; - const std::string name_; + StatNameStorage name_; + SymbolTable& symbol_table_; }; -typedef std::shared_ptr TlsHistogramSharedPtr; +using TlsHistogramSharedPtr = std::shared_ptr; class TlsScope; @@ -70,9 +73,9 
@@ class TlsScope; */ class ParentHistogramImpl : public ParentHistogram, public MetricImpl { public: - ParentHistogramImpl(const std::string& name, Store& parent, TlsScope& tlsScope, - std::string&& tag_extracted_name, std::vector&& tags); - ~ParentHistogramImpl(); + ParentHistogramImpl(StatName name, Store& parent, TlsScope& tlsScope, + absl::string_view tag_extracted_name, const std::vector& tags); + ~ParentHistogramImpl() override; void addTlsHistogram(const TlsHistogramSharedPtr& hist_ptr); bool used() const override; @@ -94,8 +97,9 @@ class ParentHistogramImpl : public ParentHistogram, public MetricImpl { const std::string bucketSummary() const override; // Stats::Metric - std::string name() const override { return name_; } - const char* nameCStr() const override { return name_.c_str(); } + StatName statName() const override { return name_.statName(); } + SymbolTable& symbolTable() override { return parent_.symbolTable(); } + const SymbolTable& symbolTable() const override { return parent_.symbolTable(); } private: bool usedLockHeld() const EXCLUSIVE_LOCKS_REQUIRED(merge_lock_); @@ -109,10 +113,10 @@ class ParentHistogramImpl : public ParentHistogram, public MetricImpl { mutable Thread::MutexBasicLockable merge_lock_; std::list tls_histograms_ GUARDED_BY(merge_lock_); bool merged_; - const std::string name_; + StatNameStorage name_; }; -typedef std::shared_ptr ParentHistogramImplSharedPtr; +using ParentHistogramImplSharedPtr = std::shared_ptr; /** * Class used to create ThreadLocalHistogram in the scope. @@ -126,28 +130,39 @@ class TlsScope : public Scope { * @return a ThreadLocalHistogram within the scope's namespace. * @param name name of the histogram with scope prefix attached. */ - virtual Histogram& tlsHistogram(const std::string& name, ParentHistogramImpl& parent) PURE; + virtual Histogram& tlsHistogram(StatName name, ParentHistogramImpl& parent) PURE; }; /** * Store implementation with thread local caching. 
For design details see - * https://github.com/envoyproxy/envoy/blob/master/docs/stats.md + * https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md */ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRoot { public: - ThreadLocalStoreImpl(const Stats::StatsOptions& stats_options, StatDataAllocator& alloc); - ~ThreadLocalStoreImpl(); + ThreadLocalStoreImpl(StatDataAllocator& alloc); + ~ThreadLocalStoreImpl() override; // Stats::Scope + Counter& counterFromStatName(StatName name) override { + return default_scope_->counterFromStatName(name); + } Counter& counter(const std::string& name) override { return default_scope_->counter(name); } ScopePtr createScope(const std::string& name) override; void deliverHistogramToSinks(const Histogram& histogram, uint64_t value) override { return default_scope_->deliverHistogramToSinks(histogram, value); } + Gauge& gaugeFromStatName(StatName name) override { + return default_scope_->gaugeFromStatName(name); + } Gauge& gauge(const std::string& name) override { return default_scope_->gauge(name); } - Histogram& histogram(const std::string& name) override { - return default_scope_->histogram(name); - }; + Histogram& histogramFromStatName(StatName name) override { + return default_scope_->histogramFromStatName(name); + } + Histogram& histogram(const std::string& name) override { return default_scope_->histogram(name); } + NullGaugeImpl& nullGauge(const std::string&) override { return null_gauge_; } + const SymbolTable& symbolTable() const override { return alloc_.symbolTable(); } + SymbolTable& symbolTable() override { return alloc_.symbolTable(); } + const TagProducer& tagProducer() const { return *tag_producer_; } // Stats::Store std::vector counters() const override; @@ -168,50 +183,70 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo Source& source() override { return source_; } - const Stats::StatsOptions& statsOptions() const override { return stats_options_; } - private: - template using 
StatMap = CharStarHashMap; + template using StatMap = StatNameHashMap; struct TlsCacheEntry { StatMap counters_; StatMap gauges_; StatMap histograms_; StatMap parent_histograms_; + + // We keep a TLS cache of rejected stat names. This costs memory, but + // reduces runtime overhead running the matcher. Moreover, once symbol + // tables are integrated, rejection will need the fully elaborated string, + // and we need to take a global symbol-table lock to run. We keep this + // StatName set here in the TLS cache to avoid taking a lock to compute + // rejection. + StatNameHashSet rejected_stats_; }; struct CentralCacheEntry { StatMap counters_; StatMap gauges_; StatMap histograms_; + StatNameStorageSet rejected_stats_; }; struct ScopeImpl : public TlsScope { - ScopeImpl(ThreadLocalStoreImpl& parent, const std::string& prefix) - : scope_id_(next_scope_id_++), parent_(parent), - prefix_(Utility::sanitizeStatsName(prefix)) {} - ~ScopeImpl(); + ScopeImpl(ThreadLocalStoreImpl& parent, const std::string& prefix); + ~ScopeImpl() override; // Stats::Scope - Counter& counter(const std::string& name) override; + Counter& counterFromStatName(StatName name) override; + void deliverHistogramToSinks(const Histogram& histogram, uint64_t value) override; + Gauge& gaugeFromStatName(StatName name) override; + Histogram& histogramFromStatName(StatName name) override; + Histogram& tlsHistogram(StatName name, ParentHistogramImpl& parent) override; ScopePtr createScope(const std::string& name) override { - return parent_.createScope(symbolTable().toString(prefix_.statName()) + "."
+ name); } - void deliverHistogramToSinks(const Histogram& histogram, uint64_t value) override; - Gauge& gauge(const std::string& name) override; - Histogram& histogram(const std::string& name) override; - Histogram& tlsHistogram(const std::string& name, ParentHistogramImpl& parent) override; - const Stats::StatsOptions& statsOptions() const override { return parent_.statsOptions(); } + const SymbolTable& symbolTable() const override { return parent_.symbolTable(); } + SymbolTable& symbolTable() override { return parent_.symbolTable(); } + + Counter& counter(const std::string& name) override { + StatNameManagedStorage storage(name, symbolTable()); + return counterFromStatName(storage.statName()); + } + Gauge& gauge(const std::string& name) override { + StatNameManagedStorage storage(name, symbolTable()); + return gaugeFromStatName(storage.statName()); + } + Histogram& histogram(const std::string& name) override { + StatNameManagedStorage storage(name, symbolTable()); + return histogramFromStatName(storage.statName()); + } + + NullGaugeImpl& nullGauge(const std::string&) override { return parent_.null_gauge_; } template - using MakeStatFn = - std::function(StatDataAllocator&, absl::string_view name, - std::string&& tag_extracted_name, - std::vector&& tags)>; + using MakeStatFn = std::function(StatDataAllocator&, StatName name, + absl::string_view tag_extracted_name, + const std::vector& tags)>; /** * Makes a stat either by looking it up in the central cache, - * generating it from the the parent allocator, or as a last + * generating it from the parent allocator, or as a last * result, creating it with the heap allocator. * * @param name the full name of the stat (not tag extracted). @@ -221,20 +256,22 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo * used if non-empty, or filled in if empty (and non-null). 
*/ template - StatType& - safeMakeStat(const std::string& name, StatMap>& central_cache_map, - MakeStatFn make_stat, StatMap>* tls_cache); + StatType& safeMakeStat(StatName name, StatMap>& central_cache_map, + StatNameStorageSet& central_rejected_stats, + MakeStatFn make_stat, + StatMap>* tls_cache, + StatNameHashSet* tls_rejected_stats, StatType& null_stat); + + void extractTagsAndTruncate(StatName& name, + std::unique_ptr& truncated_name_storage, + std::vector& tags, std::string& tag_extracted_name); static std::atomic next_scope_id_; const uint64_t scope_id_; ThreadLocalStoreImpl& parent_; - const std::string prefix_; + StatNameStorage prefix_; CentralCacheEntry central_cache_; - - NullCounterImpl null_counter_; - NullGaugeImpl null_gauge_; - NullHistogramImpl null_histogram_; }; struct TlsCache : public ThreadLocal::ThreadLocalObject { @@ -249,15 +286,16 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo }; std::string getTagsForName(const std::string& name, std::vector& tags) const; - void clearScopeFromCaches(uint64_t scope_id); + void clearScopeFromCaches(uint64_t scope_id, const Event::PostCb& clean_central_cache); void releaseScopeCrossThread(ScopeImpl* scope); void mergeInternal(PostMergeCb mergeCb); - absl::string_view truncateStatNameIfNeeded(absl::string_view name); - bool rejects(const std::string& name) const; + bool rejects(StatName name) const; + bool rejectsAll() const { return stats_matcher_->rejectsAll(); } template void removeRejectedStats(StatMapClass& map, StatListClass& list); + bool checkAndRememberRejection(StatName name, StatNameStorageSet& central_rejected_stats, + StatNameHashSet* tls_rejected_stats); - const Stats::StatsOptions& stats_options_; StatDataAllocator& alloc_; Event::Dispatcher* main_thread_dispatcher_{}; ThreadLocal::SlotPtr tls_; @@ -267,12 +305,16 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo std::list> timer_sinks_; TagProducerPtr tag_producer_; StatsMatcherPtr stats_matcher_; + 
std::atomic threading_ever_initialized_{}; std::atomic shutting_down_{}; std::atomic merge_in_progress_{}; - Counter& num_last_resort_stats_; HeapStatDataAllocator heap_allocator_; SourceImpl source_; + NullCounterImpl null_counter_; + NullGaugeImpl null_gauge_; + NullHistogramImpl null_histogram_; + // Retain storage for deleted stats; these are no longer in maps because the // matcher-pattern was established after they were created. Since the stats // are held by reference in code that expects them to be there, we can't @@ -284,6 +326,8 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo std::vector deleted_counters_; std::vector deleted_gauges_; std::vector deleted_histograms_; + + absl::flat_hash_set rejected_stats_purgatory_ GUARDED_BY(lock_); }; } // namespace Stats diff --git a/source/common/stats/utility.cc b/source/common/stats/utility.cc index afc4e8c4f4ca5..5ec6d919c78d2 100644 --- a/source/common/stats/utility.cc +++ b/source/common/stats/utility.cc @@ -3,15 +3,36 @@ #include #include +#include "absl/strings/match.h" + namespace Envoy { namespace Stats { -std::string Utility::sanitizeStatsName(const std::string& name) { - std::string stats_name = name; +std::string Utility::sanitizeStatsName(absl::string_view name) { + if (absl::EndsWith(name, ".")) { + name.remove_suffix(1); + } + if (absl::StartsWith(name, ".")) { + name.remove_prefix(1); + } + std::string stats_name = std::string(name); std::replace(stats_name.begin(), stats_name.end(), ':', '_'); std::replace(stats_name.begin(), stats_name.end(), '\0', '_'); return stats_name; } +absl::optional Utility::findTag(Metric& metric, StatName find_tag_name) { + absl::optional value; + metric.iterateTagStatNames( + [&value, &find_tag_name](Stats::StatName tag_name, Stats::StatName tag_value) -> bool { + if (tag_name == find_tag_name) { + value = tag_value; + return false; + } + return true; + }); + return value; +} + } // namespace Stats } // namespace Envoy diff --git 
a/source/common/stats/utility.h b/source/common/stats/utility.h index 18081360640c9..c5100f8ff9eb5 100644 --- a/source/common/stats/utility.h +++ b/source/common/stats/utility.h @@ -2,6 +2,13 @@ #include +#include "envoy/stats/stats.h" + +#include "common/stats/symbol_table_impl.h" + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" + namespace Envoy { namespace Stats { @@ -10,9 +17,23 @@ namespace Stats { */ class Utility { public: - // ':' is a reserved char in statsd. Do a character replacement to avoid costly inline - // translations later. - static std::string sanitizeStatsName(const std::string& name); + /** + * ':' is a reserved char in statsd. Do a character replacement to avoid + * costly inline translations later. + * + * @param name the stat name to sanitize. + * @return the sanitized stat name. + */ + static std::string sanitizeStatsName(absl::string_view name); + + /** + * Finds a metric tag with the specified name. + * + * @param metric The metric in which the tag is expected to exist. + * @param find_tag_name The name of the tag to search for. + * @return The value of the tag, if found. + */ + static absl::optional findTag(Metric& metric, StatName find_tag_name); }; } // namespace Stats diff --git a/source/common/stream_info/filter_state_impl.cc b/source/common/stream_info/filter_state_impl.cc index 465871599e705..c00f75fe79de6 100644 --- a/source/common/stream_info/filter_state_impl.cc +++ b/source/common/stream_info/filter_state_impl.cc @@ -7,13 +7,7 @@ namespace StreamInfo { void FilterStateImpl::setData(absl::string_view data_name, std::unique_ptr&& data, FilterState::StateType state_type) { - // TODO(Google): Remove string conversion when fixed internally. 
Fixing - // this TODO will also require an explicit cast from absl::string_view to - // std::string in the data_storage_ index below; see - // https://github.com/abseil/abseil-cpp/blob/master/absl/strings/string_view.h#L328 - const std::string name(data_name); - const auto& it = data_storage_.find(name); - + const auto& it = data_storage_.find(data_name); if (it != data_storage_.end()) { // We have another object with same data_name. Check for mutability // violations namely: readonly data cannot be overwritten. mutable data @@ -31,18 +25,16 @@ void FilterStateImpl::setData(absl::string_view data_name, std::unique_ptr filter_object(new FilterStateImpl::FilterObject()); filter_object->data_ = std::move(data); filter_object->state_type_ = state_type; - data_storage_[name] = std::move(filter_object); + data_storage_[data_name] = std::move(filter_object); } bool FilterStateImpl::hasDataWithName(absl::string_view data_name) const { - // TODO(Google): Remove string conversion when fixed internally. - return data_storage_.count(std::string(data_name)) > 0; + return data_storage_.count(data_name) > 0; } const FilterState::Object* FilterStateImpl::getDataReadOnlyGeneric(absl::string_view data_name) const { - // TODO(Google): Remove string conversion when fixed internally. - const auto& it = data_storage_.find(std::string(data_name)); + const auto& it = data_storage_.find(data_name); if (it == data_storage_.end()) { throw EnvoyException("FilterState::getDataReadOnly called for unknown data name."); @@ -53,8 +45,7 @@ FilterStateImpl::getDataReadOnlyGeneric(absl::string_view data_name) const { } FilterState::Object* FilterStateImpl::getDataMutableGeneric(absl::string_view data_name) { - // TODO(Google): Remove string conversion when fixed internally. 
- const auto& it = data_storage_.find(std::string(data_name)); + const auto& it = data_storage_.find(data_name); if (it == data_storage_.end()) { throw EnvoyException("FilterState::getDataMutable called for unknown data name."); diff --git a/source/common/stream_info/filter_state_impl.h b/source/common/stream_info/filter_state_impl.h index 180e179511942..6f498e2e68ac1 100644 --- a/source/common/stream_info/filter_state_impl.h +++ b/source/common/stream_info/filter_state_impl.h @@ -1,11 +1,11 @@ #pragma once -#include #include #include #include "envoy/stream_info/filter_state.h" +#include "absl/container/flat_hash_map.h" #include "absl/strings/string_view.h" namespace Envoy { @@ -26,10 +26,7 @@ class FilterStateImpl : public FilterState { FilterState::StateType state_type_; }; - // The explicit non-type-specific comparator is necessary to allow use of find() method - // with absl::string_view. See - // https://stackoverflow.com/questions/20317413/what-are-transparent-comparators. - std::map, std::less<>> data_storage_; + absl::flat_hash_map> data_storage_; }; } // namespace StreamInfo diff --git a/source/common/stream_info/stream_info_impl.h b/source/common/stream_info/stream_info_impl.h index 4cf6d10c0020f..fcdb953a3cc7c 100644 --- a/source/common/stream_info/stream_info_impl.h +++ b/source/common/stream_info/stream_info_impl.h @@ -43,40 +43,24 @@ struct StreamInfoImpl : public StreamInfo { last_downstream_rx_byte_received = time_source_.monotonicTime(); } - absl::optional firstUpstreamTxByteSent() const override { - return duration(first_upstream_tx_byte_sent_); + void setUpstreamTiming(const UpstreamTiming& upstream_timing) override { + upstream_timing_ = upstream_timing; } - void onFirstUpstreamTxByteSent() override { - ASSERT(!first_upstream_tx_byte_sent_); - first_upstream_tx_byte_sent_ = time_source_.monotonicTime(); + absl::optional firstUpstreamTxByteSent() const override { + return duration(upstream_timing_.first_upstream_tx_byte_sent_); } absl::optional 
lastUpstreamTxByteSent() const override { - return duration(last_upstream_tx_byte_sent_); - } - - void onLastUpstreamTxByteSent() override { - ASSERT(!last_upstream_tx_byte_sent_); - last_upstream_tx_byte_sent_ = time_source_.monotonicTime(); + return duration(upstream_timing_.last_upstream_tx_byte_sent_); } absl::optional firstUpstreamRxByteReceived() const override { - return duration(first_upstream_rx_byte_received_); - } - - void onFirstUpstreamRxByteReceived() override { - ASSERT(!first_upstream_rx_byte_received_); - first_upstream_rx_byte_received_ = time_source_.monotonicTime(); + return duration(upstream_timing_.first_upstream_rx_byte_received_); } absl::optional lastUpstreamRxByteReceived() const override { - return duration(last_upstream_rx_byte_received_); - } - - void onLastUpstreamRxByteReceived() override { - ASSERT(!last_upstream_rx_byte_received_); - last_upstream_rx_byte_received_ = time_source_.monotonicTime(); + return duration(upstream_timing_.last_upstream_rx_byte_received_); } absl::optional firstDownstreamTxByteSent() const override { @@ -106,13 +90,6 @@ struct StreamInfoImpl : public StreamInfo { final_time_ = time_source_.monotonicTime(); } - void resetUpstreamTimings() override { - first_upstream_tx_byte_sent_ = absl::optional{}; - last_upstream_tx_byte_sent_ = absl::optional{}; - first_upstream_rx_byte_received_ = absl::optional{}; - last_upstream_rx_byte_received_ = absl::optional{}; - } - void addBytesReceived(uint64_t bytes_received) override { bytes_received_ += bytes_received; } uint64_t bytesReceived() const override { return bytes_received_; } @@ -123,6 +100,14 @@ struct StreamInfoImpl : public StreamInfo { absl::optional responseCode() const override { return response_code_; } + const absl::optional& responseCodeDetails() const override { + return response_code_details_; + } + + void setResponseCodeDetails(absl::string_view rc_details) override { + response_code_details_.emplace(rc_details); + } + void addBytesSent(uint64_t 
bytes_sent) override { bytes_sent_ += bytes_sent; } uint64_t bytesSent() const override { return bytes_sent_; } @@ -183,6 +168,14 @@ struct StreamInfoImpl : public StreamInfo { return downstream_remote_address_; } + void setDownstreamSslConnection(const Ssl::ConnectionInfo* connection_info) override { + downstream_ssl_info_ = connection_info; + } + + const Ssl::ConnectionInfo* downstreamSslConnection() const override { + return downstream_ssl_info_; + } + const Router::RouteEntry* routeEntry() const override { return route_entry_; } envoy::api::v2::core::Metadata& dynamicMetadata() override { return metadata_; }; @@ -201,21 +194,26 @@ struct StreamInfoImpl : public StreamInfo { const std::string& requestedServerName() const override { return requested_server_name_; } + void setUpstreamTransportFailureReason(absl::string_view failure_reason) override { + upstream_transport_failure_reason_ = std::string(failure_reason); + } + + const std::string& upstreamTransportFailureReason() const override { + return upstream_transport_failure_reason_; + } + TimeSource& time_source_; const SystemTime start_time_; const MonotonicTime start_time_monotonic_; absl::optional last_downstream_rx_byte_received; - absl::optional first_upstream_tx_byte_sent_; - absl::optional last_upstream_tx_byte_sent_; - absl::optional first_upstream_rx_byte_received_; - absl::optional last_upstream_rx_byte_received_; absl::optional first_downstream_tx_byte_sent_; absl::optional last_downstream_tx_byte_sent_; absl::optional final_time_; absl::optional protocol_; absl::optional response_code_; + absl::optional response_code_details_; uint64_t response_flags_{}; Upstream::HostDescriptionConstSharedPtr upstream_host_{}; bool health_check_request_{}; @@ -230,7 +228,10 @@ struct StreamInfoImpl : public StreamInfo { Network::Address::InstanceConstSharedPtr downstream_local_address_; Network::Address::InstanceConstSharedPtr downstream_direct_remote_address_; Network::Address::InstanceConstSharedPtr 
downstream_remote_address_; + const Ssl::ConnectionInfo* downstream_ssl_info_; std::string requested_server_name_; + UpstreamTiming upstream_timing_; + std::string upstream_transport_failure_reason_; }; } // namespace StreamInfo diff --git a/source/common/stream_info/utility.cc b/source/common/stream_info/utility.cc index 4c1ba2af4e56b..7821c8caab829 100644 --- a/source/common/stream_info/utility.cc +++ b/source/common/stream_info/utility.cc @@ -22,6 +22,7 @@ const std::string ResponseFlagUtils::FAULT_INJECTED = "FI"; const std::string ResponseFlagUtils::RATE_LIMITED = "RL"; const std::string ResponseFlagUtils::UNAUTHORIZED_EXTERNAL_SERVICE = "UAEX"; const std::string ResponseFlagUtils::RATELIMIT_SERVICE_ERROR = "RLSE"; +const std::string ResponseFlagUtils::STREAM_IDLE_TIMEOUT = "SI"; void ResponseFlagUtils::appendString(std::string& result, const std::string& append) { if (result.empty()) { @@ -34,7 +35,7 @@ void ResponseFlagUtils::appendString(std::string& result, const std::string& app const std::string ResponseFlagUtils::toShortString(const StreamInfo& stream_info) { std::string result; - static_assert(ResponseFlag::LastFlag == 0x8000, "A flag has been added. Fix this code."); + static_assert(ResponseFlag::LastFlag == 0x10000, "A flag has been added. Fix this code."); if (stream_info.hasResponseFlag(ResponseFlag::FailedLocalHealthCheck)) { appendString(result, FAILED_LOCAL_HEALTH_CHECK); @@ -100,6 +101,10 @@ const std::string ResponseFlagUtils::toShortString(const StreamInfo& stream_info appendString(result, UPSTREAM_RETRY_LIMIT_EXCEEDED); } + if (stream_info.hasResponseFlag(ResponseFlag::StreamIdleTimeout)) { + appendString(result, STREAM_IDLE_TIMEOUT); + } + return result.empty() ? 
NONE : result; } @@ -123,6 +128,7 @@ absl::optional ResponseFlagUtils::toResponseFlag(const std::string {ResponseFlagUtils::DOWNSTREAM_CONNECTION_TERMINATION, ResponseFlag::DownstreamConnectionTermination}, {ResponseFlagUtils::UPSTREAM_RETRY_LIMIT_EXCEEDED, ResponseFlag::UpstreamRetryLimitExceeded}, + {ResponseFlagUtils::STREAM_IDLE_TIMEOUT, ResponseFlag::StreamIdleTimeout}, }; const auto& it = map.find(flag); if (it != map.end()) { diff --git a/source/common/stream_info/utility.h b/source/common/stream_info/utility.h index 7361d1889da4b..bf588a7aa72da 100644 --- a/source/common/stream_info/utility.h +++ b/source/common/stream_info/utility.h @@ -37,6 +37,7 @@ class ResponseFlagUtils { const static std::string RATE_LIMITED; const static std::string UNAUTHORIZED_EXTERNAL_SERVICE; const static std::string RATELIMIT_SERVICE_ERROR; + const static std::string STREAM_IDLE_TIMEOUT; }; /** diff --git a/source/common/tcp/conn_pool.cc b/source/common/tcp/conn_pool.cc index e753d9ba0e75d..ea046b0fee3fa 100644 --- a/source/common/tcp/conn_pool.cc +++ b/source/common/tcp/conn_pool.cc @@ -161,6 +161,7 @@ void ConnPoolImpl::onConnectionEvent(ActiveConn& conn, Network::ConnectionEvent // do with the request. // NOTE: We move the existing pending requests to a temporary list. This is done so that // if retry logic submits a new request to the pool, we don't fail it inline. + // TODO(lizan): If pool failure due to transport socket, propagate the reason to access log. 
ConnectionPool::PoolFailureReason reason; if (conn.timed_out_) { reason = ConnectionPool::PoolFailureReason::Timeout; @@ -356,7 +357,7 @@ ConnPoolImpl::ActiveConn::ActiveConn(ConnPoolImpl& parent) remaining_requests_(parent_.host_->cluster().maxRequestsPerConnection()), timed_out_(false) { parent_.conn_connect_ms_ = std::make_unique( - parent_.host_->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher_.timeSystem()); + parent_.host_->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher_.timeSource()); Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); @@ -376,7 +377,7 @@ ConnPoolImpl::ActiveConn::ActiveConn(ConnPoolImpl& parent) parent_.host_->stats().cx_total_.inc(); parent_.host_->stats().cx_active_.inc(); conn_length_ = std::make_unique( - parent_.host_->cluster().stats().upstream_cx_length_ms_, parent_.dispatcher_.timeSystem()); + parent_.host_->cluster().stats().upstream_cx_length_ms_, parent_.dispatcher_.timeSource()); connect_timer_->enableTimer(parent_.host_->cluster().connectTimeout()); parent_.host_->cluster().resourceManager(parent_.priority_).connections().inc(); diff --git a/source/common/tcp_proxy/tcp_proxy.cc b/source/common/tcp_proxy/tcp_proxy.cc index 626aa5ae058ed..07758e5982d78 100644 --- a/source/common/tcp_proxy/tcp_proxy.cc +++ b/source/common/tcp_proxy/tcp_proxy.cc @@ -203,6 +203,7 @@ void Filter::initialize(Network::ReadFilterCallbacks& callbacks, bool set_connec read_callbacks_->connection().enableHalfClose(true); getStreamInfo().setDownstreamLocalAddress(read_callbacks_->connection().localAddress()); getStreamInfo().setDownstreamRemoteAddress(read_callbacks_->connection().remoteAddress()); + getStreamInfo().setDownstreamSslConnection(read_callbacks_->connection().ssl()); // Need to disable reads so that we don't write to an upstream that might fail // in onData(). 
This will get re-enabled when the upstream connection is diff --git a/source/common/thread/thread_factory_singleton.cc b/source/common/thread/thread_factory_singleton.cc deleted file mode 100644 index c3c8d8a62e883..0000000000000 --- a/source/common/thread/thread_factory_singleton.cc +++ /dev/null @@ -1,21 +0,0 @@ -#include "envoy/thread/thread.h" - -#include "common/common/assert.h" - -namespace Envoy { -namespace Thread { - -ThreadFactory* ThreadFactorySingleton::thread_factory_{nullptr}; - -// This function can not be inlined in the thread.h header due to the use of ASSERT() creating a -// circular dependency with assert.h. -void ThreadFactorySingleton::set(ThreadFactory* thread_factory) { - // Verify that either the singleton is uninitialized (i.e., thread_factory_ == nullptr) OR it's - // being reset to the uninitialized state (i.e., thread_factory == nullptr), but _not_ both. The - // use of XOR complicates tests but improves our ability to catch init/cleanup errors. - ASSERT((thread_factory == nullptr) != (thread_factory_ == nullptr)); - thread_factory_ = thread_factory; -} - -} // namespace Thread -} // namespace Envoy diff --git a/source/common/thread_local/thread_local_impl.cc b/source/common/thread_local/thread_local_impl.cc index 29685bf9fd962..14bcf251bd5c6 100644 --- a/source/common/thread_local/thread_local_impl.cc +++ b/source/common/thread_local/thread_local_impl.cc @@ -28,13 +28,13 @@ SlotPtr InstanceImpl::allocateSlot() { if (slots_[i] == nullptr) { std::unique_ptr slot(new SlotImpl(*this, i)); slots_[i] = slot.get(); - return std::move(slot); + return slot; } } std::unique_ptr slot(new SlotImpl(*this, slots_.size())); slots_.push_back(slot.get()); - return std::move(slot); + return slot; } ThreadLocalObjectSharedPtr InstanceImpl::SlotImpl::get() { diff --git a/source/common/tracing/http_tracer_impl.cc b/source/common/tracing/http_tracer_impl.cc index 662501a4bf382..46ff74ac58a11 100644 --- a/source/common/tracing/http_tracer_impl.cc +++ 
b/source/common/tracing/http_tracer_impl.cc @@ -23,14 +23,14 @@ static std::string buildResponseCode(const StreamInfo::StreamInfo& info) { } static std::string valueOrDefault(const Http::HeaderEntry* header, const char* default_value) { - return header ? header->value().c_str() : default_value; + return header ? std::string(header->value().getStringView()) : default_value; } static std::string buildUrl(const Http::HeaderMap& request_headers) { - std::string path = request_headers.EnvoyOriginalPath() - ? request_headers.EnvoyOriginalPath()->value().c_str() - : request_headers.Path()->value().c_str(); - static const size_t max_path_length = 128; + std::string path(request_headers.EnvoyOriginalPath() + ? request_headers.EnvoyOriginalPath()->value().getStringView() + : request_headers.Path()->value().getStringView()); + static const size_t max_path_length = 256; if (path.length() > max_path_length) { path = path.substr(0, max_path_length); } @@ -39,15 +39,15 @@ static std::string buildUrl(const Http::HeaderMap& request_headers) { valueOrDefault(request_headers.Host(), ""), path); } -const std::string HttpTracerUtility::INGRESS_OPERATION = "ingress"; -const std::string HttpTracerUtility::EGRESS_OPERATION = "egress"; +const std::string HttpTracerUtility::IngressOperation = "ingress"; +const std::string HttpTracerUtility::EgressOperation = "egress"; const std::string& HttpTracerUtility::toString(OperationName operation_name) { switch (operation_name) { case OperationName::Ingress: - return INGRESS_OPERATION; + return IngressOperation; case OperationName::Egress: - return EGRESS_OPERATION; + return EgressOperation; } NOT_REACHED_GCOVR_EXCL_LINE; @@ -64,9 +64,8 @@ Decision HttpTracerUtility::isTracing(const StreamInfo::StreamInfo& stream_info, return {Reason::NotTraceableRequestId, false}; } - // TODO PERF: Avoid copy. 
UuidTraceStatus trace_status = - UuidUtils::isTraceableUuid(request_headers.RequestId()->value().c_str()); + UuidUtils::isTraceableUuid(request_headers.RequestId()->value().getStringView()); switch (trace_status) { case UuidTraceStatus::Client: @@ -82,51 +81,94 @@ Decision HttpTracerUtility::isTracing(const StreamInfo::StreamInfo& stream_info, NOT_REACHED_GCOVR_EXCL_LINE; } +static void annotateVerbose(Span& span, const StreamInfo::StreamInfo& stream_info) { + const auto start_time = stream_info.startTime(); + if (stream_info.lastDownstreamRxByteReceived()) { + span.log(start_time + std::chrono::duration_cast( + *stream_info.lastDownstreamRxByteReceived()), + Tracing::Logs::get().LastDownstreamRxByteReceived); + } + if (stream_info.firstUpstreamTxByteSent()) { + span.log(start_time + std::chrono::duration_cast( + *stream_info.firstUpstreamTxByteSent()), + Tracing::Logs::get().FirstUpstreamTxByteSent); + } + if (stream_info.lastUpstreamTxByteSent()) { + span.log(start_time + std::chrono::duration_cast( + *stream_info.lastUpstreamTxByteSent()), + Tracing::Logs::get().LastUpstreamTxByteSent); + } + if (stream_info.firstUpstreamRxByteReceived()) { + span.log(start_time + std::chrono::duration_cast( + *stream_info.firstUpstreamRxByteReceived()), + Tracing::Logs::get().FirstUpstreamRxByteReceived); + } + if (stream_info.lastUpstreamRxByteReceived()) { + span.log(start_time + std::chrono::duration_cast( + *stream_info.lastUpstreamRxByteReceived()), + Tracing::Logs::get().LastUpstreamRxByteReceived); + } + if (stream_info.firstDownstreamTxByteSent()) { + span.log(start_time + std::chrono::duration_cast( + *stream_info.firstDownstreamTxByteSent()), + Tracing::Logs::get().FirstDownstreamTxByteSent); + } + if (stream_info.lastDownstreamTxByteSent()) { + span.log(start_time + std::chrono::duration_cast( + *stream_info.lastDownstreamTxByteSent()), + Tracing::Logs::get().LastDownstreamTxByteSent); + } +} + void HttpTracerUtility::finalizeSpan(Span& span, const Http::HeaderMap* 
request_headers, const StreamInfo::StreamInfo& stream_info, const Config& tracing_config) { // Pre response data. if (request_headers) { if (request_headers->RequestId()) { - span.setTag(Tracing::Tags::get().GUID_X_REQUEST_ID, - std::string(request_headers->RequestId()->value().c_str())); + span.setTag(Tracing::Tags::get().GuidXRequestId, + std::string(request_headers->RequestId()->value().getStringView())); } - span.setTag(Tracing::Tags::get().HTTP_URL, buildUrl(*request_headers)); - span.setTag(Tracing::Tags::get().HTTP_METHOD, request_headers->Method()->value().c_str()); - span.setTag(Tracing::Tags::get().DOWNSTREAM_CLUSTER, + span.setTag(Tracing::Tags::get().HttpUrl, buildUrl(*request_headers)); + span.setTag(Tracing::Tags::get().HttpMethod, + std::string(request_headers->Method()->value().getStringView())); + span.setTag(Tracing::Tags::get().DownstreamCluster, valueOrDefault(request_headers->EnvoyDownstreamServiceCluster(), "-")); - span.setTag(Tracing::Tags::get().USER_AGENT, valueOrDefault(request_headers->UserAgent(), "-")); - span.setTag(Tracing::Tags::get().HTTP_PROTOCOL, + span.setTag(Tracing::Tags::get().UserAgent, valueOrDefault(request_headers->UserAgent(), "-")); + span.setTag(Tracing::Tags::get().HttpProtocol, AccessLog::AccessLogFormatUtils::protocolToString(stream_info.protocol())); if (request_headers->ClientTraceId()) { - span.setTag(Tracing::Tags::get().GUID_X_CLIENT_TRACE_ID, - std::string(request_headers->ClientTraceId()->value().c_str())); + span.setTag(Tracing::Tags::get().GuidXClientTraceId, + std::string(request_headers->ClientTraceId()->value().getStringView())); } // Build tags based on the custom headers. 
for (const Http::LowerCaseString& header : tracing_config.requestHeadersForTags()) { const Http::HeaderEntry* entry = request_headers->get(header); if (entry) { - span.setTag(header.get(), entry->value().c_str()); + span.setTag(header.get(), entry->value().getStringView()); } } } - span.setTag(Tracing::Tags::get().REQUEST_SIZE, std::to_string(stream_info.bytesReceived())); + span.setTag(Tracing::Tags::get().RequestSize, std::to_string(stream_info.bytesReceived())); if (nullptr != stream_info.upstreamHost()) { - span.setTag(Tracing::Tags::get().UPSTREAM_CLUSTER, - stream_info.upstreamHost()->cluster().name()); + span.setTag(Tracing::Tags::get().UpstreamCluster, stream_info.upstreamHost()->cluster().name()); } // Post response data. - span.setTag(Tracing::Tags::get().HTTP_STATUS_CODE, buildResponseCode(stream_info)); - span.setTag(Tracing::Tags::get().RESPONSE_SIZE, std::to_string(stream_info.bytesSent())); - span.setTag(Tracing::Tags::get().RESPONSE_FLAGS, + span.setTag(Tracing::Tags::get().HttpStatusCode, buildResponseCode(stream_info)); + span.setTag(Tracing::Tags::get().ResponseSize, std::to_string(stream_info.bytesSent())); + span.setTag(Tracing::Tags::get().ResponseFlags, StreamInfo::ResponseFlagUtils::toShortString(stream_info)); + if (tracing_config.verbose()) { + annotateVerbose(span, stream_info); + } + if (!stream_info.responseCode() || Http::CodeUtility::is5xx(stream_info.responseCode().value())) { - span.setTag(Tracing::Tags::get().ERROR, Tracing::Tags::get().TRUE); + span.setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); } span.finishSpan(); @@ -142,15 +184,15 @@ SpanPtr HttpTracerImpl::startSpan(const Config& config, Http::HeaderMap& request if (config.operationName() == OperationName::Egress) { span_name.append(" "); - span_name.append(request_headers.Host()->value().c_str()); + span_name.append(std::string(request_headers.Host()->value().getStringView())); } SpanPtr active_span = driver_->startSpan(config, request_headers, span_name, 
stream_info.startTime(), tracing_decision); if (active_span) { - active_span->setTag(Tracing::Tags::get().COMPONENT, Tracing::Tags::get().PROXY); - active_span->setTag(Tracing::Tags::get().NODE_ID, local_info_.nodeName()); - active_span->setTag(Tracing::Tags::get().ZONE, local_info_.zoneName()); + active_span->setTag(Tracing::Tags::get().Component, Tracing::Tags::get().Proxy); + active_span->setTag(Tracing::Tags::get().NodeId, local_info_.nodeName()); + active_span->setTag(Tracing::Tags::get().Zone, local_info_.zoneName()); } return active_span; diff --git a/source/common/tracing/http_tracer_impl.h b/source/common/tracing/http_tracer_impl.h index f426e44ddef4f..2c557d41cace8 100644 --- a/source/common/tracing/http_tracer_impl.h +++ b/source/common/tracing/http_tracer_impl.h @@ -20,47 +20,64 @@ namespace Tracing { class TracingTagValues { public: // OpenTracing standard tag names. - const std::string COMPONENT = "component"; - const std::string DB_INSTANCE = "db.instance"; - const std::string DB_STATEMENT = "db.statement"; - const std::string DB_USER = "db.user"; - const std::string DB_TYPE = "db.type"; - const std::string ERROR = "error"; - const std::string HTTP_METHOD = "http.method"; - const std::string HTTP_STATUS_CODE = "http.status_code"; - const std::string HTTP_URL = "http.url"; - const std::string MESSAGE_BUS_DESTINATION = "message_bus.destination"; - const std::string PEER_ADDRESS = "peer.address"; - const std::string PEER_HOSTNAME = "peer.hostname"; - const std::string PEER_IPV4 = "peer.ipv4"; - const std::string PEER_IPV6 = "peer.ipv6"; - const std::string PEER_PORT = "peer.port"; - const std::string PEER_SERVICE = "peer.service"; - const std::string SPAN_KIND = "span.kind"; + const std::string Component = "component"; + const std::string DbInstance = "db.instance"; + const std::string DbStatement = "db.statement"; + const std::string DbUser = "db.user"; + const std::string DbType = "db.type"; + const std::string Error = "error"; + const std::string 
HttpMethod = "http.method"; + const std::string HttpStatusCode = "http.status_code"; + const std::string HttpUrl = "http.url"; + const std::string MessageBusDestination = "message_bus.destination"; + const std::string PeerAddress = "peer.address"; + const std::string PeerHostname = "peer.hostname"; + const std::string PeerIpv4 = "peer.ipv4"; + const std::string PeerIpv6 = "peer.ipv6"; + const std::string PeerPort = "peer.port"; + const std::string PeerService = "peer.service"; + const std::string SpanKind = "span.kind"; // Non-standard tag names. - const std::string DOWNSTREAM_CLUSTER = "downstream_cluster"; - const std::string GRPC_STATUS_CODE = "grpc.status_code"; - const std::string GUID_X_CLIENT_TRACE_ID = "guid:x-client-trace-id"; - const std::string GUID_X_REQUEST_ID = "guid:x-request-id"; - const std::string HTTP_PROTOCOL = "http.protocol"; - const std::string NODE_ID = "node_id"; - const std::string REQUEST_SIZE = "request_size"; - const std::string RESPONSE_FLAGS = "response_flags"; - const std::string RESPONSE_SIZE = "response_size"; - const std::string STATUS = "status"; - const std::string UPSTREAM_CLUSTER = "upstream_cluster"; - const std::string USER_AGENT = "user_agent"; - const std::string ZONE = "zone"; + const std::string DownstreamCluster = "downstream_cluster"; + const std::string GrpcStatusCode = "grpc.status_code"; + const std::string GuidXClientTraceId = "guid:x-client-trace-id"; + const std::string GuidXRequestId = "guid:x-request-id"; + const std::string HttpProtocol = "http.protocol"; + const std::string NodeId = "node_id"; + const std::string RequestSize = "request_size"; + const std::string ResponseFlags = "response_flags"; + const std::string ResponseSize = "response_size"; + const std::string Status = "status"; + const std::string UpstreamCluster = "upstream_cluster"; + const std::string UserAgent = "user_agent"; + const std::string Zone = "zone"; // Tag values. 
- const std::string CANCELED = "canceled"; - const std::string PROXY = "proxy"; - const std::string TRUE = "true"; + const std::string Canceled = "canceled"; + const std::string Proxy = "proxy"; + const std::string True = "true"; }; typedef ConstSingleton Tags; +class TracingLogValues { +public: + // OpenTracing standard key names. + const std::string EventKey = "event"; + + // Event names + const std::string LastDownstreamRxByteReceived = "last_downstream_rx_byte_received"; + const std::string FirstUpstreamTxByteSent = "first_upstream_tx_byte_sent"; + const std::string LastUpstreamTxByteSent = "last_upstream_tx_byte_sent"; + const std::string FirstUpstreamRxByteReceived = "first_upstream_rx_byte_received"; + const std::string LastUpstreamRxByteReceived = "last_upstream_rx_byte_received"; + const std::string FirstDownstreamTxByteSent = "first_downstream_tx_byte_sent"; + const std::string LastDownstreamTxByteSent = "last_downstream_tx_byte_sent"; +}; + +typedef ConstSingleton Logs; + class HttpTracerUtility { public: /** @@ -86,8 +103,8 @@ class HttpTracerUtility { static void finalizeSpan(Span& span, const Http::HeaderMap* request_headers, const StreamInfo::StreamInfo& stream_info, const Config& tracing_config); - static const std::string INGRESS_OPERATION; - static const std::string EGRESS_OPERATION; + static const std::string IngressOperation; + static const std::string EgressOperation; }; class EgressConfigImpl : public Config { @@ -97,9 +114,10 @@ class EgressConfigImpl : public Config { const std::vector& requestHeadersForTags() const override { return request_headers_for_tags_; } + bool verbose() const override { return false; } private: - const std::vector request_headers_for_tags_; + const std::vector request_headers_for_tags_{}; }; typedef ConstSingleton EgressConfig; @@ -112,8 +130,9 @@ class NullSpan : public Span { } // Tracing::Span - void setOperation(const std::string&) override {} - void setTag(const std::string&, const std::string&) override {} + 
void setOperation(absl::string_view) override {} + void setTag(absl::string_view, absl::string_view) override {} + void log(SystemTime, const std::string&) override {} void finishSpan() override {} void injectContext(Http::HeaderMap&) override {} SpanPtr spawnChild(const Config&, const std::string&, SystemTime) override { diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index aaad5800e0d1a..1746d4d039ccc 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -60,8 +60,7 @@ envoy_cc_library( "//source/common/protobuf:utility_lib", "//source/common/router:shadow_writer_lib", "//source/common/tcp:conn_pool_lib", - "//source/common/upstream:conn_pool_map", - "//source/common/upstream:conn_pool_map_impl_lib", + "//source/common/upstream:priority_conn_pool_map_impl_lib", "//source/common/upstream:upstream_lib", "@envoy_api//envoy/admin/v2alpha:config_dump_cc", "@envoy_api//envoy/api/v2/core:base_cc", @@ -73,6 +72,8 @@ envoy_cc_library( hdrs = ["conn_pool_map.h"], deps = [ "//include/envoy/event:dispatcher_interface", + "//include/envoy/upstream:resource_manager_interface", + "//include/envoy/upstream:upstream_interface", "//source/common/common:debug_recursion_checker_lib", ], ) @@ -85,6 +86,27 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "priority_conn_pool_map", + hdrs = ["priority_conn_pool_map.h"], + deps = [ + ":conn_pool_map", + "//include/envoy/event:dispatcher_interface", + "//include/envoy/upstream:resource_manager_interface", + "//include/envoy/upstream:upstream_interface", + "//source/common/common:debug_recursion_checker_lib", + ], +) + +envoy_cc_library( + name = "priority_conn_pool_map_impl_lib", + hdrs = ["priority_conn_pool_map_impl.h"], + deps = [ + ":conn_pool_map_impl_lib", + ":priority_conn_pool_map", + ], +) + envoy_cc_library( name = "edf_scheduler_lib", hdrs = ["edf_scheduler.h"], @@ -190,14 +212,17 @@ envoy_cc_library( srcs = ["logical_dns_cluster.cc"], hdrs = ["logical_dns_cluster.h"], 
deps = [ + ":cluster_factory_lib", ":upstream_includes", "//include/envoy/thread_local:thread_local_interface", + "//include/envoy/upstream:cluster_factory_interface", "//source/common/common:empty_string", "//source/common/config:utility_lib", "//source/common/network:address_lib", "//source/common/network:utility_lib", "//source/common/protobuf", "//source/common/protobuf:utility_lib", + "//source/extensions/clusters:well_known_names", ], ) @@ -206,11 +231,14 @@ envoy_cc_library( srcs = ["original_dst_cluster.cc"], hdrs = ["original_dst_cluster.h"], deps = [ + ":cluster_factory_lib", ":upstream_includes", "//include/envoy/secret:secret_manager_interface", + "//include/envoy/upstream:cluster_factory_interface", "//source/common/common:empty_string", "//source/common/network:address_lib", "//source/common/network:utility_lib", + "//source/extensions/clusters:well_known_names", ], ) @@ -280,11 +308,13 @@ envoy_cc_library( srcs = ["eds.cc"], hdrs = ["eds.h"], deps = [ + ":cluster_factory_lib", ":upstream_includes", "//include/envoy/config:grpc_mux_interface", "//include/envoy/config:subscription_interface", "//include/envoy/local_info:local_info_interface", "//include/envoy/secret:secret_manager_interface", + "//include/envoy/upstream:cluster_factory_interface", "//include/envoy/upstream:locality_lib", "//source/common/config:metadata_lib", "//source/common/config:subscription_factory_lib", @@ -293,6 +323,7 @@ envoy_cc_library( "//source/common/network:resolver_lib", "//source/common/network:utility_lib", "//source/common/protobuf:utility_lib", + "//source/extensions/clusters:well_known_names", "@envoy_api//envoy/api/v2:eds_cc", "@envoy_api//envoy/api/v2/core:base_cc", "@envoy_api//envoy/api/v2/endpoint:endpoint_cc", @@ -324,8 +355,11 @@ envoy_cc_library( deps = [ ":eds_lib", ":health_checker_lib", + # TODO(mattklein123): Move the clusters to extensions so they can be compiled out. 
":logical_dns_cluster_lib", ":original_dst_cluster_lib", + ":static_cluster_lib", + ":strict_dns_cluster_lib", ":upstream_includes", "//include/envoy/event:dispatcher_interface", "//include/envoy/event:timer_interface", @@ -350,6 +384,26 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "static_cluster_lib", + srcs = ["static_cluster.cc"], + hdrs = ["static_cluster.h"], + deps = [ + ":cluster_factory_includes", + ":upstream_includes", + ], +) + +envoy_cc_library( + name = "strict_dns_cluster_lib", + srcs = ["strict_dns_cluster.cc"], + hdrs = ["strict_dns_cluster.h"], + deps = [ + ":cluster_factory_includes", + ":upstream_includes", + ], +) + envoy_cc_library( name = "upstream_includes", hdrs = ["upstream_impl.h"], @@ -375,9 +429,76 @@ envoy_cc_library( "//source/common/common:minimal_logger_lib", "//source/common/config:metadata_lib", "//source/common/config:well_known_names", + "//source/common/init:manager_lib", + "//source/common/stats:isolated_store_lib", + "//source/common/stats:stats_lib", + "//source/server:transport_socket_config_lib", + "@envoy_api//envoy/api/v2/core:base_cc", + "@envoy_api//envoy/api/v2/endpoint:endpoint_cc", + ], +) + +envoy_cc_library( + name = "cluster_factory_lib", + srcs = ["cluster_factory_impl.cc"], + deps = [ + ":cluster_factory_includes", + ":health_checker_lib", + ":upstream_includes", + "//include/envoy/event:dispatcher_interface", + "//include/envoy/event:timer_interface", + "//include/envoy/network:dns_interface", + "//include/envoy/network:listen_socket_interface", + "//include/envoy/ssl:context_interface", + "//include/envoy/upstream:health_checker_interface", + "//source/common/common:enum_to_int", + "//source/common/common:utility_lib", + "//source/common/config:protocol_json_lib", + "//source/common/config:tls_context_json_lib", + "//source/common/http:utility_lib", + "//source/common/network:address_lib", + "//source/common/network:resolver_lib", + "//source/common/network:socket_option_factory_lib", + 
"//source/common/network:utility_lib", + "//source/common/protobuf", + "//source/common/protobuf:utility_lib", + "//source/extensions/clusters:well_known_names", + "//source/extensions/transport_sockets:well_known_names", + "//source/server:transport_socket_config_lib", + "@envoy_api//envoy/api/v2/core:base_cc", + ], +) + +envoy_cc_library( + name = "cluster_factory_includes", + hdrs = ["cluster_factory_impl.h"], + deps = [ + ":load_balancer_lib", + ":outlier_detection_lib", + ":resource_manager_lib", + ":upstream_includes", + "//include/envoy/event:timer_interface", + "//include/envoy/local_info:local_info_interface", + "//include/envoy/network:dns_interface", + "//include/envoy/runtime:runtime_interface", + "//include/envoy/server:transport_socket_config_interface", + "//include/envoy/ssl:context_manager_interface", + "//include/envoy/thread_local:thread_local_interface", + "//include/envoy/upstream:cluster_factory_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//include/envoy/upstream:health_checker_interface", + "//include/envoy/upstream:load_balancer_interface", + "//include/envoy/upstream:locality_lib", + "//include/envoy/upstream:upstream_interface", + "//source/common/common:callback_impl_lib", + "//source/common/common:enum_to_int", + "//source/common/common:minimal_logger_lib", + "//source/common/config:metadata_lib", + "//source/common/config:utility_lib", + "//source/common/config:well_known_names", "//source/common/stats:isolated_store_lib", "//source/common/stats:stats_lib", - "//source/server:init_manager_lib", + "//source/extensions/clusters:well_known_names", "//source/server:transport_socket_config_lib", "@envoy_api//envoy/api/v2/core:base_cc", "@envoy_api//envoy/api/v2/endpoint:endpoint_cc", diff --git a/source/common/upstream/cds_api_impl.cc b/source/common/upstream/cds_api_impl.cc index 8e3d1f2bacffd..79a888a26dc6a 100644 --- a/source/common/upstream/cds_api_impl.cc +++ b/source/common/upstream/cds_api_impl.cc @@ -7,6 
+7,7 @@ #include "envoy/stats/scope.h" #include "common/common/cleanup.h" +#include "common/common/utility.h" #include "common/config/resources.h" #include "common/config/subscription_factory.h" #include "common/config/utility.h" @@ -29,45 +30,101 @@ CdsApiImpl::CdsApiImpl(const envoy::api::v2::core::ConfigSource& cds_config, Clu : cm_(cm), scope_(scope.createScope("cluster_manager.cds.")) { Config::Utility::checkLocalInfo("cds", local_info); - subscription_ = - Config::SubscriptionFactory::subscriptionFromConfigSource( - cds_config, local_info, dispatcher, cm, random, *scope_, - "envoy.api.v2.ClusterDiscoveryService.FetchClusters", - "envoy.api.v2.ClusterDiscoveryService.StreamClusters", api); + const bool is_delta = (cds_config.api_config_source().api_type() == + envoy::api::v2::core::ApiConfigSource::DELTA_GRPC); + const std::string grpc_method = is_delta ? "envoy.api.v2.ClusterDiscoveryService.DeltaClusters" + : "envoy.api.v2.ClusterDiscoveryService.StreamClusters"; + subscription_ = Config::SubscriptionFactory::subscriptionFromConfigSource( + cds_config, local_info, dispatcher, cm, random, *scope_, + "envoy.api.v2.ClusterDiscoveryService.FetchClusters", grpc_method, + Grpc::Common::typeUrl(envoy::api::v2::Cluster().GetDescriptor()->full_name()), api); } -void CdsApiImpl::onConfigUpdate(const ResourceVector& resources, const std::string& version_info) { +void CdsApiImpl::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + const std::string& version_info) { + ClusterManager::ClusterInfoMap clusters_to_remove = cm_.clusters(); + std::vector clusters; + for (const auto& cluster_blob : resources) { + clusters.push_back(MessageUtil::anyConvert(cluster_blob)); + clusters_to_remove.erase(clusters.back().name()); + } + Protobuf::RepeatedPtrField to_remove_repeated; + for (const auto& cluster : clusters_to_remove) { + *to_remove_repeated.Add() = cluster.first; + } + Protobuf::RepeatedPtrField to_add_repeated; + for (const auto& cluster : clusters) { + 
envoy::api::v2::Resource* to_add = to_add_repeated.Add(); + to_add->set_name(cluster.name()); + to_add->set_version(version_info); + to_add->mutable_resource()->PackFrom(cluster); + } + onConfigUpdate(to_add_repeated, to_remove_repeated, version_info); +} + +void CdsApiImpl::onConfigUpdate( + const Protobuf::RepeatedPtrField& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) { cm_.adsMux().pause(Config::TypeUrl::get().ClusterLoadAssignment); Cleanup eds_resume([this] { cm_.adsMux().resume(Config::TypeUrl::get().ClusterLoadAssignment); }); + std::vector exception_msgs; std::unordered_set cluster_names; - for (const auto& cluster : resources) { - if (!cluster_names.insert(cluster.name()).second) { - throw EnvoyException(fmt::format("duplicate cluster {} found", cluster.name())); + for (const auto& resource : added_resources) { + envoy::api::v2::Cluster cluster; + try { + cluster = MessageUtil::anyConvert(resource.resource()); + MessageUtil::validate(cluster); + if (!cluster_names.insert(cluster.name()).second) { + throw EnvoyException(fmt::format("duplicate cluster {} found", cluster.name())); + } + if (cm_.addOrUpdateCluster( + cluster, resource.version(), + [this](const std::string&, ClusterManager::ClusterWarmingState state) { + // Following if/else block implements a control flow mechanism that can be used + // by an ADS implementation to properly sequence CDS and RDS update. It is not + // enforcing on ADS. ADS can use it to detect when a previously sent cluster becomes + // warm before sending routes that depend on it. This can improve incidence of HTTP + // 503 responses from Envoy when a route is used before it's supporting cluster is + // ready. + // + // We achieve that by leaving CDS in the paused state as long as there is at least + // one cluster in the warming state. This prevents CDS ACK from being sent to ADS. 
+ // Once cluster is warmed up, CDS is resumed, and ACK is sent to ADS, providing a + // signal to ADS to proceed with RDS updates. + // + // Major concern with this approach is CDS being left in the paused state forever. + // As long as ClusterManager::removeCluster() is not called on a warming cluster + // this is not an issue. CdsApiImpl takes care of doing this properly, and there + // is no other component removing clusters from the ClusterManagerImpl. If this + // ever changes, we would need to correct the following logic. + if (state == ClusterManager::ClusterWarmingState::Starting && + cm_.warmingClusterCount() == 1) { + cm_.adsMux().pause(Config::TypeUrl::get().Cluster); + } else if (state == ClusterManager::ClusterWarmingState::Finished && + cm_.warmingClusterCount() == 0) { + cm_.adsMux().resume(Config::TypeUrl::get().Cluster); + } + })) { + ENVOY_LOG(debug, "cds: add/update cluster '{}'", cluster.name()); + } + } catch (const EnvoyException& e) { + exception_msgs.push_back(fmt::format("{}: {}", cluster.name(), e.what())); } } - for (const auto& cluster : resources) { - MessageUtil::validate(cluster); - } - // We need to keep track of which clusters we might need to remove. 
- ClusterManager::ClusterInfoMap clusters_to_remove = cm_.clusters(); - for (auto& cluster : resources) { - const std::string cluster_name = cluster.name(); - clusters_to_remove.erase(cluster_name); - if (cm_.addOrUpdateCluster(cluster, version_info)) { - ENVOY_LOG(debug, "cds: add/update cluster '{}'", cluster_name); - } - } - - for (auto cluster : clusters_to_remove) { - const std::string cluster_name = cluster.first; - if (cm_.removeCluster(cluster_name)) { - ENVOY_LOG(debug, "cds: remove cluster '{}'", cluster_name); + for (auto resource_name : removed_resources) { + if (cm_.removeCluster(resource_name)) { + ENVOY_LOG(debug, "cds: remove cluster '{}'", resource_name); } } - version_info_ = version_info; runInitializeCallbackIfAny(); + if (!exception_msgs.empty()) { + throw EnvoyException( + fmt::format("Error adding/updating cluster(s) {}", StringUtil::join(exception_msgs, ", "))); + } + system_version_info_ = system_version_info; } void CdsApiImpl::onConfigUpdateFailed(const EnvoyException*) { diff --git a/source/common/upstream/cds_api_impl.h b/source/common/upstream/cds_api_impl.h index 8fb1fb1cc3035..2550577880e57 100644 --- a/source/common/upstream/cds_api_impl.h +++ b/source/common/upstream/cds_api_impl.h @@ -19,7 +19,7 @@ namespace Upstream { * CDS API implementation that fetches via Subscription. 
*/ class CdsApiImpl : public CdsApi, - Config::SubscriptionCallbacks, + Config::SubscriptionCallbacks, Logger::Loggable { public: static CdsApiPtr create(const envoy::api::v2::core::ConfigSource& cds_config, ClusterManager& cm, @@ -32,10 +32,14 @@ class CdsApiImpl : public CdsApi, void setInitializedCb(std::function callback) override { initialize_callback_ = callback; } - const std::string versionInfo() const override { return version_info_; } + const std::string versionInfo() const override { return system_version_info_; } // Config::SubscriptionCallbacks - void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) override; + // TODO(fredlas) deduplicate + void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + const std::string& version_info) override; + void onConfigUpdate(const Protobuf::RepeatedPtrField&, + const Protobuf::RepeatedPtrField&, const std::string&) override; void onConfigUpdateFailed(const EnvoyException* e) override; std::string resourceName(const ProtobufWkt::Any& resource) override { return MessageUtil::anyConvert(resource).name(); @@ -48,8 +52,8 @@ class CdsApiImpl : public CdsApi, void runInitializeCallbackIfAny(); ClusterManager& cm_; - std::unique_ptr> subscription_; - std::string version_info_; + std::unique_ptr subscription_; + std::string system_version_info_; std::function initialize_callback_; Stats::ScopePtr scope_; }; diff --git a/source/common/upstream/cluster_factory_impl.cc b/source/common/upstream/cluster_factory_impl.cc new file mode 100644 index 0000000000000..ce4ad39dc1f9a --- /dev/null +++ b/source/common/upstream/cluster_factory_impl.cc @@ -0,0 +1,123 @@ +#include "common/upstream/cluster_factory_impl.h" + +#include "common/http/utility.h" +#include "common/network/address_impl.h" +#include "common/network/resolver_impl.h" +#include "common/network/socket_option_factory.h" +#include "common/upstream/health_checker_impl.h" + +#include "server/transport_socket_config_impl.h" + +namespace 
Envoy { +namespace Upstream { + +namespace { + +Stats::ScopePtr generateStatsScope(const envoy::api::v2::Cluster& config, Stats::Store& stats) { + return stats.createScope(fmt::format( + "cluster.{}.", config.alt_stat_name().empty() ? config.name() : config.alt_stat_name())); +} + +} // namespace + +ClusterSharedPtr ClusterFactoryImplBase::create( + const envoy::api::v2::Cluster& cluster, ClusterManager& cluster_manager, Stats::Store& stats, + ThreadLocal::Instance& tls, Network::DnsResolverSharedPtr dns_resolver, + Ssl::ContextManager& ssl_context_manager, Runtime::Loader& runtime, + Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, + AccessLog::AccessLogManager& log_manager, const LocalInfo::LocalInfo& local_info, + Server::Admin& admin, Singleton::Manager& singleton_manager, + Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api, Api::Api& api) { + + std::string cluster_type; + + if (!cluster.has_cluster_type()) { + switch (cluster.type()) { + case envoy::api::v2::Cluster::STATIC: + cluster_type = Extensions::Clusters::ClusterTypes::get().Static; + break; + case envoy::api::v2::Cluster::STRICT_DNS: + cluster_type = Extensions::Clusters::ClusterTypes::get().StrictDns; + break; + case envoy::api::v2::Cluster::LOGICAL_DNS: + cluster_type = Extensions::Clusters::ClusterTypes::get().LogicalDns; + break; + case envoy::api::v2::Cluster::ORIGINAL_DST: + cluster_type = Extensions::Clusters::ClusterTypes::get().OriginalDst; + break; + case envoy::api::v2::Cluster::EDS: + cluster_type = Extensions::Clusters::ClusterTypes::get().Eds; + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + } else { + cluster_type = cluster.cluster_type().name(); + } + ClusterFactory* factory = Registry::FactoryRegistry::getFactory(cluster_type); + + if (factory == nullptr) { + throw EnvoyException(fmt::format( + "Didn't find a registered cluster factory implementation for name: '{}'", cluster_type)); + } + + ClusterFactoryContextImpl context(cluster_manager, 
stats, tls, std::move(dns_resolver), + ssl_context_manager, runtime, random, dispatcher, log_manager, + local_info, admin, singleton_manager, + std::move(outlier_event_logger), added_via_api, api); + return factory->create(cluster, context); +} + +Network::DnsResolverSharedPtr +ClusterFactoryImplBase::selectDnsResolver(const envoy::api::v2::Cluster& cluster, + ClusterFactoryContext& context) { + // We make this a shared pointer to deal with the distinct ownership + // scenarios that can exist: in one case, we pass in the "default" + // DNS resolver that is owned by the Server::Instance. In the case + // where 'dns_resolvers' is specified, we have per-cluster DNS + // resolvers that are created here but ownership resides with + // StrictDnsClusterImpl/LogicalDnsCluster. + if (!cluster.dns_resolvers().empty()) { + const auto& resolver_addrs = cluster.dns_resolvers(); + std::vector resolvers; + resolvers.reserve(resolver_addrs.size()); + for (const auto& resolver_addr : resolver_addrs) { + resolvers.push_back(Network::Address::resolveProtoAddress(resolver_addr)); + } + return context.dispatcher().createDnsResolver(resolvers); + } + + return context.dnsResolver(); +} + +ClusterSharedPtr ClusterFactoryImplBase::create(const envoy::api::v2::Cluster& cluster, + ClusterFactoryContext& context) { + + auto stats_scope = generateStatsScope(cluster, context.stats()); + Server::Configuration::TransportSocketFactoryContextImpl factory_context( + context.admin(), context.sslContextManager(), *stats_scope, context.clusterManager(), + context.localInfo(), context.dispatcher(), context.random(), context.stats(), + context.singletonManager(), context.tls(), context.api()); + + ClusterImplBaseSharedPtr new_cluster = + createClusterImpl(cluster, context, factory_context, std::move(stats_scope)); + + if (!cluster.health_checks().empty()) { + // TODO(htuch): Need to support multiple health checks in v2. 
+ if (cluster.health_checks().size() != 1) { + throw EnvoyException("Multiple health checks not supported"); + } else { + new_cluster->setHealthChecker(HealthCheckerFactory::create( + cluster.health_checks()[0], *new_cluster, context.runtime(), context.random(), + context.dispatcher(), context.logManager())); + } + } + + new_cluster->setOutlierDetector(Outlier::DetectorImplFactory::createForCluster( + *new_cluster, cluster, context.dispatcher(), context.runtime(), + context.outlierEventLogger())); + return new_cluster; +} + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/upstream/cluster_factory_impl.h b/source/common/upstream/cluster_factory_impl.h new file mode 100644 index 0000000000000..6777a2ba6b1ec --- /dev/null +++ b/source/common/upstream/cluster_factory_impl.h @@ -0,0 +1,184 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "envoy/api/v2/core/base.pb.h" +#include "envoy/api/v2/endpoint/endpoint.pb.h" +#include "envoy/config/typed_metadata.h" +#include "envoy/event/timer.h" +#include "envoy/local_info/local_info.h" +#include "envoy/network/dns.h" +#include "envoy/runtime/runtime.h" +#include "envoy/secret/secret_manager.h" +#include "envoy/server/transport_socket_config.h" +#include "envoy/ssl/context_manager.h" +#include "envoy/stats/scope.h" +#include "envoy/thread_local/thread_local.h" +#include "envoy/upstream/cluster_factory.h" +#include "envoy/upstream/cluster_manager.h" +#include "envoy/upstream/health_checker.h" +#include "envoy/upstream/load_balancer.h" +#include "envoy/upstream/locality.h" +#include "envoy/upstream/upstream.h" + +#include "common/common/callback_impl.h" +#include "common/common/enum_to_int.h" +#include "common/common/logger.h" +#include "common/config/metadata.h" +#include "common/config/utility.h" +#include "common/config/well_known_names.h" +#include "common/network/utility.h" +#include "common/protobuf/utility.h" +#include 
"common/stats/isolated_store_impl.h" +#include "common/upstream/load_balancer_impl.h" +#include "common/upstream/outlier_detection_impl.h" +#include "common/upstream/resource_manager_impl.h" +#include "common/upstream/upstream_impl.h" + +#include "extensions/clusters/well_known_names.h" + +namespace Envoy { +namespace Upstream { + +class ClusterFactoryContextImpl : public ClusterFactoryContext { + +public: + ClusterFactoryContextImpl( + ClusterManager& cluster_manager, Stats::Store& stats, ThreadLocal::SlotAllocator& tls, + Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager, + Runtime::Loader& runtime, Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, + AccessLog::AccessLogManager& log_manager, const LocalInfo::LocalInfo& local_info, + Server::Admin& admin, Singleton::Manager& singleton_manager, + Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api, Api::Api& api) + : cluster_manager_(cluster_manager), stats_(stats), tls_(tls), + dns_resolver_(std::move(dns_resolver)), ssl_context_manager_(ssl_context_manager), + runtime_(runtime), random_(random), dispatcher_(dispatcher), log_manager_(log_manager), + local_info_(local_info), admin_(admin), singleton_manager_(singleton_manager), + outlier_event_logger_(std::move(outlier_event_logger)), added_via_api_(added_via_api), + api_(api) {} + + ClusterManager& clusterManager() override { return cluster_manager_; } + Stats::Store& stats() override { return stats_; } + ThreadLocal::SlotAllocator& tls() override { return tls_; } + Network::DnsResolverSharedPtr dnsResolver() override { return dns_resolver_; } + Ssl::ContextManager& sslContextManager() override { return ssl_context_manager_; } + Runtime::Loader& runtime() override { return runtime_; } + Runtime::RandomGenerator& random() override { return random_; } + Event::Dispatcher& dispatcher() override { return dispatcher_; } + AccessLog::AccessLogManager& logManager() override { return log_manager_; } + 
const LocalInfo::LocalInfo& localInfo() override { return local_info_; } + Server::Admin& admin() override { return admin_; } + Singleton::Manager& singletonManager() override { return singleton_manager_; } + Outlier::EventLoggerSharedPtr outlierEventLogger() override { return outlier_event_logger_; } + bool addedViaApi() override { return added_via_api_; } + Api::Api& api() override { return api_; } + +private: + ClusterManager& cluster_manager_; + Stats::Store& stats_; + ThreadLocal::SlotAllocator& tls_; + Network::DnsResolverSharedPtr dns_resolver_; + Ssl::ContextManager& ssl_context_manager_; + Runtime::Loader& runtime_; + Runtime::RandomGenerator& random_; + Event::Dispatcher& dispatcher_; + AccessLog::AccessLogManager& log_manager_; + const LocalInfo::LocalInfo& local_info_; + Server::Admin& admin_; + Singleton::Manager& singleton_manager_; + Outlier::EventLoggerSharedPtr outlier_event_logger_; + const bool added_via_api_; + Api::Api& api_; +}; + +/** + * Base class for all cluster factory implementation. This class can be directly extended if the + * custom cluster does not have any custom configuration. For custom cluster with custom + * configuration, use ConfigurableClusterFactoryBase instead. + */ +class ClusterFactoryImplBase : public ClusterFactory { +public: + /** + * Static method to get the registered cluster factory and create an instance of cluster. 
+ */ + static ClusterSharedPtr + create(const envoy::api::v2::Cluster& cluster, ClusterManager& cluster_manager, + Stats::Store& stats, ThreadLocal::Instance& tls, + Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager, + Runtime::Loader& runtime, Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, + AccessLog::AccessLogManager& log_manager, const LocalInfo::LocalInfo& local_info, + Server::Admin& admin, Singleton::Manager& singleton_manager, + Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api, Api::Api& api); + + /** + * Create a dns resolver to be used by the cluster. + */ + Network::DnsResolverSharedPtr selectDnsResolver(const envoy::api::v2::Cluster& cluster, + ClusterFactoryContext& context); + + // Upstream::ClusterFactory + ClusterSharedPtr create(const envoy::api::v2::Cluster& cluster, + ClusterFactoryContext& context) override; + std::string name() override { return name_; } + +protected: + ClusterFactoryImplBase(const std::string& name) : name_(name) {} + +private: + /** + * Create an instance of ClusterImplBase. + */ + virtual ClusterImplBaseSharedPtr + createClusterImpl(const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) PURE; + const std::string name_; +}; + +/** + * Common base class for custom cluster factory with custom configuration. + * @param ConfigProto is the configuration protobuf. + */ +template class ConfigurableClusterFactoryBase : public ClusterFactoryImplBase { +public: + /** + * @return ProtobufTypes::MessagePtr create empty config proto message. 
+ */ + virtual ProtobufTypes::MessagePtr createEmptyConfigProto() { + return std::make_unique(); + } + +protected: + ConfigurableClusterFactoryBase(const std::string& name) : ClusterFactoryImplBase(name) {} + +private: + virtual ClusterImplBaseSharedPtr + createClusterImpl(const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) override { + ProtobufTypes::MessagePtr config = createEmptyConfigProto(); + Config::Utility::translateOpaqueConfig(cluster.cluster_type().typed_config(), + ProtobufWkt::Struct::default_instance(), *config); + return createClusterWithConfig(cluster, + MessageUtil::downcastAndValidate(*config), + context, socket_factory_context, std::move(stats_scope)); + } + + virtual ClusterImplBaseSharedPtr createClusterWithConfig( + const envoy::api::v2::Cluster& cluster, const ConfigProto& proto_config, + ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) PURE; +}; + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 46d96b2320ac9..7252be1202a1d 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -14,6 +14,7 @@ #include "envoy/runtime/runtime.h" #include "envoy/stats/scope.h" +#include "common/common/assert.h" #include "common/common/enum_to_int.h" #include "common/common/fmt.h" #include "common/common/utility.h" @@ -30,15 +31,25 @@ #include "common/router/shadow_writer_impl.h" #include "common/tcp/conn_pool.h" #include "common/upstream/cds_api_impl.h" -#include "common/upstream/conn_pool_map_impl.h" #include "common/upstream/load_balancer_impl.h" #include "common/upstream/maglev_lb.h" #include "common/upstream/original_dst_cluster.h" +#include 
"common/upstream/priority_conn_pool_map_impl.h" #include "common/upstream/ring_hash_lb.h" #include "common/upstream/subset_lb.h" namespace Envoy { namespace Upstream { +namespace { + +void addOptionsIfNotNull(Network::Socket::OptionsSharedPtr& options, + const Network::Socket::OptionsSharedPtr& to_add) { + if (to_add != nullptr) { + Network::Socket::appendOptions(options, to_add); + } +} + +} // namespace void ClusterManagerInitHelper::addCluster(Cluster& cluster) { // See comments in ClusterManagerImpl::addOrUpdateCluster() for why this is only called during @@ -179,7 +190,7 @@ ClusterManagerImpl::ClusterManagerImpl( init_helper_([this](Cluster& cluster) { onClusterInit(cluster); }), config_tracker_entry_( admin.getConfigTracker().add("clusters", [this] { return dumpClusterConfigs(); })), - time_source_(main_thread_dispatcher.timeSystem()), dispatcher_(main_thread_dispatcher), + time_source_(main_thread_dispatcher.timeSource()), dispatcher_(main_thread_dispatcher), http_context_(http_context) { async_client_manager_ = std::make_unique(*this, tls, time_source_, api); @@ -297,6 +308,17 @@ void ClusterManagerImpl::onClusterInit(Cluster& cluster) { } // Now setup for cross-thread updates. + cluster.prioritySet().addMemberUpdateCb( + [&cluster, this](const HostVector&, const HostVector& hosts_removed) -> void { + // TODO(snowp): Should this be subject to merge windows? + + // Whenever hosts are removed from the cluster, we make each TLS cluster drain it's + // connection pools for the removed hosts. 
+ if (!hosts_removed.empty()) { + postThreadLocalHostRemoval(cluster, hosts_removed); + } + }); + cluster.prioritySet().addPriorityUpdateCb([&cluster, this](uint32_t priority, const HostVector& hosts_added, const HostVector& hosts_removed) { @@ -420,7 +442,8 @@ void ClusterManagerImpl::applyUpdates(const Cluster& cluster, uint32_t priority, } bool ClusterManagerImpl::addOrUpdateCluster(const envoy::api::v2::Cluster& cluster, - const std::string& version_info) { + const std::string& version_info, + ClusterWarmingCallback cluster_warming_cb) { // First we need to see if this new config is new or an update to an existing dynamic cluster. // We don't allow updates to statically configured clusters in the main configuration. We check // both the warming clusters and the active clusters to see if we need an update or the update @@ -468,7 +491,8 @@ bool ClusterManagerImpl::addOrUpdateCluster(const envoy::api::v2::Cluster& clust } else { auto& cluster_entry = warming_clusters_.at(cluster_name); ENVOY_LOG(info, "add/update cluster {} starting warming", cluster_name); - cluster_entry->cluster_->initialize([this, cluster_name] { + cluster_warming_cb(cluster_name, ClusterWarmingState::Starting); + cluster_entry->cluster_->initialize([this, cluster_name, cluster_warming_cb] { auto warming_it = warming_clusters_.find(cluster_name); auto& cluster_entry = *warming_it->second; @@ -482,6 +506,7 @@ bool ClusterManagerImpl::addOrUpdateCluster(const envoy::api::v2::Cluster& clust ENVOY_LOG(info, "warming cluster {} complete", cluster_name); createOrUpdateThreadLocalCluster(cluster_entry); onClusterInit(*cluster_entry.cluster_); + cluster_warming_cb(cluster_name, ClusterWarmingState::Finished); updateGauges(); }); } @@ -592,11 +617,13 @@ void ClusterManagerImpl::loadCluster(const envoy::api::v2::Cluster& cluster, // finishes. 
if (cluster_reference.info()->lbType() == LoadBalancerType::RingHash) { cluster_entry_it->second->thread_aware_lb_ = std::make_unique( - cluster_reference.prioritySet(), cluster_reference.info()->stats(), runtime_, random_, + cluster_reference.prioritySet(), cluster_reference.info()->stats(), + cluster_reference.info()->statsScope(), runtime_, random_, cluster_reference.info()->lbRingHashConfig(), cluster_reference.info()->lbConfig()); } else if (cluster_reference.info()->lbType() == LoadBalancerType::Maglev) { cluster_entry_it->second->thread_aware_lb_ = std::make_unique( - cluster_reference.prioritySet(), cluster_reference.info()->stats(), runtime_, random_, + cluster_reference.prioritySet(), cluster_reference.info()->stats(), + cluster_reference.info()->statsScope(), runtime_, random_, cluster_reference.info()->lbConfig()); } @@ -647,32 +674,25 @@ Tcp::ConnectionPool::Instance* ClusterManagerImpl::tcpConnPoolForCluster( return entry->second->tcpConnPool(priority, context, transport_socket_options); } +void ClusterManagerImpl::postThreadLocalHostRemoval(const Cluster& cluster, + const HostVector& hosts_removed) { + tls_->runOnAllThreads([this, name = cluster.info()->name(), hosts_removed]() { + ThreadLocalClusterManagerImpl::removeHosts(name, hosts_removed, *tls_); + }); +} + void ClusterManagerImpl::postThreadLocalClusterUpdate(const Cluster& cluster, uint32_t priority, const HostVector& hosts_added, const HostVector& hosts_removed) { const auto& host_set = cluster.prioritySet().hostSetsPerPriority()[priority]; - // TODO(htuch): Can we skip these copies by exporting out const shared_ptr from HostSet? 
- HostVectorConstSharedPtr hosts_copy(new HostVector(host_set->hosts())); - HostVectorConstSharedPtr healthy_hosts_copy(new HostVector(host_set->healthyHosts())); - HostVectorConstSharedPtr degraded_hosts_copy(new HostVector(host_set->degradedHosts())); - HostsPerLocalityConstSharedPtr hosts_per_locality_copy = host_set->hostsPerLocality().clone(); - HostsPerLocalityConstSharedPtr healthy_hosts_per_locality_copy = - host_set->healthyHostsPerLocality().clone(); - HostsPerLocalityConstSharedPtr degraded_hosts_per_locality_copy = - host_set->degradedHostsPerLocality().clone(); - - tls_->runOnAllThreads([this, name = cluster.info()->name(), priority, hosts_copy, - healthy_hosts_copy, degraded_hosts_copy, hosts_per_locality_copy, - healthy_hosts_per_locality_copy, degraded_hosts_per_locality_copy, - locality_weights = host_set->localityWeights(), hosts_added, - hosts_removed]() { + tls_->runOnAllThreads([this, name = cluster.info()->name(), priority, + update_params = HostSetImpl::updateHostsParams(*host_set), + locality_weights = host_set->localityWeights(), hosts_added, hosts_removed, + overprovisioning_factor = host_set->overprovisioningFactor()]() { ThreadLocalClusterManagerImpl::updateClusterMembership( - name, priority, - HostSetImpl::updateHostsParams(hosts_copy, hosts_per_locality_copy, healthy_hosts_copy, - healthy_hosts_per_locality_copy, degraded_hosts_copy, - degraded_hosts_per_locality_copy), - locality_weights, hosts_added, hosts_removed, *tls_); + name, priority, update_params, locality_weights, hosts_added, hosts_removed, *tls_, + overprovisioning_factor); }); } @@ -925,10 +945,25 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::removeTcpConn( } } +void ClusterManagerImpl::ThreadLocalClusterManagerImpl::removeHosts(const std::string& name, + const HostVector& hosts_removed, + ThreadLocal::Slot& tls) { + ThreadLocalClusterManagerImpl& config = tls.getTyped(); + + ASSERT(config.thread_local_clusters_.find(name) != 
config.thread_local_clusters_.end()); + const auto& cluster_entry = config.thread_local_clusters_[name]; + ENVOY_LOG(debug, "removing hosts for TLS cluster {} removed {}", name, hosts_removed.size()); + + // We need to go through and purge any connection pools for hosts that got deleted. + // Even if two hosts actually point to the same address this will be safe, since if a + // host is readded it will be a different physical HostSharedPtr. + cluster_entry->parent_.drainConnPools(hosts_removed); +} + void ClusterManagerImpl::ThreadLocalClusterManagerImpl::updateClusterMembership( - const std::string& name, uint32_t priority, HostSet::UpdateHostsParams&& update_hosts_params, + const std::string& name, uint32_t priority, PrioritySet::UpdateHostsParams update_hosts_params, LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added, - const HostVector& hosts_removed, ThreadLocal::Slot& tls) { + const HostVector& hosts_removed, ThreadLocal::Slot& tls, uint64_t overprovisioning_factor) { ThreadLocalClusterManagerImpl& config = tls.getTyped(); @@ -936,9 +971,9 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::updateClusterMembership( const auto& cluster_entry = config.thread_local_clusters_[name]; ENVOY_LOG(debug, "membership update for TLS cluster {} added {} removed {}", name, hosts_added.size(), hosts_removed.size()); - cluster_entry->priority_set_.getOrCreateHostSet(priority).updateHosts( - std::move(update_hosts_params), std::move(locality_weights), hosts_added, hosts_removed, - absl::nullopt); + cluster_entry->priority_set_.updateHosts(priority, std::move(update_hosts_params), + std::move(locality_weights), hosts_added, hosts_removed, + overprovisioning_factor); // If an LB is thread aware, create a new worker local LB on membership changes. 
if (cluster_entry->lb_factory_ != nullptr) { @@ -1003,7 +1038,7 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::getHttpConnPoolsContainer( if (!allocate) { return nullptr; } - ConnPoolsContainer container{thread_local_dispatcher_}; + ConnPoolsContainer container{thread_local_dispatcher_, host}; container_iter = host_http_conn_pool_map_.emplace(host, std::move(container)).first; } @@ -1026,8 +1061,9 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::ClusterEntry( if (cluster->lbSubsetInfo().isEnabled()) { lb_ = std::make_unique( cluster->lbType(), priority_set_, parent_.local_priority_set_, cluster->stats(), - parent.parent_.runtime_, parent.parent_.random_, cluster->lbSubsetInfo(), - cluster->lbRingHashConfig(), cluster->lbLeastRequestConfig(), cluster->lbConfig()); + cluster->statsScope(), parent.parent_.runtime_, parent.parent_.random_, + cluster->lbSubsetInfo(), cluster->lbRingHashConfig(), cluster->lbLeastRequestConfig(), + cluster->lbConfig()); } else { switch (cluster->lbType()) { case LoadBalancerType::LeastRequest: { @@ -1066,14 +1102,6 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::ClusterEntry( } } } - - priority_set_.addMemberUpdateCb( - [this](const HostVector&, const HostVector& hosts_removed) -> void { - // We need to go through and purge any connection pools for hosts that got deleted. - // Even if two hosts actually point to the same address this will be safe, since if a - // host is readded it will be a different physical HostSharedPtr. - parent_.drainConnPools(hosts_removed); - }); } ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::~ClusterEntry() { @@ -1098,35 +1126,40 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::connPool( return nullptr; } - // Inherit socket options from downstream connection, if set. 
- std::vector hash_key = {uint8_t(protocol), uint8_t(priority)}; + std::vector hash_key = {uint8_t(protocol)}; - // Use downstream connection socket options for computing connection pool hash key, if any. + Network::Socket::OptionsSharedPtr upstream_options(std::make_shared()); + if (context) { + // Inherit socket options from downstream connection, if set. + if (context->downstreamConnection()) { + addOptionsIfNotNull(upstream_options, context->downstreamConnection()->socketOptions()); + } + addOptionsIfNotNull(upstream_options, context->upstreamSocketOptions()); + } + + // Use the socket options for computing connection pool hash key, if any. // This allows socket options to control connection pooling so that connections with // different options are not pooled together. - bool have_options = false; - if (context && context->downstreamConnection()) { - const Network::ConnectionSocket::OptionsSharedPtr& options = - context->downstreamConnection()->socketOptions(); - if (options) { - for (const auto& option : *options) { - have_options = true; - option->hashKey(hash_key); - } - } + for (const auto& option : *upstream_options) { + option->hashKey(hash_key); } ConnPoolsContainer& container = *parent_.getHttpConnPoolsContainer(host, true); // Note: to simplify this, we assume that the factory is only called in the scope of this // function. Otherwise, we'd need to capture a few of these variables by value. - Http::ConnectionPool::Instance& pool = container.pools_->getPool(hash_key, [&]() { - return parent_.parent_.factory_.allocateConnPool( - parent_.thread_local_dispatcher_, host, priority, protocol, - have_options ? context->downstreamConnection()->socketOptions() : nullptr); - }); + ConnPoolsContainer::ConnPools::OptPoolRef pool = + container.pools_->getPool(priority, hash_key, [&]() { + return parent_.parent_.factory_.allocateConnPool( + parent_.thread_local_dispatcher_, host, priority, protocol, + !upstream_options->empty() ? 
upstream_options : nullptr); + }); - return &pool; + if (pool.has_value()) { + return &(pool.value().get()); + } else { + return nullptr; + } } Tcp::ConnectionPool::Instance* @@ -1189,7 +1222,7 @@ Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( new Http::Http2::ProdConnPoolImpl(dispatcher, host, priority, options)}; } else { return Http::ConnectionPool::InstancePtr{ - new Http::Http1::ConnPoolImplProd(dispatcher, host, priority, options)}; + new Http::Http1::ProdConnPoolImpl(dispatcher, host, priority, options)}; } } @@ -1204,10 +1237,10 @@ Tcp::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateTcpConnPool( ClusterSharedPtr ProdClusterManagerFactory::clusterFromProto( const envoy::api::v2::Cluster& cluster, ClusterManager& cm, Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api) { - return ClusterImplBase::create(cluster, cm, stats_, tls_, dns_resolver_, ssl_context_manager_, - runtime_, random_, main_thread_dispatcher_, log_manager_, - local_info_, admin_, singleton_manager_, outlier_event_logger, - added_via_api, api_); + return ClusterFactoryImplBase::create( + cluster, cm, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_, + main_thread_dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, + outlier_event_logger, added_via_api, api_); } CdsApiPtr ProdClusterManagerFactory::createCds(const envoy::api::v2::core::ConfigSource& cds_config, diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index 04431ca14742e..2195538576250 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -22,8 +22,8 @@ #include "common/config/grpc_mux_impl.h" #include "common/http/async_client_impl.h" -#include "common/upstream/conn_pool_map.h" #include "common/upstream/load_stats_reporter.h" +#include "common/upstream/priority_conn_pool_map.h" #include 
"common/upstream/upstream_impl.h" namespace Envoy { @@ -173,11 +173,12 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable callback) override { init_helper_.setInitializedCb(callback); } + ClusterInfoMap clusters() override { // TODO(mattklein123): Add ability to see warming clusters in admin output. ClusterInfoMap clusters_map; @@ -202,9 +203,12 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable(dispatcher)} {} + ConnPoolsContainer(Event::Dispatcher& dispatcher, const HostConstSharedPtr& host) + : pools_{std::make_shared(dispatcher, host)} {} - typedef ConnPoolMap, Http::ConnectionPool::Instance> ConnPools; + typedef PriorityConnPoolMap, Http::ConnectionPool::Instance> ConnPools; // This is a shared_ptr so we can keep it alive while cleaning up. std::shared_ptr pools_; @@ -316,11 +323,14 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable #include #include "envoy/event/dispatcher.h" +#include "envoy/upstream/resource_manager.h" +#include "envoy/upstream/upstream.h" #include "common/common/debug_recursion_checker.h" #include "absl/container/flat_hash_map.h" +#include "absl/types/optional.h" namespace Envoy { namespace Upstream { @@ -17,15 +21,17 @@ template class ConnPoolMap { public: using PoolFactory = std::function()>; using DrainedCb = std::function; + using OptPoolRef = absl::optional>; - ConnPoolMap(Event::Dispatcher& dispatcher); + ConnPoolMap(Event::Dispatcher& dispatcher, const HostConstSharedPtr& host, + ResourcePriority priority); ~ConnPoolMap(); /** * Returns an existing pool for `key`, or creates a new one using `factory`. Note that it is * possible for this to fail if a limit on the number of pools allowed is reached. * @return The pool corresponding to `key`, or `absl::nullopt`. */ - POOL_TYPE& getPool(KEY_TYPE key, const PoolFactory& factory); + OptPoolRef getPool(KEY_TYPE key, const PoolFactory& factory); /** * @return the number of pools. 
@@ -51,10 +57,23 @@ template class ConnPoolMap { void drainConnections(); private: + /** + * Frees the first idle pool in `active_pools_`. + * @return false if no pool was freed. + */ + bool freeOnePool(); + + /** + * Cleans up the active_pools_ map and updates resource tracking + **/ + void clearActivePools(); + absl::flat_hash_map> active_pools_; Event::Dispatcher& thread_local_dispatcher_; std::vector cached_callbacks_; Common::DebugRecursionChecker recursion_checker_; + const HostConstSharedPtr host_; + const ResourcePriority priority_; }; } // namespace Upstream diff --git a/source/common/upstream/conn_pool_map_impl.h b/source/common/upstream/conn_pool_map_impl.h index 518805106ca93..19fa80ce4baa0 100644 --- a/source/common/upstream/conn_pool_map_impl.h +++ b/source/common/upstream/conn_pool_map_impl.h @@ -6,30 +6,58 @@ namespace Envoy { namespace Upstream { template -ConnPoolMap::ConnPoolMap(Envoy::Event::Dispatcher& dispatcher) - : thread_local_dispatcher_(dispatcher) {} +ConnPoolMap::ConnPoolMap(Envoy::Event::Dispatcher& dispatcher, + const HostConstSharedPtr& host, + ResourcePriority priority) + : thread_local_dispatcher_(dispatcher), host_(host), priority_(priority) {} -template -ConnPoolMap::~ConnPoolMap() = default; +template ConnPoolMap::~ConnPoolMap() { + // Clean up the pools to ensure resource tracking is kept up to date. Note that we do not call + // `clear()` here to avoid doing a deferred delete. This triggers some unwanted race conditions + // on shutdown where deleted resources end up putting stuff on the deferred delete list after the + // worker threads have shut down. + clearActivePools(); +} template -POOL_TYPE& ConnPoolMap::getPool(KEY_TYPE key, const PoolFactory& factory) { +typename ConnPoolMap::OptPoolRef +ConnPoolMap::getPool(KEY_TYPE key, const PoolFactory& factory) { Common::AutoDebugRecursionChecker assert_not_in(recursion_checker_); // TODO(klarose): Consider how we will change the connection pool's configuration in the future. 
// The plan is to change the downstream socket options... We may want to take those as a parameter // here. Maybe we'll pass them to the factory function? - auto inserted = active_pools_.emplace(key, nullptr); - - // If we inserted a new element, create a pool and assign it to the iterator. Tell it about any - // cached callbacks. - if (inserted.second) { - inserted.first->second = factory(); - for (const auto& cb : cached_callbacks_) { - inserted.first->second->addDrainedCallback(cb); + auto pool_iter = active_pools_.find(key); + if (pool_iter != active_pools_.end()) { + return std::ref(*(pool_iter->second)); + } + Resource& connPoolResource = host_->cluster().resourceManager(priority_).connectionPools(); + // We need a new pool. Check if we have room. + if (!connPoolResource.canCreate()) { + // We're full. Try to free up a pool. If we can't, bail out. + if (!freeOnePool()) { + host_->cluster().stats().upstream_cx_pool_overflow_.inc(); + return absl::nullopt; } + + ASSERT(size() < connPoolResource.max(), + "Freeing a pool should reduce the size to below the max."); + + // TODO(klarose): Consider some simple hysteresis here. How can we prevent iterating over all + // pools when we're at the limit every time we want to allocate a new one, even if most of the + // pools are not busy, while balancing that with not unnecessarily freeing all pools? If we + // start freeing once we cross a threshold, then stop after we cross another, we could + // achieve that balance. } - return *inserted.first->second; + // We have room for a new pool. Allocate one and let it know about any cached callbacks. 
+ auto new_pool = factory(); + connPoolResource.inc(); + for (const auto& cb : cached_callbacks_) { + new_pool->addDrainedCallback(cb); + } + + auto inserted = active_pools_.emplace(key, std::move(new_pool)); + return std::ref(*inserted.first->second); } template @@ -42,8 +70,7 @@ template void ConnPoolMap @@ -63,5 +90,32 @@ void ConnPoolMap::drainConnections() { pool_pair.second->drainConnections(); } } + +template +bool ConnPoolMap::freeOnePool() { + // Try to find a pool that isn't doing anything. + auto pool_iter = active_pools_.begin(); + while (pool_iter != active_pools_.end()) { + if (!pool_iter->second->hasActiveConnections()) { + break; + } + ++pool_iter; + } + + if (pool_iter != active_pools_.end()) { + // We found one. Free it up, and let the caller know. + active_pools_.erase(pool_iter); + host_->cluster().resourceManager(priority_).connectionPools().dec(); + return true; + } + + return false; +} + +template +void ConnPoolMap::clearActivePools() { + host_->cluster().resourceManager(priority_).connectionPools().decBy(active_pools_.size()); + active_pools_.clear(); +} } // namespace Upstream } // namespace Envoy diff --git a/source/common/upstream/eds.cc b/source/common/upstream/eds.cc index f1895f37a88ad..06ae14618fb93 100644 --- a/source/common/upstream/eds.cc +++ b/source/common/upstream/eds.cc @@ -1,19 +1,9 @@ #include "common/upstream/eds.h" #include "envoy/api/v2/eds.pb.validate.h" -#include "envoy/common/exception.h" -#include "envoy/stats/scope.h" -#include "common/common/fmt.h" -#include "common/config/metadata.h" +#include "common/common/utility.h" #include "common/config/subscription_factory.h" -#include "common/config/utility.h" -#include "common/config/well_known_names.h" -#include "common/network/address_impl.h" -#include "common/network/resolver_impl.h" -#include "common/network/utility.h" -#include "common/protobuf/utility.h" -#include "common/upstream/load_balancer_impl.h" namespace Envoy { namespace Upstream { @@ -29,52 +19,37 @@ 
EdsClusterImpl::EdsClusterImpl( ? cluster.name() : cluster.eds_cluster_config().service_name()) { Config::Utility::checkLocalInfo("eds", local_info_); - - const auto& eds_config = cluster.eds_cluster_config().eds_config(); Event::Dispatcher& dispatcher = factory_context.dispatcher(); Runtime::RandomGenerator& random = factory_context.random(); Upstream::ClusterManager& cm = factory_context.clusterManager(); - subscription_ = Config::SubscriptionFactory::subscriptionFromConfigSource< - envoy::api::v2::ClusterLoadAssignment>( + assignment_timeout_ = dispatcher.createTimer([this]() -> void { onAssignmentTimeout(); }); + const auto& eds_config = cluster.eds_cluster_config().eds_config(); + subscription_ = Config::SubscriptionFactory::subscriptionFromConfigSource( eds_config, local_info_, dispatcher, cm, random, info_->statsScope(), "envoy.api.v2.EndpointDiscoveryService.FetchEndpoints", - "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints", factory_context.api()); + "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints", + Grpc::Common::typeUrl(envoy::api::v2::ClusterLoadAssignment().GetDescriptor()->full_name()), + factory_context.api()); } void EdsClusterImpl::startPreInit() { subscription_->start({cluster_name_}, *this); } -void EdsClusterImpl::onConfigUpdate(const ResourceVector& resources, const std::string&) { - if (resources.empty()) { - ENVOY_LOG(debug, "Missing ClusterLoadAssignment for {} in onConfigUpdate()", cluster_name_); - info_->stats().update_empty_.inc(); - onPreInitComplete(); - return; - } - if (resources.size() != 1) { - throw EnvoyException(fmt::format("Unexpected EDS resource length: {}", resources.size())); - } - const auto& cluster_load_assignment = resources[0]; - MessageUtil::validate(cluster_load_assignment); - // TODO(PiotrSikora): Remove this hack once fixed internally. 
- if (!(cluster_load_assignment.cluster_name() == cluster_name_)) { - throw EnvoyException(fmt::format("Unexpected EDS cluster (expecting {}): {}", cluster_name_, - cluster_load_assignment.cluster_name())); - } - +void EdsClusterImpl::BatchUpdateHelper::batchUpdate(PrioritySet::HostUpdateCb& host_update_cb) { std::unordered_map updated_hosts; - PriorityStateManager priority_state_manager(*this, local_info_); - for (const auto& locality_lb_endpoint : cluster_load_assignment.endpoints()) { + PriorityStateManager priority_state_manager(parent_, parent_.local_info_, &host_update_cb); + for (const auto& locality_lb_endpoint : cluster_load_assignment_.endpoints()) { const uint32_t priority = locality_lb_endpoint.priority(); - if (priority > 0 && !cluster_name_.empty() && cluster_name_ == cm_.localClusterName()) { - throw EnvoyException( - fmt::format("Unexpected non-zero priority for local cluster '{}'.", cluster_name_)); + if (priority > 0 && !parent_.cluster_name_.empty() && + parent_.cluster_name_ == parent_.cm_.localClusterName()) { + throw EnvoyException(fmt::format("Unexpected non-zero priority for local cluster '{}'.", + parent_.cluster_name_)); } priority_state_manager.initializePriorityFor(locality_lb_endpoint); for (const auto& lb_endpoint : locality_lb_endpoint.lb_endpoints()) { priority_state_manager.registerHostForPriority( - "", resolveProtoAddress(lb_endpoint.endpoint().address()), locality_lb_endpoint, + "", parent_.resolveProtoAddress(lb_endpoint.endpoint().address()), locality_lb_endpoint, lb_endpoint); } } @@ -83,44 +58,142 @@ void EdsClusterImpl::onConfigUpdate(const ResourceVector& resources, const std:: bool cluster_rebuilt = false; const uint32_t overprovisioning_factor = PROTOBUF_GET_WRAPPED_OR_DEFAULT( - cluster_load_assignment.policy(), overprovisioning_factor, kDefaultOverProvisioningFactor); + cluster_load_assignment_.policy(), overprovisioning_factor, kDefaultOverProvisioningFactor); // Loop over all priorities that exist in the new 
configuration. auto& priority_state = priority_state_manager.priorityState(); for (size_t i = 0; i < priority_state.size(); ++i) { if (priority_state[i].first != nullptr) { - if (locality_weights_map_.size() <= i) { - locality_weights_map_.resize(i + 1); + if (parent_.locality_weights_map_.size() <= i) { + parent_.locality_weights_map_.resize(i + 1); } - cluster_rebuilt |= updateHostsPerLocality( - i, overprovisioning_factor, *priority_state[i].first, locality_weights_map_[i], + cluster_rebuilt |= parent_.updateHostsPerLocality( + i, overprovisioning_factor, *priority_state[i].first, parent_.locality_weights_map_[i], priority_state[i].second, priority_state_manager, updated_hosts); } } // Loop over all priorities not present in the config that already exists. This will // empty out any remaining priority that the config update did not refer to. - for (size_t i = priority_state.size(); i < priority_set_.hostSetsPerPriority().size(); ++i) { + for (size_t i = priority_state.size(); i < parent_.priority_set_.hostSetsPerPriority().size(); + ++i) { const HostVector empty_hosts; LocalityWeightsMap empty_locality_map; - if (locality_weights_map_.size() <= i) { - locality_weights_map_.resize(i + 1); + if (parent_.locality_weights_map_.size() <= i) { + parent_.locality_weights_map_.resize(i + 1); } - cluster_rebuilt |= - updateHostsPerLocality(i, overprovisioning_factor, empty_hosts, locality_weights_map_[i], - empty_locality_map, priority_state_manager, updated_hosts); + cluster_rebuilt |= parent_.updateHostsPerLocality( + i, overprovisioning_factor, empty_hosts, parent_.locality_weights_map_[i], + empty_locality_map, priority_state_manager, updated_hosts); } - all_hosts_ = std::move(updated_hosts); + parent_.all_hosts_ = std::move(updated_hosts); if (!cluster_rebuilt) { - info_->stats().update_no_rebuild_.inc(); + parent_.info_->stats().update_no_rebuild_.inc(); } // If we didn't setup to initialize when our first round of health checking is complete, just // do it now. 
- onPreInitComplete(); + parent_.onPreInitComplete(); +} + +void EdsClusterImpl::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + const std::string&) { + if (resources.empty()) { + ENVOY_LOG(debug, "Missing ClusterLoadAssignment for {} in onConfigUpdate()", cluster_name_); + info_->stats().update_empty_.inc(); + onPreInitComplete(); + return; + } + if (resources.size() != 1) { + throw EnvoyException(fmt::format("Unexpected EDS resource length: {}", resources.size())); + } + auto cluster_load_assignment = + MessageUtil::anyConvert(resources[0]); + MessageUtil::validate(cluster_load_assignment); + // TODO(PiotrSikora): Remove this hack once fixed internally. + if (!(cluster_load_assignment.cluster_name() == cluster_name_)) { + throw EnvoyException(fmt::format("Unexpected EDS cluster (expecting {}): {}", cluster_name_, + cluster_load_assignment.cluster_name())); + } + + // Disable timer (if enabled) as we have received new assignment. + if (assignment_timeout_->enabled()) { + assignment_timeout_->disableTimer(); + } + // Check if endpoint_stale_after is set. + const uint64_t stale_after_ms = + PROTOBUF_GET_MS_OR_DEFAULT(cluster_load_assignment.policy(), endpoint_stale_after, 0); + if (stale_after_ms > 0) { + // Stat to track how often we receive valid assignment_timeout in response. + info_->stats().assignment_timeout_received_.inc(); + assignment_timeout_->enableTimer(std::chrono::milliseconds(stale_after_ms)); + } + + BatchUpdateHelper helper(*this, cluster_load_assignment); + priority_set_.batchHostUpdate(helper); +} + +void EdsClusterImpl::onAssignmentTimeout() { + // We can no longer use the assignments, remove them. + // TODO(vishalpowar) This is not going to work for incremental updates, and we + // need to instead change the health status to indicate the assignments are + // stale. 
+ Protobuf::RepeatedPtrField resources; + envoy::api::v2::ClusterLoadAssignment resource; + resource.set_cluster_name(cluster_name_); + resources.Add()->PackFrom(resource); + onConfigUpdate(resources, ""); + // Stat to track how often we end up with stale assignments. + info_->stats().assignment_stale_.inc(); +} + +void EdsClusterImpl::reloadHealthyHostsHelper(const HostSharedPtr& host) { + // Here we will see if we have a host that has been marked for deletion by service discovery + // but has been stabilized due to passing active health checking. If such a host is now + // failing active health checking we can remove it during this health check update. + HostSharedPtr host_to_exclude = host; + if (host_to_exclude != nullptr && + host_to_exclude->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC) && + host_to_exclude->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL)) { + // Empty for clarity. + } else { + // Do not exclude and remove the host during the update. + host_to_exclude = nullptr; + } + + const auto& host_sets = prioritySet().hostSetsPerPriority(); + for (size_t priority = 0; priority < host_sets.size(); ++priority) { + const auto& host_set = host_sets[priority]; + + // Filter current hosts in case we need to exclude a host. + HostVectorSharedPtr hosts_copy(new HostVector()); + std::copy_if(host_set->hosts().begin(), host_set->hosts().end(), + std::back_inserter(*hosts_copy), + [&host_to_exclude](const HostSharedPtr& host) { return host_to_exclude != host; }); + + // Setup a hosts to remove vector in case we need to exclude a host. + HostVector hosts_to_remove; + if (hosts_copy->size() != host_set->hosts().size()) { + ASSERT(hosts_copy->size() == host_set->hosts().size() - 1); + hosts_to_remove.emplace_back(host_to_exclude); + } + + // Filter hosts per locality in case we need to exclude a host. 
+ HostsPerLocalityConstSharedPtr hosts_per_locality_copy = host_set->hostsPerLocality().filter( + {[&host_to_exclude](const Host& host) { return &host != host_to_exclude.get(); }})[0]; + + prioritySet().updateHosts(priority, + HostSetImpl::partitionHosts(hosts_copy, hosts_per_locality_copy), + host_set->localityWeights(), {}, hosts_to_remove, absl::nullopt); + } + + if (host_to_exclude != nullptr) { + ASSERT(all_hosts_.find(host_to_exclude->address()->asString()) != all_hosts_.end()); + all_hosts_.erase(host_to_exclude->address()->asString()); + } } bool EdsClusterImpl::updateHostsPerLocality( @@ -134,16 +207,19 @@ bool EdsClusterImpl::updateHostsPerLocality( HostVector hosts_added; HostVector hosts_removed; // We need to trigger updateHosts with the new host vectors if they have changed. We also do this - // when the locality weight map changes. + // when the locality weight map or the overprovisioning factor. Note calling updateDynamicHostList + // is responsible for both determining whether there was a change and to perform the actual update + // to current_hosts_copy, so it must be called even if we know that we need to update (e.g. if the + // overprovisioning factor changes). // TODO(htuch): We eagerly update all the host sets here on weight changes, which isn't great, // since this has the knock on effect that we rebuild the load balancers and locality scheduler. // We could make this happen lazily, as we do for host-level weight updates, where as things age // out of the locality scheduler, we discover their new weights. We don't currently have a shared // object for locality weights that we can update here, we should add something like this to // improve performance and scalability of locality weight updates. 
- if (host_set.overprovisioningFactor() != overprovisioning_factor || - updateDynamicHostList(new_hosts, *current_hosts_copy, hosts_added, hosts_removed, - updated_hosts, all_hosts_) || + const bool hosts_updated = updateDynamicHostList(new_hosts, *current_hosts_copy, hosts_added, + hosts_removed, updated_hosts, all_hosts_); + if (hosts_updated || host_set.overprovisioningFactor() != overprovisioning_factor || locality_weights_map != new_locality_weights_map) { ASSERT(std::all_of(current_hosts_copy->begin(), current_hosts_copy->end(), [&](const auto& host) { return host->priority() == priority; })); @@ -166,5 +242,22 @@ void EdsClusterImpl::onConfigUpdateFailed(const EnvoyException* e) { onPreInitComplete(); } +ClusterImplBaseSharedPtr EdsClusterFactory::createClusterImpl( + const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) { + if (!cluster.has_eds_cluster_config()) { + throw EnvoyException("cannot create an EDS cluster without an EDS config"); + } + + return std::make_unique(cluster, context.runtime(), socket_factory_context, + std::move(stats_scope), context.addedViaApi()); +} + +/** + * Static registration for the strict dns cluster factory. @see RegisterFactory. + */ +REGISTER_FACTORY(EdsClusterFactory, ClusterFactory); + } // namespace Upstream } // namespace Envoy diff --git a/source/common/upstream/eds.h b/source/common/upstream/eds.h index 589fd3dd26992..ac2a95411b489 100644 --- a/source/common/upstream/eds.h +++ b/source/common/upstream/eds.h @@ -8,16 +8,19 @@ #include "envoy/stats/scope.h" #include "envoy/upstream/locality.h" +#include "common/upstream/cluster_factory_impl.h" #include "common/upstream/upstream_impl.h" +#include "extensions/clusters/well_known_names.h" + namespace Envoy { namespace Upstream { /** * Cluster implementation that reads host information from the Endpoint Discovery Service. 
*/ -class EdsClusterImpl : public BaseDynamicClusterImpl, - Config::SubscriptionCallbacks { +class EdsClusterImpl : public BaseDynamicClusterImpl, Config::SubscriptionCallbacks { + public: EdsClusterImpl(const envoy::api::v2::Cluster& cluster, Runtime::Loader& runtime, Server::Configuration::TransportSocketFactoryContext& factory_context, @@ -27,7 +30,13 @@ class EdsClusterImpl : public BaseDynamicClusterImpl, InitializePhase initializePhase() const override { return InitializePhase::Secondary; } // Config::SubscriptionCallbacks - void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) override; + // TODO(fredlas) deduplicate + void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + const std::string& version_info) override; + void onConfigUpdate(const Protobuf::RepeatedPtrField&, + const Protobuf::RepeatedPtrField&, const std::string&) override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } void onConfigUpdateFailed(const EnvoyException* e) override; std::string resourceName(const ProtobufWkt::Any& resource) override { return MessageUtil::anyConvert(resource).cluster_name(); @@ -43,14 +52,42 @@ class EdsClusterImpl : public BaseDynamicClusterImpl, std::unordered_map& updated_hosts); // ClusterImplBase + void reloadHealthyHostsHelper(const HostSharedPtr& host) override; void startPreInit() override; + void onAssignmentTimeout(); + + class BatchUpdateHelper : public PrioritySet::BatchUpdateCb { + public: + BatchUpdateHelper(EdsClusterImpl& parent, + const envoy::api::v2::ClusterLoadAssignment& cluster_load_assignment) + : parent_(parent), cluster_load_assignment_(cluster_load_assignment) {} + + // Upstream::PrioritySet::BatchUpdateCb + void batchUpdate(PrioritySet::HostUpdateCb& host_update_cb) override; + + private: + EdsClusterImpl& parent_; + const envoy::api::v2::ClusterLoadAssignment& cluster_load_assignment_; + }; const ClusterManager& cm_; - std::unique_ptr> subscription_; + std::unique_ptr subscription_; const 
LocalInfo::LocalInfo& local_info_; const std::string cluster_name_; std::vector locality_weights_map_; HostMap all_hosts_; + Event::TimerPtr assignment_timeout_; +}; + +class EdsClusterFactory : public ClusterFactoryImplBase { +public: + EdsClusterFactory() : ClusterFactoryImplBase(Extensions::Clusters::ClusterTypes::get().Eds) {} + +private: + ClusterImplBaseSharedPtr + createClusterImpl(const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) override; }; } // namespace Upstream diff --git a/source/common/upstream/health_checker_base_impl.cc b/source/common/upstream/health_checker_base_impl.cc index 24cfabac7b541..21e68d6b518b7 100644 --- a/source/common/upstream/health_checker_base_impl.cc +++ b/source/common/upstream/health_checker_base_impl.cc @@ -24,6 +24,7 @@ HealthCheckerImplBase::HealthCheckerImplBase(const Cluster& cluster, reuse_connection_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, reuse_connection, true)), event_logger_(std::move(event_logger)), interval_(PROTOBUF_GET_MS_REQUIRED(config, interval)), no_traffic_interval_(PROTOBUF_GET_MS_OR_DEFAULT(config, no_traffic_interval, 60000)), + initial_jitter_(PROTOBUF_GET_MS_OR_DEFAULT(config, initial_jitter, 0)), interval_jitter_(PROTOBUF_GET_MS_OR_DEFAULT(config, interval_jitter, 0)), interval_jitter_percent_(config.interval_jitter_percent()), unhealthy_interval_( @@ -38,6 +39,11 @@ HealthCheckerImplBase::HealthCheckerImplBase(const Cluster& cluster, }); } +HealthCheckerImplBase::~HealthCheckerImplBase() { + // Make sure that any sessions that were deferred deleted are cleared before we destruct. 
+ dispatcher_.clearDeferredDeleteList(); +} + void HealthCheckerImplBase::decHealthy() { ASSERT(local_process_healthy_ > 0); local_process_healthy_--; @@ -99,14 +105,19 @@ std::chrono::milliseconds HealthCheckerImplBase::interval(HealthState state, } else { base_time_ms = no_traffic_interval_.count(); } + return intervalWithJitter(base_time_ms, interval_jitter_); +} +std::chrono::milliseconds +HealthCheckerImplBase::intervalWithJitter(uint64_t base_time_ms, + std::chrono::milliseconds interval_jitter) const { const uint64_t jitter_percent_mod = interval_jitter_percent_ * base_time_ms / 100; if (jitter_percent_mod > 0) { base_time_ms += random_.random() % jitter_percent_mod; } - if (interval_jitter_.count() > 0) { - base_time_ms += (random_.random() % interval_jitter_.count()); + if (interval_jitter.count() > 0) { + base_time_ms += (random_.random() % interval_jitter.count()); } const uint64_t min_interval = runtime_.snapshot().getInteger("health_check.min_interval", 0); @@ -135,6 +146,9 @@ void HealthCheckerImplBase::onClusterMemberUpdate(const HostVector& hosts_added, for (const HostSharedPtr& host : hosts_removed) { auto session_iter = active_sessions_.find(host); ASSERT(active_sessions_.end() != session_iter); + // This deletion can happen inline in response to a host failure, so we deferred delete. + session_iter->second->onDeferredDeleteBase(); + dispatcher_.deferredDelete(std::move(session_iter->second)); active_sessions_.erase(session_iter); } } @@ -220,6 +234,14 @@ HealthCheckerImplBase::ActiveHealthCheckSession::~ActiveHealthCheckSession() { } } +void HealthCheckerImplBase::ActiveHealthCheckSession::onDeferredDeleteBase() { + // The session is about to be deferred deleted. Make sure all timers are gone and any + // implementation specific state is destroyed. 
+ interval_timer_.reset(); + timeout_timer_.reset(); + onDeferredDelete(); +} + void HealthCheckerImplBase::ActiveHealthCheckSession::handleSuccess(bool degraded) { // If we are healthy, reset the # of unhealthy to zero. num_unhealthy_ = 0; @@ -241,6 +263,8 @@ void HealthCheckerImplBase::ActiveHealthCheckSession::handleSuccess(bool degrade } } + changed_state = clearPendingFlag(changed_state); + if (degraded != host_->healthFlagGet(Host::HealthFlag::DEGRADED_ACTIVE_HC)) { if (degraded) { host_->healthFlagSet(Host::HealthFlag::DEGRADED_ACTIVE_HC); @@ -291,6 +315,8 @@ HealthTransition HealthCheckerImplBase::ActiveHealthCheckSession::setUnhealthy( } } + changed_state = clearPendingFlag(changed_state); + if ((first_check_ || parent_.always_log_health_check_failures_) && parent_.event_logger_) { parent_.event_logger_->logUnhealthy(parent_.healthCheckerType(), host_, type, first_check_); } @@ -310,8 +336,26 @@ HealthTransition HealthCheckerImplBase::ActiveHealthCheckSession::setUnhealthy( void HealthCheckerImplBase::ActiveHealthCheckSession::handleFailure( envoy::data::core::v2alpha::HealthCheckFailureType type) { HealthTransition changed_state = setUnhealthy(type); - timeout_timer_->disableTimer(); - interval_timer_->enableTimer(parent_.interval(HealthState::Unhealthy, changed_state)); + // It's possible that the previous call caused this session to be deferred deleted. 
+ if (timeout_timer_ != nullptr) { + timeout_timer_->disableTimer(); + } + + if (interval_timer_ != nullptr) { + interval_timer_->enableTimer(parent_.interval(HealthState::Unhealthy, changed_state)); + } +} + +HealthTransition +HealthCheckerImplBase::ActiveHealthCheckSession::clearPendingFlag(HealthTransition changed_state) { + if (host_->healthFlagGet(Host::HealthFlag::PENDING_ACTIVE_HC)) { + host_->healthFlagClear(Host::HealthFlag::PENDING_ACTIVE_HC); + // Even though the health value of the host might have not changed, we set this to Changed to + // that the cluster can update its list of excluded hosts. + return HealthTransition::Changed; + } + + return changed_state; } void HealthCheckerImplBase::ActiveHealthCheckSession::onIntervalBase() { @@ -325,6 +369,15 @@ void HealthCheckerImplBase::ActiveHealthCheckSession::onTimeoutBase() { handleFailure(envoy::data::core::v2alpha::HealthCheckFailureType::NETWORK); } +void HealthCheckerImplBase::ActiveHealthCheckSession::onInitialInterval() { + if (parent_.initial_jitter_.count() == 0) { + onIntervalBase(); + } else { + interval_timer_->enableTimer( + std::chrono::milliseconds(parent_.intervalWithJitter(0, parent_.initial_jitter_))); + } +} + void HealthCheckEventLoggerImpl::logEjectUnhealthy( envoy::data::core::v2alpha::HealthCheckerType health_checker_type, const HostDescriptionConstSharedPtr& host, diff --git a/source/common/upstream/health_checker_base_impl.h b/source/common/upstream/health_checker_base_impl.h index 1485d1020d74a..3cf26f1100a2f 100644 --- a/source/common/upstream/health_checker_base_impl.h +++ b/source/common/upstream/health_checker_base_impl.h @@ -46,11 +46,12 @@ class HealthCheckerImplBase : public HealthChecker, void start() override; protected: - class ActiveHealthCheckSession { + class ActiveHealthCheckSession : public Event::DeferredDeletable { public: virtual ~ActiveHealthCheckSession(); HealthTransition setUnhealthy(envoy::data::core::v2alpha::HealthCheckFailureType type); - void start() { 
onIntervalBase(); } + void onDeferredDeleteBase(); + void start() { onInitialInterval(); } protected: ActiveHealthCheckSession(HealthCheckerImplBase& parent, HostSharedPtr host); @@ -62,10 +63,16 @@ class HealthCheckerImplBase : public HealthChecker, HostSharedPtr host_; private: + // Clears the pending flag if it is set. By clearing this flag we're marking the host as having + // been health checked. + // Returns the changed state to use following the flag update. + HealthTransition clearPendingFlag(HealthTransition changed_state); virtual void onInterval() PURE; void onIntervalBase(); virtual void onTimeout() PURE; void onTimeoutBase(); + virtual void onDeferredDelete() PURE; + void onInitialInterval(); HealthCheckerImplBase& parent_; Event::TimerPtr interval_timer_; @@ -80,6 +87,7 @@ class HealthCheckerImplBase : public HealthChecker, HealthCheckerImplBase(const Cluster& cluster, const envoy::api::v2::core::HealthCheck& config, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Runtime::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger); + ~HealthCheckerImplBase(); virtual ActiveHealthCheckSessionPtr makeSession(HostSharedPtr host) PURE; virtual envoy::data::core::v2alpha::HealthCheckerType healthCheckerType() const PURE; @@ -116,6 +124,8 @@ class HealthCheckerImplBase : public HealthChecker, void incHealthy(); void incDegraded(); std::chrono::milliseconds interval(HealthState state, HealthTransition changed_state) const; + std::chrono::milliseconds intervalWithJitter(uint64_t base_time_ms, + std::chrono::milliseconds interval_jitter) const; void onClusterMemberUpdate(const HostVector& hosts_added, const HostVector& hosts_removed); void refreshHealthyStat(); void runCallbacks(HostSharedPtr host, HealthTransition changed_state); @@ -126,6 +136,7 @@ class HealthCheckerImplBase : public HealthChecker, std::list callbacks_; const std::chrono::milliseconds interval_; const std::chrono::milliseconds no_traffic_interval_; + const 
std::chrono::milliseconds initial_jitter_; const std::chrono::milliseconds interval_jitter_; const uint32_t interval_jitter_percent_; const std::chrono::milliseconds unhealthy_interval_; @@ -162,7 +173,7 @@ class HealthCheckEventLoggerImpl : public HealthCheckEventLogger { const HostDescription& host, std::function callback) const; TimeSource& time_source_; - Filesystem::FileSharedPtr file_; + AccessLog::AccessLogFileSharedPtr file_; }; } // namespace Upstream diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index 2e76168e6a374..9efe775a754af 100644 --- a/source/common/upstream/health_checker_impl.cc +++ b/source/common/upstream/health_checker_impl.cc @@ -52,7 +52,7 @@ HealthCheckerFactory::create(const envoy::api::v2::core::HealthCheck& health_che HealthCheckEventLoggerPtr event_logger; if (!health_check_config.event_log_path().empty()) { event_logger = std::make_unique( - log_manager, dispatcher.timeSystem(), health_check_config.event_log_path()); + log_manager, dispatcher.timeSource(), health_check_config.event_log_path()); } switch (health_check_config.health_checker_case()) { case envoy::api::v2::core::HealthCheck::HealthCheckerCase::kHttpHealthCheck: @@ -94,12 +94,55 @@ HttpHealthCheckerImpl::HttpHealthCheckerImpl(const Cluster& cluster, request_headers_parser_( Router::HeaderParser::configure(config.http_health_check().request_headers_to_add(), config.http_health_check().request_headers_to_remove())), + http_status_checker_(config.http_health_check().expected_statuses(), + static_cast(Http::Code::OK)), codec_client_type_(codecClientType(config.http_health_check().use_http2())) { if (!config.http_health_check().service_name().empty()) { service_name_ = config.http_health_check().service_name(); } } +HttpHealthCheckerImpl::HttpStatusChecker::HttpStatusChecker( + const Protobuf::RepeatedPtrField& expected_statuses, + uint64_t default_expected_status) { + for (const auto& status_range : expected_statuses) 
{ + const auto start = status_range.start(); + const auto end = status_range.end(); + + if (start >= end) { + throw EnvoyException(fmt::format( + "Invalid http status range: expecting start < end, but found start={} and end={}", start, + end)); + } + + if (start < 100) { + throw EnvoyException(fmt::format( + "Invalid http status range: expecting start >= 100, but found start={}", start)); + } + + if (end > 600) { + throw EnvoyException( + fmt::format("Invalid http status range: expecting end <= 600, but found end={}", end)); + } + + ranges_.emplace_back(std::make_pair(static_cast(start), static_cast(end))); + } + + if (ranges_.empty()) { + ranges_.emplace_back(std::make_pair(default_expected_status, default_expected_status + 1)); + } +} + +bool HttpHealthCheckerImpl::HttpStatusChecker::inRange(uint64_t http_status) const { + for (const auto& range : ranges_) { + if (http_status >= range.first && http_status < range.second) { + return true; + } + } + + return false; +} + HttpHealthCheckerImpl::HttpActiveHealthCheckSession::HttpActiveHealthCheckSession( HttpHealthCheckerImpl& parent, const HostSharedPtr& host) : ActiveHealthCheckSession(parent, host), parent_(parent), @@ -111,6 +154,11 @@ HttpHealthCheckerImpl::HttpActiveHealthCheckSession::HttpActiveHealthCheckSessio local_address_(std::make_shared("127.0.0.1")) {} HttpHealthCheckerImpl::HttpActiveHealthCheckSession::~HttpActiveHealthCheckSession() { + onDeferredDelete(); + ASSERT(client_ == nullptr); +} + +void HttpHealthCheckerImpl::HttpActiveHealthCheckSession::onDeferredDelete() { if (client_) { // If there is an active request it will get reset, so make sure we ignore the reset. 
expect_reset_ = true; @@ -156,7 +204,7 @@ void HttpHealthCheckerImpl::HttpActiveHealthCheckSession::onInterval() { {Http::Headers::get().Path, parent_.path_}, {Http::Headers::get().UserAgent, Http::Headers::get().UserAgentValues.EnvoyHealthChecker}}; Router::FilterUtility::setUpstreamScheme(request_headers, *parent_.cluster_.info()); - StreamInfo::StreamInfoImpl stream_info(protocol_, parent_.dispatcher_.timeSystem()); + StreamInfo::StreamInfoImpl stream_info(protocol_, parent_.dispatcher_.timeSource()); stream_info.setDownstreamLocalAddress(local_address_); stream_info.setDownstreamRemoteAddress(local_address_); stream_info.onUpstreamHostSelected(host_); @@ -165,7 +213,8 @@ void HttpHealthCheckerImpl::HttpActiveHealthCheckSession::onInterval() { request_encoder_ = nullptr; } -void HttpHealthCheckerImpl::HttpActiveHealthCheckSession::onResetStream(Http::StreamResetReason) { +void HttpHealthCheckerImpl::HttpActiveHealthCheckSession::onResetStream(Http::StreamResetReason, + absl::string_view) { if (expect_reset_) { return; } @@ -181,7 +230,7 @@ HttpHealthCheckerImpl::HttpActiveHealthCheckSession::healthCheckResult() { ENVOY_CONN_LOG(debug, "hc response={} health_flags={}", *client_, response_code, HostUtility::healthFlagsToString(*host_)); - if (response_code != enumToInt(Http::Code::OK)) { + if (!parent_.http_status_checker_.inRange(response_code)) { return HealthCheckResult::Failed; } @@ -192,7 +241,8 @@ HttpHealthCheckerImpl::HttpActiveHealthCheckSession::healthCheckResult() { parent_.stats_.verify_cluster_.inc(); std::string service_cluster_healthchecked = response_headers_->EnvoyUpstreamHealthCheckedCluster() - ? response_headers_->EnvoyUpstreamHealthCheckedCluster()->value().c_str() + ? 
std::string( + response_headers_->EnvoyUpstreamHealthCheckedCluster()->value().getStringView()) : EMPTY_STRING; if (service_cluster_healthchecked.find(parent_.service_name_.value()) == 0) { @@ -222,6 +272,9 @@ void HttpHealthCheckerImpl::HttpActiveHealthCheckSession::onResponseComplete() { if ((response_headers_->Connection() && absl::EqualsIgnoreCase(response_headers_->Connection()->value().getStringView(), Http::Headers::get().ConnectionValues.Close)) || + (response_headers_->ProxyConnection() && protocol_ != Http::Protocol::Http2 && + absl::EqualsIgnoreCase(response_headers_->ProxyConnection()->value().getStringView(), + Http::Headers::get().ConnectionValues.Close)) || !parent_.reuse_connection_) { client_->close(); } @@ -262,7 +315,7 @@ TcpHealthCheckMatcher::MatchSegments TcpHealthCheckMatcher::loadProtoBytes( for (const auto& entry : byte_array) { const auto decoded = Hex::decode(entry.text()); - if (decoded.size() == 0) { + if (decoded.empty()) { throw EnvoyException(fmt::format("invalid hex string '{}'", entry.text())); } result.push_back(decoded); @@ -301,7 +354,13 @@ TcpHealthCheckerImpl::TcpHealthCheckerImpl(const Cluster& cluster, receive_bytes_(TcpHealthCheckMatcher::loadProtoBytes(config.tcp_health_check().receive())) {} TcpHealthCheckerImpl::TcpActiveHealthCheckSession::~TcpActiveHealthCheckSession() { + onDeferredDelete(); + ASSERT(client_ == nullptr); +} + +void TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onDeferredDelete() { if (client_) { + expect_close_ = true; client_->close(Network::ConnectionCloseType::NoFlush); } } @@ -313,6 +372,7 @@ void TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onData(Buffer::Instance& data.drain(data.length()); handleSuccess(false); if (!parent_.reuse_connection_) { + expect_close_ = true; client_->close(Network::ConnectionCloseType::NoFlush); } } else { @@ -321,12 +381,11 @@ void TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onData(Buffer::Instance& } void 
TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onEvent(Network::ConnectionEvent event) { - if (event == Network::ConnectionEvent::RemoteClose) { - handleFailure(envoy::data::core::v2alpha::HealthCheckFailureType::NETWORK); - } - if (event == Network::ConnectionEvent::RemoteClose || event == Network::ConnectionEvent::LocalClose) { + if (!expect_close_) { + handleFailure(envoy::data::core::v2alpha::HealthCheckFailureType::NETWORK); + } parent_.dispatcher_.deferredDelete(std::move(client_)); } @@ -345,6 +404,7 @@ void TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onEvent(Network::Connect // TODO(mattklein123): In the case that a user configured bytes to write, they will not be // be written, since we currently have no way to know if the bytes actually get written via // the connection interface. We might want to figure out how to handle this better later. + expect_close_ = true; client_->close(Network::ConnectionCloseType::NoFlush); handleSuccess(false); } @@ -358,6 +418,7 @@ void TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onInterval() { client_->addConnectionCallbacks(*session_callbacks_); client_->addReadFilter(session_callbacks_); + expect_close_ = false; client_->connect(); client_->noDelay(true); } @@ -373,6 +434,7 @@ void TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onInterval() { } void TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onTimeout() { + expect_close_ = true; host_->setActiveHealthFailureType(Host::ActiveHealthFailureType::TIMEOUT); client_->close(Network::ConnectionCloseType::NoFlush); } @@ -400,6 +462,11 @@ GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::GrpcActiveHealthCheckSessio : ActiveHealthCheckSession(parent, host), parent_(parent) {} GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::~GrpcActiveHealthCheckSession() { + onDeferredDelete(); + ASSERT(client_ == nullptr); +} + +void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onDeferredDelete() { if (client_) { // If there is an active request it will 
get reset, so make sure we ignore the reset. expect_reset_ = true; @@ -523,10 +590,11 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onInterval() { request.set_service(parent_.service_name_.value()); } - request_encoder_->encodeData(*Grpc::Common::serializeBody(request), true); + request_encoder_->encodeData(*Grpc::Common::serializeToGrpcFrame(request), true); } -void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onResetStream(Http::StreamResetReason) { +void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onResetStream(Http::StreamResetReason, + absl::string_view) { const bool expected_reset = expect_reset_; resetState(); diff --git a/source/common/upstream/health_checker_impl.h b/source/common/upstream/health_checker_impl.h index 48690a178de86..f05f44ea84715 100644 --- a/source/common/upstream/health_checker_impl.h +++ b/source/common/upstream/health_checker_impl.h @@ -47,6 +47,20 @@ class HttpHealthCheckerImpl : public HealthCheckerImplBase { Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Runtime::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger); + /** + * Utility class checking if given http status matches configured expectations. 
+ */ + class HttpStatusChecker { + public: + HttpStatusChecker(const Protobuf::RepeatedPtrField& expected_statuses, + uint64_t default_expected_status); + + bool inRange(uint64_t http_status) const; + + private: + std::vector> ranges_; + }; + private: struct HttpActiveHealthCheckSession : public ActiveHealthCheckSession, public Http::StreamDecoder, @@ -61,6 +75,7 @@ class HttpHealthCheckerImpl : public HealthCheckerImplBase { // ActiveHealthCheckSession void onInterval() override; void onTimeout() override; + void onDeferredDelete() final; // Http::StreamDecoder void decode100ContinueHeaders(Http::HeaderMapPtr&&) override {} @@ -74,7 +89,8 @@ class HttpHealthCheckerImpl : public HealthCheckerImplBase { void decodeMetadata(Http::MetadataMapPtr&&) override {} // Http::StreamCallbacks - void onResetStream(Http::StreamResetReason reason) override; + void onResetStream(Http::StreamResetReason reason, + absl::string_view transport_failure_reason) override; void onAboveWriteBufferHighWatermark() override {} void onBelowWriteBufferLowWatermark() override {} @@ -121,6 +137,7 @@ class HttpHealthCheckerImpl : public HealthCheckerImplBase { const std::string host_value_; absl::optional service_name_; Router::HeaderParserPtr request_headers_parser_; + const HttpStatusChecker http_status_checker_; protected: const Http::CodecClient::Type codec_client_type_; @@ -230,10 +247,14 @@ class TcpHealthCheckerImpl : public HealthCheckerImplBase { // ActiveHealthCheckSession void onInterval() override; void onTimeout() override; + void onDeferredDelete() final; TcpHealthCheckerImpl& parent_; Network::ClientConnectionPtr client_; std::shared_ptr session_callbacks_; + // If true, stream close was initiated by us, not e.g. remote close or TCP reset. + // In this case healthcheck status already reported, only state cleanup required. 
+ bool expect_close_{}; }; typedef std::unique_ptr TcpActiveHealthCheckSessionPtr; @@ -276,6 +297,7 @@ class GrpcHealthCheckerImpl : public HealthCheckerImplBase { // ActiveHealthCheckSession void onInterval() override; void onTimeout() override; + void onDeferredDelete() final; // Http::StreamDecoder void decode100ContinueHeaders(Http::HeaderMapPtr&&) override {} @@ -285,7 +307,8 @@ class GrpcHealthCheckerImpl : public HealthCheckerImplBase { void decodeMetadata(Http::MetadataMapPtr&&) override {} // Http::StreamCallbacks - void onResetStream(Http::StreamResetReason reason) override; + void onResetStream(Http::StreamResetReason reason, + absl::string_view transport_failure_reason) override; void onAboveWriteBufferHighWatermark() override {} void onBelowWriteBufferLowWatermark() override {} diff --git a/source/common/upstream/health_discovery_service.cc b/source/common/upstream/health_discovery_service.cc index ae44def4a796f..d104759accdb8 100644 --- a/source/common/upstream/health_discovery_service.cc +++ b/source/common/upstream/health_discovery_service.cc @@ -166,12 +166,15 @@ void HdsDelegate::onReceiveMessage( hds_clusters_.clear(); // Set response - server_response_ms_ = PROTOBUF_GET_MS_REQUIRED(*message, interval); + auto server_response_ms = PROTOBUF_GET_MS_REQUIRED(*message, interval); // Process the HealthCheckSpecifier message processMessage(std::move(message)); - setHdsStreamResponseTimer(); + if (server_response_ms_ != server_response_ms) { + server_response_ms_ = server_response_ms; + setHdsStreamResponseTimer(); + } } void HdsDelegate::onReceiveTrailingMetadata(Http::HeaderMapPtr&& metadata) { @@ -182,6 +185,7 @@ void HdsDelegate::onRemoteClose(Grpc::Status::GrpcStatus status, const std::stri ENVOY_LOG(warn, "gRPC config stream closed: {}, {}", status, message); hds_stream_response_timer_->disableTimer(); stream_ = nullptr; + server_response_ms_ = 0; handleFailure(); } @@ -252,10 +256,8 @@ void HdsCluster::initialize(std::function callback) { 
host->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); } - auto& first_host_set = priority_set_.getOrCreateHostSet(0); - - first_host_set.updateHosts( - HostSetImpl::partitionHosts(initial_hosts_, HostsPerLocalityImpl::empty()), {}, + priority_set_.updateHosts( + 0, HostSetImpl::partitionHosts(initial_hosts_, HostsPerLocalityImpl::empty()), {}, *initial_hosts_, {}, absl::nullopt); } diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index 9afaa483f4595..ea3dad58b5fb5 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -136,20 +136,21 @@ void LoadBalancerBase::recalculatePerPriorityState(uint32_t priority, // by the overprovisioning factor. HostSet& host_set = *priority_set.hostSetsPerPriority()[priority]; per_priority_health.get()[priority] = 0; - if (host_set.hosts().size() > 0) { + per_priority_degraded.get()[priority] = 0; + const auto host_count = host_set.hosts().size() - host_set.excludedHosts().size(); + + if (host_count > 0) { // Each priority level's health is ratio of healthy hosts to total number of hosts in a priority // multiplied by overprovisioning factor of 1.4 and capped at 100%. It means that if all // hosts are healthy that priority's health is 100%*1.4=140% and is capped at 100% which results // in 100%. If 80% of hosts are healthy, that priority's health is still 100% (80%*1.4=112% and // capped at 100%). - per_priority_health.get()[priority] = - std::min(100, (host_set.overprovisioningFactor() * - host_set.healthyHosts().size() / host_set.hosts().size())); + per_priority_health.get()[priority] = std::min( + 100, (host_set.overprovisioningFactor() * host_set.healthyHosts().size() / host_count)); // We perform the same computation for degraded hosts. 
- per_priority_degraded.get()[priority] = - std::min(100, (host_set.overprovisioningFactor() * - host_set.degradedHosts().size() / host_set.hosts().size())); + per_priority_degraded.get()[priority] = std::min( + 100, (host_set.overprovisioningFactor() * host_set.degradedHosts().size() / host_count)); } // Now that we've updated health for the changed priority level, we need to calculate percentage @@ -442,13 +443,12 @@ HostConstSharedPtr LoadBalancerBase::chooseHost(LoadBalancerContext* context) { bool LoadBalancerBase::isGlobalPanic(const HostSet& host_set) { uint64_t global_panic_threshold = std::min( 100, runtime_.snapshot().getInteger(RuntimePanicThreshold, default_healthy_panic_percent_)); - double healthy_percent = host_set.hosts().size() == 0 - ? 0 - : 100.0 * host_set.healthyHosts().size() / host_set.hosts().size(); + const auto host_count = host_set.hosts().size() - host_set.excludedHosts().size(); + double healthy_percent = + host_count == 0 ? 0.0 : 100.0 * host_set.healthyHosts().size() / host_count; - double degraded_percent = host_set.hosts().size() == 0 - ? 0 - : 100.0 * host_set.degradedHosts().size() / host_set.hosts().size(); + double degraded_percent = + host_count == 0 ? 0.0 : 100.0 * host_set.degradedHosts().size() / host_count; // If the % of healthy hosts in the cluster is less than our panic threshold, we use all hosts. if ((healthy_percent + degraded_percent) < global_panic_threshold) { return true; @@ -464,6 +464,8 @@ void ZoneAwareLoadBalancerBase::calculateLocalityPercentage( total_hosts += locality_hosts.size(); } + // TODO(snowp): Should we ignore excluded hosts here too? + size_t i = 0; for (const auto& locality_hosts : hosts_per_locality.get()) { ret[i++] = total_hosts > 0 ? 10000ULL * locality_hosts.size() / total_hosts : 0; @@ -538,7 +540,13 @@ ZoneAwareLoadBalancerBase::hostSourceToUse(LoadBalancerContext* context) { } // If we're doing locality weighted balancing, pick locality. 
- const absl::optional locality = host_set.chooseLocality(); + absl::optional locality; + if (host_availability == HostAvailability::Degraded) { + locality = host_set.chooseDegradedLocality(); + } else { + locality = host_set.chooseHealthyLocality(); + } + if (locality.has_value()) { hosts_source.source_type_ = localitySourceType(host_availability); hosts_source.locality_index_ = locality.value(); @@ -686,7 +694,7 @@ HostConstSharedPtr EdfLoadBalancerBase::chooseHostOnce(LoadBalancerContext* cont return host; } else { const HostVector& hosts_to_use = hostSourceToHosts(hosts_source); - if (hosts_to_use.size() == 0) { + if (hosts_to_use.empty()) { return nullptr; } return unweightedHostPick(hosts_to_use, hosts_source); diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h index 7ee9c82e225b2..3f0dca76df6a4 100644 --- a/source/common/upstream/load_balancer_impl.h +++ b/source/common/upstream/load_balancer_impl.h @@ -148,6 +148,8 @@ class LoadBalancerContextBase : public LoadBalancerContext { bool shouldSelectAnotherHost(const Host&) override { return false; } uint32_t hostSelectionRetryCount() const override { return 1; } + + Network::Socket::OptionsSharedPtr upstreamSocketOptions() const override { return {}; } }; /** @@ -482,7 +484,8 @@ class LoadBalancerSubsetInfoImpl : public LoadBalancerSubsetInfo { fallback_policy_(subset_config.fallback_policy()), default_subset_(subset_config.default_subset()), locality_weight_aware_(subset_config.locality_weight_aware()), - scale_locality_weight_(subset_config.scale_locality_weight()) { + scale_locality_weight_(subset_config.scale_locality_weight()), + panic_mode_any_(subset_config.panic_mode_any()) { for (const auto& subset : subset_config.subset_selectors()) { if (!subset.keys().empty()) { subset_keys_.emplace_back( @@ -500,6 +503,7 @@ class LoadBalancerSubsetInfoImpl : public LoadBalancerSubsetInfo { const std::vector>& subsetKeys() const override { return subset_keys_; } 
bool localityWeightAware() const override { return locality_weight_aware_; } bool scaleLocalityWeight() const override { return scale_locality_weight_; } + bool panicModeAny() const override { return panic_mode_any_; } private: const bool enabled_; @@ -508,6 +512,7 @@ class LoadBalancerSubsetInfoImpl : public LoadBalancerSubsetInfo { std::vector> subset_keys_; const bool locality_weight_aware_; const bool scale_locality_weight_; + const bool panic_mode_any_; }; } // namespace Upstream diff --git a/source/common/upstream/load_stats_reporter.cc b/source/common/upstream/load_stats_reporter.cc index eac5bb726ffbe..37ccc8caf1d69 100644 --- a/source/common/upstream/load_stats_reporter.cc +++ b/source/common/upstream/load_stats_reporter.cc @@ -16,7 +16,7 @@ LoadStatsReporter::LoadStatsReporter(const LocalInfo::LocalInfo& local_info, async_client_(std::move(async_client)), service_method_(*Protobuf::DescriptorPool::generated_pool()->FindMethodByName( "envoy.service.load_stats.v2.LoadReportingService.StreamLoadStats")), - time_source_(dispatcher.timeSystem()) { + time_source_(dispatcher.timeSource()) { request_.mutable_node()->MergeFrom(local_info.node()); retry_timer_ = dispatcher.createTimer([this]() -> void { establishNewStream(); }); response_timer_ = dispatcher.createTimer([this]() -> void { sendLoadStatsRequest(); }); @@ -53,17 +53,22 @@ void LoadStatsReporter::sendLoadStatsRequest() { auto& cluster = it->second.get(); auto* cluster_stats = request_.add_cluster_stats(); cluster_stats->set_cluster_name(cluster_name); + if (cluster.info()->eds_service_name().has_value()) { + cluster_stats->set_cluster_service_name(cluster.info()->eds_service_name().value()); + } for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) { ENVOY_LOG(trace, "Load report locality count {}", host_set->hostsPerLocality().get().size()); for (auto& hosts : host_set->hostsPerLocality().get()) { - ASSERT(hosts.size() > 0); + ASSERT(!hosts.empty()); uint64_t rq_success = 0; uint64_t 
rq_error = 0; uint64_t rq_active = 0; + uint64_t rq_issued = 0; for (auto host : hosts) { rq_success += host->stats().rq_success_.latch(); rq_error += host->stats().rq_error_.latch(); rq_active += host->stats().rq_active_.value(); + rq_issued += host->stats().rq_total_.latch(); } if (rq_success + rq_error + rq_active != 0) { auto* locality_stats = cluster_stats->add_upstream_locality_stats(); @@ -72,6 +77,7 @@ void LoadStatsReporter::sendLoadStatsRequest() { locality_stats->set_total_successful_requests(rq_success); locality_stats->set_total_error_requests(rq_error); locality_stats->set_total_requests_in_progress(rq_active); + locality_stats->set_total_issued_requests(rq_issued); } } } @@ -151,6 +157,7 @@ void LoadStatsReporter::startLoadReportPeriod() { for (auto host : host_set->hosts()) { host->stats().rq_success_.latch(); host->stats().rq_error_.latch(); + host->stats().rq_total_.latch(); } } cluster.info()->loadReportStats().upstream_rq_dropped_.latch(); diff --git a/source/common/upstream/logical_dns_cluster.cc b/source/common/upstream/logical_dns_cluster.cc index c5bcc8bbb67df..11c6e1e8549be 100644 --- a/source/common/upstream/logical_dns_cluster.cc +++ b/source/common/upstream/logical_dns_cluster.cc @@ -42,25 +42,17 @@ LogicalDnsCluster::LogicalDnsCluster( } } - switch (cluster.dns_lookup_family()) { - case envoy::api::v2::Cluster::V6_ONLY: - dns_lookup_family_ = Network::DnsLookupFamily::V6Only; - break; - case envoy::api::v2::Cluster::V4_ONLY: - dns_lookup_family_ = Network::DnsLookupFamily::V4Only; - break; - case envoy::api::v2::Cluster::AUTO: - dns_lookup_family_ = Network::DnsLookupFamily::Auto; - break; - default: - NOT_REACHED_GCOVR_EXCL_LINE; - } - const envoy::api::v2::core::SocketAddress& socket_address = lbEndpoint().endpoint().address().socket_address(); + + if (!socket_address.resolver_name().empty()) { + throw EnvoyException("LOGICAL_DNS clusters must NOT have a custom resolver name set"); + } + dns_url_ = fmt::format("tcp://{}:{}", 
socket_address.address(), socket_address.port_value()); hostname_ = Network::Utility::hostFromTcpUrl(dns_url_); Network::Utility::portFromTcpUrl(dns_url_); + dns_lookup_family_ = getDnsLookupFamilyFromCluster(cluster); tls_->set([](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { return std::make_shared(); @@ -110,7 +102,7 @@ void LogicalDnsCluster::startResolve() { break; } const auto& locality_lb_endpoint = localityLbEndpoint(); - PriorityStateManager priority_state_manager(*this, local_info_); + PriorityStateManager priority_state_manager(*this, local_info_, nullptr); priority_state_manager.initializePriorityFor(locality_lb_endpoint); priority_state_manager.registerHostForPriority(logical_host_, locality_lb_endpoint); @@ -151,5 +143,21 @@ Upstream::Host::CreateConnectionData LogicalDnsCluster::LogicalHost::createConne parent_.lbEndpoint(), shared_from_this())}}; } +ClusterImplBaseSharedPtr LogicalDnsClusterFactory::createClusterImpl( + const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) { + auto selected_dns_resolver = selectDnsResolver(cluster, context); + + return std::make_unique(cluster, context.runtime(), selected_dns_resolver, + context.tls(), socket_factory_context, + std::move(stats_scope), context.addedViaApi()); +} + +/** + * Static registration for the strict dns cluster factory. @see RegisterFactory. 
+ */ +REGISTER_FACTORY(LogicalDnsClusterFactory, ClusterFactory); + } // namespace Upstream } // namespace Envoy diff --git a/source/common/upstream/logical_dns_cluster.h b/source/common/upstream/logical_dns_cluster.h index b75b7858da03b..79e60b9698c56 100644 --- a/source/common/upstream/logical_dns_cluster.h +++ b/source/common/upstream/logical_dns_cluster.h @@ -9,8 +9,11 @@ #include "envoy/thread_local/thread_local.h" #include "common/common/empty_string.h" +#include "common/upstream/cluster_factory_impl.h" #include "common/upstream/upstream_impl.h" +#include "extensions/clusters/well_known_names.h" + namespace Envoy { namespace Upstream { @@ -154,5 +157,17 @@ class LogicalDnsCluster : public ClusterImplBase { const envoy::api::v2::ClusterLoadAssignment load_assignment_; }; +class LogicalDnsClusterFactory : public ClusterFactoryImplBase { +public: + LogicalDnsClusterFactory() + : ClusterFactoryImplBase(Extensions::Clusters::ClusterTypes::get().LogicalDns) {} + +private: + ClusterImplBaseSharedPtr + createClusterImpl(const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) override; +}; + } // namespace Upstream } // namespace Envoy diff --git a/source/common/upstream/maglev_lb.cc b/source/common/upstream/maglev_lb.cc index 701e325b40190..2fba318f88302 100644 --- a/source/common/upstream/maglev_lb.cc +++ b/source/common/upstream/maglev_lb.cc @@ -3,9 +3,10 @@ namespace Envoy { namespace Upstream { -MaglevTable::MaglevTable(const HostsPerLocality& hosts_per_locality, - const LocalityWeightsConstSharedPtr& locality_weights, uint64_t table_size) - : table_size_(table_size) { +MaglevTable::MaglevTable(const NormalizedHostWeightVector& normalized_host_weights, + double max_normalized_weight, uint64_t table_size, + MaglevLoadBalancerStats& stats) + : table_size_(table_size), stats_(stats) { // TODO(mattklein123): The Maglev table must have a 
size that is a prime number for the algorithm // to work. Currently, the table size is not user configurable. In the future, if the table size // is made user configurable, we will need proper error checking that the user cannot configure a @@ -13,77 +14,37 @@ MaglevTable::MaglevTable(const HostsPerLocality& hosts_per_locality, // not good!). ASSERT(Primes::isPrime(table_size)); - // Sanity-check that the locality weights, if provided, line up with the hosts per locality. - if (locality_weights != nullptr) { - ASSERT(locality_weights->size() == hosts_per_locality.get().size()); - } - - // Compute host weight combined with locality weight where applicable. - const auto effective_weight = [&locality_weights](uint32_t host_weight, - uint32_t locality_index) -> uint32_t { - ASSERT(host_weight != 0); - if (locality_weights == nullptr) { - return host_weight; - } else { - auto locality_weight = (*locality_weights)[locality_index]; - // This might be zero, since locality weight might not be specified. - return host_weight * locality_weight; - } - }; - - // Compute maximum host weight. - uint32_t max_host_weight = 0; - uint32_t total_hosts = 0; - for (uint32_t i = 0; i < hosts_per_locality.get().size(); ++i) { - for (const auto& host : hosts_per_locality.get()[i]) { - max_host_weight = std::max(effective_weight(host->weight(), i), max_host_weight); - ++total_hosts; - } - } - // We can't do anything sensible with no hosts. - if (total_hosts == 0) { + if (normalized_host_weights.empty()) { return; } // Implementation of pseudocode listing 1 in the paper (see header file for more info). 
std::vector table_build_entries; - table_build_entries.reserve(total_hosts); - for (uint32_t i = 0; i < hosts_per_locality.get().size(); ++i) { - for (const auto& host : hosts_per_locality.get()[i]) { - const std::string& address = host->address()->asString(); - const uint32_t weight = effective_weight(host->weight(), i); - // If weight is zero, it should be totally excluded from table building - // below. - if (weight > 0) { - table_build_entries.emplace_back(host, HashUtil::xxHash64(address) % table_size_, - (HashUtil::xxHash64(address, 1) % (table_size_ - 1)) + 1, - weight); - } - } - } - - // We can't do anything sensible with no table entries. - if (table_build_entries.empty()) { - return; + table_build_entries.reserve(normalized_host_weights.size()); + for (const auto& host_weight : normalized_host_weights) { + const auto& host = host_weight.first; + const std::string& address = host->address()->asString(); + table_build_entries.emplace_back(host, HashUtil::xxHash64(address) % table_size_, + (HashUtil::xxHash64(address, 1) % (table_size_ - 1)) + 1, + host_weight.second); } table_.resize(table_size_); + + // Iterate through the table build entries as many times as it takes to fill up the table. uint64_t table_index = 0; - uint32_t iteration = 1; - while (true) { - for (uint64_t i = 0; i < table_build_entries.size(); i++) { + for (uint32_t iteration = 1; table_index < table_size_; ++iteration) { + for (uint64_t i = 0; i < table_build_entries.size() && table_index < table_size; i++) { TableBuildEntry& entry = table_build_entries[i]; - // Counts are in units of max_host_weight. To understand how counts_ and - // weight_ are used below, consider a host with weight equal to - // max_host_weight. This would be picked on every single iteration. If - // it had weight equal to backend_weight_scale / 3, then this would only - // happen every 3 iterations, etc. 
- if (iteration * entry.weight_ < entry.counts_) { - ASSERT(max_host_weight > 1); + // To understand how target_weight_ and weight_ are used below, consider a host with weight + // equal to max_normalized_weight. This would be picked on every single iteration. If it had + // weight equal to max_normalized_weight / 3, then it would only be picked every 3 iterations, + // etc. + if (iteration * entry.weight_ < entry.target_weight_) { continue; } - entry.counts_ += max_host_weight; + entry.target_weight_ += max_normalized_weight; uint64_t c = permutation(entry); while (table_[c] != nullptr) { entry.next_++; @@ -92,17 +53,24 @@ MaglevTable::MaglevTable(const HostsPerLocality& hosts_per_locality, table_[c] = entry.host_; entry.next_++; + entry.count_++; table_index++; - if (table_index == table_size_) { - if (ENVOY_LOG_CHECK_LEVEL(trace)) { - for (uint64_t i = 0; i < table_.size(); i++) { - ENVOY_LOG(trace, "maglev: i={} host={}", i, table_[i]->address()->asString()); - } - } - return; - } } - ++iteration; + } + + uint64_t min_entries_per_host = table_size_; + uint64_t max_entries_per_host = 0; + for (const auto& entry : table_build_entries) { + min_entries_per_host = std::min(entry.count_, min_entries_per_host); + max_entries_per_host = std::max(entry.count_, max_entries_per_host); + } + stats_.min_entries_per_host_.set(min_entries_per_host); + stats_.max_entries_per_host_.set(max_entries_per_host); + + if (ENVOY_LOG_CHECK_LEVEL(trace)) { + for (uint64_t i = 0; i < table_.size(); i++) { + ENVOY_LOG(trace, "maglev: i={} host={}", i, table_[i]->address()->asString()); + } } } @@ -118,5 +86,18 @@ uint64_t MaglevTable::permutation(const TableBuildEntry& entry) { return (entry.offset_ + (entry.skip_ * entry.next_)) % table_size_; } +MaglevLoadBalancer::MaglevLoadBalancer(const PrioritySet& priority_set, ClusterStats& stats, + Stats::Scope& scope, Runtime::Loader& runtime, + Runtime::RandomGenerator& random, + const envoy::api::v2::Cluster::CommonLbConfig& common_config, + 
uint64_t table_size) + : ThreadAwareLoadBalancerBase(priority_set, stats, runtime, random, common_config), + scope_(scope.createScope("maglev_lb.")), stats_(generateStats(*scope_)), + table_size_(table_size) {} + +MaglevLoadBalancerStats MaglevLoadBalancer::generateStats(Stats::Scope& scope) { + return {ALL_MAGLEV_LOAD_BALANCER_STATS(POOL_GAUGE(scope))}; +} + } // namespace Upstream } // namespace Envoy diff --git a/source/common/upstream/maglev_lb.h b/source/common/upstream/maglev_lb.h index 58ab7e714bd67..3d2d34d3c7b4a 100644 --- a/source/common/upstream/maglev_lb.h +++ b/source/common/upstream/maglev_lb.h @@ -1,11 +1,30 @@ #pragma once +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" + #include "common/upstream/thread_aware_lb_impl.h" #include "common/upstream/upstream_impl.h" namespace Envoy { namespace Upstream { +/** + * All Maglev load balancer stats. @see stats_macros.h + */ +// clang-format off +#define ALL_MAGLEV_LOAD_BALANCER_STATS(GAUGE) \ + GAUGE(min_entries_per_host) \ + GAUGE(max_entries_per_host) +// clang-format on + +/** + * Struct definition for all Maglev load balancer stats. 
@see stats_macros.h + */ +struct MaglevLoadBalancerStats { + ALL_MAGLEV_LOAD_BALANCER_STATS(GENERATE_GAUGE_STRUCT) +}; + /** * This is an implementation of Maglev consistent hashing as described in: * https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/44824.pdf @@ -15,9 +34,8 @@ namespace Upstream { class MaglevTable : public ThreadAwareLoadBalancerBase::HashingLoadBalancer, Logger::Loggable { public: - MaglevTable(const HostsPerLocality& hosts_per_locality, - const LocalityWeightsConstSharedPtr& locality_weights, - uint64_t table_size = DefaultTableSize); + MaglevTable(const NormalizedHostWeightVector& normalized_host_weights, + double max_normalized_weight, uint64_t table_size, MaglevLoadBalancerStats& stats); // ThreadAwareLoadBalancerBase::HashingLoadBalancer HostConstSharedPtr chooseHost(uint64_t hash) const override; @@ -27,21 +45,23 @@ class MaglevTable : public ThreadAwareLoadBalancerBase::HashingLoadBalancer, private: struct TableBuildEntry { - TableBuildEntry(const HostSharedPtr& host, uint64_t offset, uint64_t skip, uint64_t weight) + TableBuildEntry(const HostConstSharedPtr& host, uint64_t offset, uint64_t skip, double weight) : host_(host), offset_(offset), skip_(skip), weight_(weight) {} - HostSharedPtr host_; + HostConstSharedPtr host_; const uint64_t offset_; const uint64_t skip_; - const uint64_t weight_; - uint64_t counts_{}; + const double weight_; + double target_weight_{}; uint64_t next_{}; + uint64_t count_{}; }; uint64_t permutation(const TableBuildEntry& entry); const uint64_t table_size_; - HostVector table_; + std::vector table_; + MaglevLoadBalancerStats& stats_; }; /** @@ -49,40 +69,26 @@ class MaglevTable : public ThreadAwareLoadBalancerBase::HashingLoadBalancer, */ class MaglevLoadBalancer : public ThreadAwareLoadBalancerBase { public: - MaglevLoadBalancer(const PrioritySet& priority_set, ClusterStats& stats, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + MaglevLoadBalancer(const 
PrioritySet& priority_set, ClusterStats& stats, Stats::Scope& scope, + Runtime::Loader& runtime, Runtime::RandomGenerator& random, const envoy::api::v2::Cluster::CommonLbConfig& common_config, - uint64_t table_size = MaglevTable::DefaultTableSize) - : ThreadAwareLoadBalancerBase(priority_set, stats, runtime, random, common_config), - table_size_(table_size) {} + uint64_t table_size = MaglevTable::DefaultTableSize); + + const MaglevLoadBalancerStats& stats() const { return stats_; } private: // ThreadAwareLoadBalancerBase - HashingLoadBalancerSharedPtr createLoadBalancer(const HostSet& host_set, bool in_panic) override { - // Note that we only compute global panic on host set refresh. Given that the runtime setting - // will rarely change, this is a reasonable compromise to avoid creating extra LBs when we only - // need to create one per priority level. - const bool has_locality = - host_set.localityWeights() != nullptr && !host_set.localityWeights()->empty(); - if (in_panic) { - if (!has_locality) { - return std::make_shared(HostsPerLocalityImpl(host_set.hosts(), false), nullptr, - table_size_); - } else { - return std::make_shared(host_set.hostsPerLocality(), - host_set.localityWeights(), table_size_); - } - } else { - if (!has_locality) { - return std::make_shared(HostsPerLocalityImpl(host_set.healthyHosts(), false), - nullptr, table_size_); - } else { - return std::make_shared(host_set.healthyHostsPerLocality(), - host_set.localityWeights(), table_size_); - } - } + HashingLoadBalancerSharedPtr + createLoadBalancer(const NormalizedHostWeightVector& normalized_host_weights, + double /* min_normalized_weight */, double max_normalized_weight) override { + return std::make_shared(normalized_host_weights, max_normalized_weight, + table_size_, stats_); } + static MaglevLoadBalancerStats generateStats(Stats::Scope& scope); + + Stats::ScopePtr scope_; + MaglevLoadBalancerStats stats_; const uint64_t table_size_; }; diff --git 
a/source/common/upstream/original_dst_cluster.cc b/source/common/upstream/original_dst_cluster.cc index 1b54ca9f4faef..c5c2dfe48a208 100644 --- a/source/common/upstream/original_dst_cluster.cc +++ b/source/common/upstream/original_dst_cluster.cc @@ -69,7 +69,7 @@ HostConstSharedPtr OriginalDstCluster::LoadBalancer::chooseHost(LoadBalancerCont if (host) { ENVOY_LOG(debug, "Using existing host {}.", host->address()->asString()); host->used(true); // Mark as used. - return std::move(host); + return host; } // Add a new host const Network::Address::Ip* dst_ip = dst_addr.ip(); @@ -100,7 +100,7 @@ HostConstSharedPtr OriginalDstCluster::LoadBalancer::chooseHost(LoadBalancerCont }); } - return std::move(host); + return host; } else { ENVOY_LOG(debug, "Failed to create host for {}.", dst_addr.asString()); } @@ -117,8 +117,10 @@ OriginalDstCluster::LoadBalancer::requestOverrideHost(LoadBalancerContext* conte const Http::HeaderMap* downstream_headers = context->downstreamHeaders(); if (downstream_headers && downstream_headers->get(Http::Headers::get().EnvoyOriginalDstHost) != nullptr) { - const std::string& request_override_host = - downstream_headers->get(Http::Headers::get().EnvoyOriginalDstHost)->value().c_str(); + const std::string request_override_host( + downstream_headers->get(Http::Headers::get().EnvoyOriginalDstHost) + ->value() + .getStringView()); try { request_host = Network::Utility::parseInternetAddressAndPort(request_override_host, false); ENVOY_LOG(debug, "Using request override host {}.", request_override_host); @@ -146,11 +148,12 @@ OriginalDstCluster::OriginalDstCluster( void OriginalDstCluster::addHost(HostSharedPtr& host) { // Given the current config, only EDS clusters support multiple priorities. 
ASSERT(priority_set_.hostSetsPerPriority().size() == 1); - auto& first_host_set = priority_set_.getOrCreateHostSet(0); + const auto& first_host_set = priority_set_.getOrCreateHostSet(0); HostVectorSharedPtr new_hosts(new HostVector(first_host_set.hosts())); new_hosts->emplace_back(host); - first_host_set.updateHosts(HostSetImpl::partitionHosts(new_hosts, HostsPerLocalityImpl::empty()), - {}, {std::move(host)}, {}, absl::nullopt); + priority_set_.updateHosts(0, + HostSetImpl::partitionHosts(new_hosts, HostsPerLocalityImpl::empty()), + {}, {std::move(host)}, {}, absl::nullopt); } void OriginalDstCluster::cleanup() { @@ -158,27 +161,52 @@ void OriginalDstCluster::cleanup() { HostVector to_be_removed; // Given the current config, only EDS clusters support multiple priorities. ASSERT(priority_set_.hostSetsPerPriority().size() == 1); - auto& host_set = priority_set_.getOrCreateHostSet(0); - - ENVOY_LOG(debug, "Cleaning up stale original dst hosts."); - for (const HostSharedPtr& host : host_set.hosts()) { - if (host->used()) { - ENVOY_LOG(debug, "Keeping active host {}.", host->address()->asString()); - new_hosts->emplace_back(host); - host->used(false); // Mark to be removed during the next round. - } else { - ENVOY_LOG(debug, "Removing stale host {}.", host->address()->asString()); - to_be_removed.emplace_back(host); + const auto& host_set = priority_set_.getOrCreateHostSet(0); + ENVOY_LOG(trace, "Stale original dst hosts cleanup triggered."); + if (!host_set.hosts().empty()) { + ENVOY_LOG(debug, "Cleaning up stale original dst hosts."); + for (const HostSharedPtr& host : host_set.hosts()) { + if (host->used()) { + ENVOY_LOG(debug, "Keeping active host {}.", host->address()->asString()); + new_hosts->emplace_back(host); + host->used(false); // Mark to be removed during the next round. 
+ } else { + ENVOY_LOG(debug, "Removing stale host {}.", host->address()->asString()); + to_be_removed.emplace_back(host); + } } } - if (to_be_removed.size() > 0) { - host_set.updateHosts(HostSetImpl::partitionHosts(new_hosts, HostsPerLocalityImpl::empty()), {}, - {}, to_be_removed, absl::nullopt); + if (!to_be_removed.empty()) { + priority_set_.updateHosts(0, + HostSetImpl::partitionHosts(new_hosts, HostsPerLocalityImpl::empty()), + {}, {}, to_be_removed, absl::nullopt); } cleanup_timer_->enableTimer(cleanup_interval_ms_); } +ClusterImplBaseSharedPtr OriginalDstClusterFactory::createClusterImpl( + const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) { + if (cluster.lb_policy() != envoy::api::v2::Cluster::ORIGINAL_DST_LB) { + throw EnvoyException(fmt::format( + "cluster: cluster type 'original_dst' may only be used with LB type 'original_dst_lb'")); + } + if (cluster.has_lb_subset_config() && cluster.lb_subset_config().subset_selectors_size() != 0) { + throw EnvoyException( + fmt::format("cluster: cluster type 'original_dst' may not be used with lb_subset_config")); + } + + return std::make_unique(cluster, context.runtime(), socket_factory_context, + std::move(stats_scope), context.addedViaApi()); +} + +/** + * Static registration for the strict dns cluster factory. @see RegisterFactory. 
+ */ +REGISTER_FACTORY(OriginalDstClusterFactory, ClusterFactory); + } // namespace Upstream } // namespace Envoy diff --git a/source/common/upstream/original_dst_cluster.h b/source/common/upstream/original_dst_cluster.h index b88f18d57be29..a061e8ee18475 100644 --- a/source/common/upstream/original_dst_cluster.h +++ b/source/common/upstream/original_dst_cluster.h @@ -12,8 +12,11 @@ #include "common/common/empty_string.h" #include "common/common/logger.h" +#include "common/upstream/cluster_factory_impl.h" #include "common/upstream/upstream_impl.h" +#include "extensions/clusters/well_known_names.h" + namespace Envoy { namespace Upstream { @@ -116,5 +119,17 @@ class OriginalDstCluster : public ClusterImplBase { Event::TimerPtr cleanup_timer_; }; +class OriginalDstClusterFactory : public ClusterFactoryImplBase { +public: + OriginalDstClusterFactory() + : ClusterFactoryImplBase(Extensions::Clusters::ClusterTypes::get().OriginalDst) {} + +private: + ClusterImplBaseSharedPtr + createClusterImpl(const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) override; +}; + } // namespace Upstream } // namespace Envoy diff --git a/source/common/upstream/outlier_detection_impl.cc b/source/common/upstream/outlier_detection_impl.cc index 0998c5335b131..d9e906a0949d0 100644 --- a/source/common/upstream/outlier_detection_impl.cc +++ b/source/common/upstream/outlier_detection_impl.cc @@ -27,7 +27,7 @@ DetectorSharedPtr DetectorImplFactory::createForCluster( if (cluster_config.has_outlier_detection()) { return DetectorImpl::create(cluster, cluster_config.outlier_detection(), dispatcher, runtime, - dispatcher.timeSystem(), std::move(event_logger)); + dispatcher.timeSource(), std::move(event_logger)); } else { return nullptr; } diff --git a/source/common/upstream/outlier_detection_impl.h b/source/common/upstream/outlier_detection_impl.h index 
639a483c9abd6..fb7efd2b03ea6 100644 --- a/source/common/upstream/outlier_detection_impl.h +++ b/source/common/upstream/outlier_detection_impl.h @@ -278,7 +278,7 @@ class EventLoggerImpl : public EventLogger { const HostDescriptionConstSharedPtr& host, absl::optional time); - Filesystem::FileSharedPtr file_; + AccessLog::AccessLogFileSharedPtr file_; TimeSource& time_source_; }; diff --git a/source/common/upstream/priority_conn_pool_map.h b/source/common/upstream/priority_conn_pool_map.h new file mode 100644 index 0000000000000..30636728d31b4 --- /dev/null +++ b/source/common/upstream/priority_conn_pool_map.h @@ -0,0 +1,59 @@ +#pragma once + +#include "envoy/event/dispatcher.h" +#include "envoy/upstream/resource_manager.h" +#include "envoy/upstream/upstream.h" + +#include "common/upstream/conn_pool_map.h" + +namespace Envoy { +namespace Upstream { +/** + * A class mapping keys to connection pools, with some recycling logic built in. + */ +template class PriorityConnPoolMap { +public: + using ConnPoolMapType = ConnPoolMap; + using PoolFactory = typename ConnPoolMapType::PoolFactory; + using DrainedCb = typename ConnPoolMapType::DrainedCb; + using OptPoolRef = typename ConnPoolMapType::OptPoolRef; + + PriorityConnPoolMap(Event::Dispatcher& dispatcher, const HostConstSharedPtr& host); + ~PriorityConnPoolMap(); + /** + * Returns an existing pool for the given priority and `key`, or creates a new one using + * `factory`. Note that it is possible for this to fail if a limit on the number of pools allowed + * is reached. + * @return The pool corresponding to `key`, or `absl::nullopt`. + */ + OptPoolRef getPool(ResourcePriority priority, KEY_TYPE key, const PoolFactory& factory); + + /** + * @return the number of pools across all priorities. + */ + size_t size() const; + + /** + * Destroys all mapped pools. + */ + void clear(); + + /** + * Adds a drain callback to all mapped pools. Any future mapped pools with have the callback + * automatically added. 
Be careful with the callback. If it itself calls into `this`, modifying + * the state of `this`, there is a good chance it will cause corruption due to the callback firing + * immediately. + */ + void addDrainedCallback(const DrainedCb& cb); + + /** + * Instructs each connection pool to drain its connections. + */ + void drainConnections(); + +private: + std::array, NumResourcePriorities> conn_pool_maps_; +}; + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/upstream/priority_conn_pool_map_impl.h b/source/common/upstream/priority_conn_pool_map_impl.h new file mode 100644 index 0000000000000..cfe1c021393bc --- /dev/null +++ b/source/common/upstream/priority_conn_pool_map_impl.h @@ -0,0 +1,61 @@ +#pragma once + +#include "common/upstream/conn_pool_map_impl.h" +#include "common/upstream/priority_conn_pool_map.h" + +namespace Envoy { +namespace Upstream { + +template +PriorityConnPoolMap::PriorityConnPoolMap(Envoy::Event::Dispatcher& dispatcher, + const HostConstSharedPtr& host) { + for (size_t pool_map_index = 0; pool_map_index < NumResourcePriorities; ++pool_map_index) { + ResourcePriority priority = static_cast(pool_map_index); + conn_pool_maps_[pool_map_index].reset(new ConnPoolMapType(dispatcher, host, priority)); + } +} + +template +PriorityConnPoolMap::~PriorityConnPoolMap() = default; + +template +typename PriorityConnPoolMap::OptPoolRef +PriorityConnPoolMap::getPool(ResourcePriority priority, KEY_TYPE key, + const PoolFactory& factory) { + size_t index = static_cast(priority); + ASSERT(index < conn_pool_maps_.size()); + return conn_pool_maps_[index]->getPool(key, factory); +} + +template +size_t PriorityConnPoolMap::size() const { + size_t size = 0; + for (const auto& pool_map : conn_pool_maps_) { + size += pool_map->size(); + } + return size; +} + +template +void PriorityConnPoolMap::clear() { + for (auto& pool_map : conn_pool_maps_) { + pool_map->clear(); + } +} + +template +void PriorityConnPoolMap::addDrainedCallback(const 
DrainedCb& cb) { + for (auto& pool_map : conn_pool_maps_) { + pool_map->addDrainedCallback(cb); + } +} + +template +void PriorityConnPoolMap::drainConnections() { + for (auto& pool_map : conn_pool_maps_) { + pool_map->drainConnections(); + } +} + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/upstream/resource_manager_impl.h b/source/common/upstream/resource_manager_impl.h index 35cf4bf87b7ff..a887182c63983 100644 --- a/source/common/upstream/resource_manager_impl.h +++ b/source/common/upstream/resource_manager_impl.h @@ -27,40 +27,66 @@ class ResourceManagerImpl : public ResourceManager { public: ResourceManagerImpl(Runtime::Loader& runtime, const std::string& runtime_key, uint64_t max_connections, uint64_t max_pending_requests, - uint64_t max_requests, uint64_t max_retries, + uint64_t max_requests, uint64_t max_retries, uint64_t max_connection_pools, ClusterCircuitBreakersStats cb_stats) - : connections_(max_connections, runtime, runtime_key + "max_connections", cb_stats.cx_open_), + : connections_(max_connections, runtime, runtime_key + "max_connections", cb_stats.cx_open_, + cb_stats.remaining_cx_), pending_requests_(max_pending_requests, runtime, runtime_key + "max_pending_requests", - cb_stats.rq_pending_open_), - requests_(max_requests, runtime, runtime_key + "max_requests", cb_stats.rq_open_), - retries_(max_retries, runtime, runtime_key + "max_retries", cb_stats.rq_retry_open_) {} + cb_stats.rq_pending_open_, cb_stats.remaining_pending_), + requests_(max_requests, runtime, runtime_key + "max_requests", cb_stats.rq_open_, + cb_stats.remaining_rq_), + retries_(max_retries, runtime, runtime_key + "max_retries", cb_stats.rq_retry_open_, + cb_stats.remaining_retries_), + connection_pools_(max_connection_pools, runtime, runtime_key + "max_connection_pools", + cb_stats.cx_pool_open_, cb_stats.remaining_cx_pools_) {} // Upstream::ResourceManager Resource& connections() override { return connections_; } Resource& pendingRequests() 
override { return pending_requests_; } Resource& requests() override { return requests_; } Resource& retries() override { return retries_; } + Resource& connectionPools() override { return connection_pools_; } private: struct ResourceImpl : public Resource { ResourceImpl(uint64_t max, Runtime::Loader& runtime, const std::string& runtime_key, - Stats::Gauge& open_gauge) - : max_(max), runtime_(runtime), runtime_key_(runtime_key), open_gauge_(open_gauge) {} + Stats::Gauge& open_gauge, Stats::Gauge& remaining) + : max_(max), runtime_(runtime), runtime_key_(runtime_key), open_gauge_(open_gauge), + remaining_(remaining) { + remaining_.set(max); + } ~ResourceImpl() { ASSERT(current_ == 0); } // Upstream::Resource bool canCreate() override { return current_ < max(); } void inc() override { current_++; + updateRemaining(); open_gauge_.set(canCreate() ? 0 : 1); } - void dec() override { - ASSERT(current_ > 0); - current_--; + void dec() override { decBy(1); } + void decBy(uint64_t amount) override { + ASSERT(current_ >= amount); + current_ -= amount; + updateRemaining(); open_gauge_.set(canCreate() ? 0 : 1); } uint64_t max() override { return runtime_.snapshot().getInteger(runtime_key_, max_); } + /** + * We set the gauge instead of incrementing and decrementing because, + * though atomics are used, it is possible for the current resource count + * to be greater than the supplied max. + */ + void updateRemaining() { + /** + * We cannot use std::max here because max() and current_ are + * unsigned and subtracting them may overflow. + */ + const uint64_t current_copy = current_; + remaining_.set(max() > current_copy ? max() - current_copy : 0); + } + const uint64_t max_; std::atomic current_{}; Runtime::Loader& runtime_; @@ -72,12 +98,18 @@ class ResourceManagerImpl : public ResourceManager { * is open. */ Stats::Gauge& open_gauge_; + + /** + * The number of resources remaining before the circuit breaker opens. 
+ */ + Stats::Gauge& remaining_; }; ResourceImpl connections_; ResourceImpl pending_requests_; ResourceImpl requests_; ResourceImpl retries_; + ResourceImpl connection_pools_; }; typedef std::unique_ptr ResourceManagerImplPtr; diff --git a/source/common/upstream/ring_hash_lb.cc b/source/common/upstream/ring_hash_lb.cc index 134bf38daba46..2489b8e44cd91 100644 --- a/source/common/upstream/ring_hash_lb.cc +++ b/source/common/upstream/ring_hash_lb.cc @@ -14,12 +14,31 @@ namespace Envoy { namespace Upstream { RingHashLoadBalancer::RingHashLoadBalancer( - PrioritySet& priority_set, ClusterStats& stats, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + const PrioritySet& priority_set, ClusterStats& stats, Stats::Scope& scope, + Runtime::Loader& runtime, Runtime::RandomGenerator& random, const absl::optional& config, const envoy::api::v2::Cluster::CommonLbConfig& common_config) : ThreadAwareLoadBalancerBase(priority_set, stats, runtime, random, common_config), - config_(config) {} + scope_(scope.createScope("ring_hash_lb.")), stats_(generateStats(*scope_)), + min_ring_size_(config ? PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.value(), minimum_ring_size, + DefaultMinRingSize) + : DefaultMinRingSize), + max_ring_size_(config ? PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.value(), maximum_ring_size, + DefaultMaxRingSize) + : DefaultMaxRingSize), + hash_function_(config ? config.value().hash_function() + : HashFunction::Cluster_RingHashLbConfig_HashFunction_XX_HASH) { + // It's important to do any config validation here, rather than deferring to Ring's ctor, because + // any exceptions thrown here will be caught and handled properly. 
+ if (min_ring_size_ > max_ring_size_) { + throw EnvoyException(fmt::format("ring hash: minimum_ring_size ({}) > maximum_ring_size ({})", + min_ring_size_, max_ring_size_)); + } +} + +RingHashLoadBalancerStats RingHashLoadBalancer::generateStats(Stats::Scope& scope) { + return {ALL_RING_HASH_LOAD_BALANCER_STATS(POOL_GAUGE(scope))}; +} HostConstSharedPtr RingHashLoadBalancer::Ring::chooseHost(uint64_t h) const { if (ring_.empty()) { @@ -59,46 +78,55 @@ HostConstSharedPtr RingHashLoadBalancer::Ring::chooseHost(uint64_t h) const { } using HashFunction = envoy::api::v2::Cluster_RingHashLbConfig_HashFunction; -RingHashLoadBalancer::Ring::Ring( - const absl::optional& config, - const HostVector& hosts) { +RingHashLoadBalancer::Ring::Ring(const NormalizedHostWeightVector& normalized_host_weights, + double min_normalized_weight, uint64_t min_ring_size, + uint64_t max_ring_size, HashFunction hash_function, + RingHashLoadBalancerStats& stats) + : stats_(stats) { ENVOY_LOG(trace, "ring hash: building ring"); - if (hosts.empty()) { - return; - } - // Currently we specify the minimum size of the ring, and determine the replication factor - // based on the number of hosts. It's possible we might want to support more sophisticated - // configuration in the future. - // NOTE: Currently we keep a ring for healthy hosts and unhealthy hosts, and this is done per - // thread. This is the simplest implementation, but it's expensive from a memory - // standpoint and duplicates the regeneration computation. In the future we might want - // to generate the rings centrally and then just RCU them out to each thread. This is - // sufficient for getting started. - const uint64_t min_ring_size = - config ? 
PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.value(), minimum_ring_size, 1024) : 1024; - - uint64_t hashes_per_host = 1; - if (hosts.size() < min_ring_size) { - hashes_per_host = min_ring_size / hosts.size(); - if ((min_ring_size % hosts.size()) != 0) { - hashes_per_host++; - } + // We can't do anything sensible with no hosts. + if (normalized_host_weights.empty()) { + return; } - ENVOY_LOG(info, "ring hash: min_ring_size={} hashes_per_host={}", min_ring_size, hashes_per_host); - ring_.reserve(hosts.size() * hashes_per_host); - - const bool use_std_hash = - config ? PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.value().deprecated_v1(), use_std_hash, false) - : false; - - const HashFunction hash_function = - config ? config.value().hash_function() - : HashFunction::Cluster_RingHashLbConfig_HashFunction_XX_HASH; - + // Scale up the number of hashes per host such that the least-weighted host gets a whole number + // of hashes on the ring. Other hosts might not end up with whole numbers, and that's fine (the + // ring-building algorithm below can handle this). This preserves the original implementation's + // behavior: when weights aren't provided, all hosts should get an equal number of hashes. In the + // case where this number exceeds the max_ring_size, it's scaled back down to fit. + const double scale = + std::min(std::ceil(min_normalized_weight * min_ring_size) / min_normalized_weight, + static_cast(max_ring_size)); + + // Reserve memory for the entire ring up front. + const uint64_t ring_size = std::ceil(scale); + ring_.reserve(ring_size); + + // Populate the hash ring by walking through the (host, weight) pairs in normalized_host_weights, + // and generating (scale * weight) hashes for each host. Since these aren't necessarily whole + // numbers, we maintain running sums -- current_hashes and target_hashes -- which allows us to + // populate the ring in a mostly stable way. 
+ // + // For example, suppose we have 4 hosts, each with a normalized weight of 0.25, and a scale of + // 6.0 (because the max_ring_size is 6). That means we want to generate 1.5 hashes per host. + // We start the outer loop with current_hashes = 0 and target_hashes = 0. + // - For the first host, we set target_hashes = 1.5. After one run of the inner loop, + // current_hashes = 1. After another run, current_hashes = 2, so the inner loop ends. + // - For the second host, target_hashes becomes 3.0, and current_hashes is 2 from before. + // After only one run of the inner loop, current_hashes = 3, so the inner loop ends. + // - Likewise, the third host gets two hashes, and the fourth host gets one hash. + // + // For stats reporting, keep track of the minimum and maximum actual number of hashes per host. + // Users should hopefully pay attention to these numbers and alert if min_hashes_per_host is too + // low, since that implies an inaccurate request distribution. char hash_key_buffer[196]; - for (const auto& host : hosts) { + double current_hashes = 0.0; + double target_hashes = 0.0; + uint64_t min_hashes_per_host = ring_size; + uint64_t max_hashes_per_host = 0; + for (const auto& entry : normalized_host_weights) { + const auto& host = entry.first; const std::string& address_string = host->address()->asString(); uint64_t offset_start = address_string.size(); @@ -113,24 +141,29 @@ RingHashLoadBalancer::Ring::Ring( address_string.size() + 1 + StringUtil::MIN_ITOA_OUT_LEN <= sizeof(hash_key_buffer), ""); memcpy(hash_key_buffer, address_string.c_str(), offset_start); hash_key_buffer[offset_start++] = '_'; - for (uint64_t i = 0; i < hashes_per_host; i++) { + + // As noted above: maintain current_hashes and target_hashes as running sums across the entire + // host set. `i` is needed only to construct the hash key, and tally min/max hashes per host. 
+ target_hashes += scale * entry.second; + uint64_t i = 0; + while (current_hashes < target_hashes) { const uint64_t total_hash_key_len = offset_start + StringUtil::itoa(hash_key_buffer + offset_start, StringUtil::MIN_ITOA_OUT_LEN, i); absl::string_view hash_key(hash_key_buffer, total_hash_key_len); - // Sadly std::hash provides no mechanism for hashing arbitrary bytes so we must copy here. - // xxHash is done without copies. const uint64_t hash = - use_std_hash - ? std::hash()(std::string(hash_key)) - : (hash_function == HashFunction::Cluster_RingHashLbConfig_HashFunction_MURMUR_HASH_2) - ? MurmurHash::murmurHash2_64(hash_key, MurmurHash::STD_HASH_SEED) - : HashUtil::xxHash64(hash_key); + (hash_function == HashFunction::Cluster_RingHashLbConfig_HashFunction_MURMUR_HASH_2) + ? MurmurHash::murmurHash2_64(hash_key, MurmurHash::STD_HASH_SEED) + : HashUtil::xxHash64(hash_key); ENVOY_LOG(trace, "ring hash: hash_key={} hash={}", hash_key.data(), hash); ring_.push_back({hash, host}); + ++i; + ++current_hashes; } + min_hashes_per_host = std::min(i, min_hashes_per_host); + max_hashes_per_host = std::max(i, max_hashes_per_host); } std::sort(ring_.begin(), ring_.end(), [](const RingEntry& lhs, const RingEntry& rhs) -> bool { @@ -142,6 +175,10 @@ RingHashLoadBalancer::Ring::Ring( entry.hash_); } } + + stats_.size_.set(ring_size); + stats_.min_hashes_per_host_.set(min_hashes_per_host); + stats_.max_hashes_per_host_.set(max_hashes_per_host); } } // namespace Upstream diff --git a/source/common/upstream/ring_hash_lb.h b/source/common/upstream/ring_hash_lb.h index 0de89f105db6f..da5d122e3ccd6 100644 --- a/source/common/upstream/ring_hash_lb.h +++ b/source/common/upstream/ring_hash_lb.h @@ -3,6 +3,8 @@ #include #include "envoy/runtime/runtime.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" #include "common/common/logger.h" #include "common/upstream/thread_aware_lb_impl.h" @@ -10,6 +12,23 @@ namespace Envoy { namespace Upstream { +/** + * All ring hash load 
balancer stats. @see stats_macros.h + */ +// clang-format off +#define ALL_RING_HASH_LOAD_BALANCER_STATS(GAUGE) \ + GAUGE(size) \ + GAUGE(min_hashes_per_host) \ + GAUGE(max_hashes_per_host) +// clang-format on + +/** + * Struct definition for all ring hash load balancer stats. @see stats_macros.h + */ +struct RingHashLoadBalancerStats { + ALL_RING_HASH_LOAD_BALANCER_STATS(GENERATE_GAUGE_STRUCT) +}; + /** * A load balancer that implements consistent modulo hashing ("ketama"). Currently, zone aware * routing is not supported. A ring is kept for all hosts as well as a ring for healthy hosts. @@ -22,41 +41,53 @@ namespace Upstream { class RingHashLoadBalancer : public ThreadAwareLoadBalancerBase, Logger::Loggable { public: - RingHashLoadBalancer(PrioritySet& priority_set, ClusterStats& stats, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + RingHashLoadBalancer(const PrioritySet& priority_set, ClusterStats& stats, Stats::Scope& scope, + Runtime::Loader& runtime, Runtime::RandomGenerator& random, const absl::optional& config, const envoy::api::v2::Cluster::CommonLbConfig& common_config); + const RingHashLoadBalancerStats& stats() const { return stats_; } + private: + using HashFunction = envoy::api::v2::Cluster_RingHashLbConfig_HashFunction; + struct RingEntry { uint64_t hash_; HostConstSharedPtr host_; }; struct Ring : public HashingLoadBalancer { - Ring(const absl::optional& config, - const HostVector& hosts); + Ring(const NormalizedHostWeightVector& normalized_host_weights, double min_normalized_weight, + uint64_t min_ring_size, uint64_t max_ring_size, HashFunction hash_function, + RingHashLoadBalancerStats& stats); // ThreadAwareLoadBalancerBase::HashingLoadBalancer HostConstSharedPtr chooseHost(uint64_t hash) const override; std::vector ring_; + + RingHashLoadBalancerStats& stats_; }; typedef std::shared_ptr RingConstSharedPtr; // ThreadAwareLoadBalancerBase - HashingLoadBalancerSharedPtr createLoadBalancer(const HostSet& host_set, bool in_panic) 
override { - // Note that we only compute global panic on host set refresh. Given that the runtime setting - // will rarely change, this is a reasonable compromise to avoid creating extra LBs when we only - // need to create one per priority level. - if (in_panic) { - return std::make_shared(config_, host_set.hosts()); - } else { - return std::make_shared(config_, host_set.healthyHosts()); - } + HashingLoadBalancerSharedPtr + createLoadBalancer(const NormalizedHostWeightVector& normalized_host_weights, + double min_normalized_weight, double /* max_normalized_weight */) override { + return std::make_shared(normalized_host_weights, min_normalized_weight, min_ring_size_, + max_ring_size_, hash_function_, stats_); } - const absl::optional& config_; + static RingHashLoadBalancerStats generateStats(Stats::Scope& scope); + + Stats::ScopePtr scope_; + RingHashLoadBalancerStats stats_; + + static const uint64_t DefaultMinRingSize = 1024; + static const uint64_t DefaultMaxRingSize = 1024 * 1024 * 8; + const uint64_t min_ring_size_; + const uint64_t max_ring_size_; + const HashFunction hash_function_; }; } // namespace Upstream diff --git a/source/common/upstream/static_cluster.cc b/source/common/upstream/static_cluster.cc new file mode 100644 index 0000000000000..c5586e4ee8e08 --- /dev/null +++ b/source/common/upstream/static_cluster.cc @@ -0,0 +1,67 @@ +#include "common/upstream/static_cluster.h" + +namespace Envoy { +namespace Upstream { + +StaticClusterImpl::StaticClusterImpl( + const envoy::api::v2::Cluster& cluster, Runtime::Loader& runtime, + Server::Configuration::TransportSocketFactoryContext& factory_context, + Stats::ScopePtr&& stats_scope, bool added_via_api) + : ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), added_via_api), + priority_state_manager_( + new PriorityStateManager(*this, factory_context.localInfo(), nullptr)) { + // TODO(dio): Use by-reference when cluster.hosts() is removed. 
+ const envoy::api::v2::ClusterLoadAssignment cluster_load_assignment( + cluster.has_load_assignment() ? cluster.load_assignment() + : Config::Utility::translateClusterHosts(cluster.hosts())); + + overprovisioning_factor_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT( + cluster_load_assignment.policy(), overprovisioning_factor, kDefaultOverProvisioningFactor); + + for (const auto& locality_lb_endpoint : cluster_load_assignment.endpoints()) { + priority_state_manager_->initializePriorityFor(locality_lb_endpoint); + for (const auto& lb_endpoint : locality_lb_endpoint.lb_endpoints()) { + priority_state_manager_->registerHostForPriority( + "", resolveProtoAddress(lb_endpoint.endpoint().address()), locality_lb_endpoint, + lb_endpoint); + } + } +} + +void StaticClusterImpl::startPreInit() { + // At this point see if we have a health checker. If so, mark all the hosts unhealthy and + // then fire update callbacks to start the health checking process. + const auto& health_checker_flag = + health_checker_ != nullptr + ? absl::optional(Host::HealthFlag::FAILED_ACTIVE_HC) + : absl::nullopt; + + auto& priority_state = priority_state_manager_->priorityState(); + for (size_t i = 0; i < priority_state.size(); ++i) { + if (priority_state[i].first == nullptr) { + priority_state[i].first = std::make_unique(); + } + priority_state_manager_->updateClusterPrioritySet( + i, std::move(priority_state[i].first), absl::nullopt, absl::nullopt, health_checker_flag, + overprovisioning_factor_); + } + priority_state_manager_.reset(); + + onPreInitComplete(); +} + +ClusterImplBaseSharedPtr StaticClusterFactory::createClusterImpl( + const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) { + return std::make_unique(cluster, context.runtime(), socket_factory_context, + std::move(stats_scope), context.addedViaApi()); +} + +/** + * Static registration for the static cluster factory. 
@see RegisterFactory. + */ +REGISTER_FACTORY(StaticClusterFactory, ClusterFactory); + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/upstream/static_cluster.h b/source/common/upstream/static_cluster.h new file mode 100644 index 0000000000000..984fd5fd7c948 --- /dev/null +++ b/source/common/upstream/static_cluster.h @@ -0,0 +1,46 @@ +#pragma once + +#include "common/upstream/cluster_factory_impl.h" +#include "common/upstream/upstream_impl.h" + +namespace Envoy { +namespace Upstream { + +/** + * Implementation of Upstream::Cluster for static clusters (clusters that have a fixed number of + * hosts with resolved IP addresses). + */ +class StaticClusterImpl : public ClusterImplBase { +public: + StaticClusterImpl(const envoy::api::v2::Cluster& cluster, Runtime::Loader& runtime, + Server::Configuration::TransportSocketFactoryContext& factory_context, + Stats::ScopePtr&& stats_scope, bool added_via_api); + + // Upstream::Cluster + InitializePhase initializePhase() const override { return InitializePhase::Primary; } + +private: + // ClusterImplBase + void startPreInit() override; + + PriorityStateManagerPtr priority_state_manager_; + uint32_t overprovisioning_factor_; +}; + +/** + * Factory for StaticClusterImpl cluster. 
+ */ +class StaticClusterFactory : public ClusterFactoryImplBase { +public: + StaticClusterFactory() + : ClusterFactoryImplBase(Extensions::Clusters::ClusterTypes::get().Static) {} + +private: + ClusterImplBaseSharedPtr + createClusterImpl(const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) override; +}; + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/upstream/strict_dns_cluster.cc b/source/common/upstream/strict_dns_cluster.cc new file mode 100644 index 0000000000000..8914eb6553583 --- /dev/null +++ b/source/common/upstream/strict_dns_cluster.cc @@ -0,0 +1,154 @@ +#include "common/upstream/strict_dns_cluster.h" + +namespace Envoy { +namespace Upstream { + +StrictDnsClusterImpl::StrictDnsClusterImpl( + const envoy::api::v2::Cluster& cluster, Runtime::Loader& runtime, + Network::DnsResolverSharedPtr dns_resolver, + Server::Configuration::TransportSocketFactoryContext& factory_context, + Stats::ScopePtr&& stats_scope, bool added_via_api) + : BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope), + added_via_api), + local_info_(factory_context.localInfo()), dns_resolver_(dns_resolver), + dns_refresh_rate_ms_( + std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(cluster, dns_refresh_rate, 5000))) { + std::list resolve_targets; + const envoy::api::v2::ClusterLoadAssignment load_assignment( + cluster.has_load_assignment() ? 
cluster.load_assignment() + : Config::Utility::translateClusterHosts(cluster.hosts())); + const auto& locality_lb_endpoints = load_assignment.endpoints(); + for (const auto& locality_lb_endpoint : locality_lb_endpoints) { + for (const auto& lb_endpoint : locality_lb_endpoint.lb_endpoints()) { + const auto& socket_address = lb_endpoint.endpoint().address().socket_address(); + if (!socket_address.resolver_name().empty()) { + throw EnvoyException("STRICT_DNS clusters must NOT have a custom resolver name set"); + } + + const std::string& url = + fmt::format("tcp://{}:{}", socket_address.address(), socket_address.port_value()); + resolve_targets.emplace_back(new ResolveTarget(*this, factory_context.dispatcher(), url, + locality_lb_endpoint, lb_endpoint)); + } + } + resolve_targets_ = std::move(resolve_targets); + dns_lookup_family_ = getDnsLookupFamilyFromCluster(cluster); + + overprovisioning_factor_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT( + load_assignment.policy(), overprovisioning_factor, kDefaultOverProvisioningFactor); +} + +void StrictDnsClusterImpl::startPreInit() { + for (const ResolveTargetPtr& target : resolve_targets_) { + target->startResolve(); + } +} + +void StrictDnsClusterImpl::updateAllHosts(const HostVector& hosts_added, + const HostVector& hosts_removed, + uint32_t current_priority) { + PriorityStateManager priority_state_manager(*this, local_info_, nullptr); + // At this point we know that we are different so make a new host list and notify. + // + // TODO(dio): The uniqueness of a host address resolved in STRICT_DNS cluster per priority is not + // guaranteed. Need a clear agreement on the behavior here, whether it is allowable to have + // duplicated hosts inside a priority. And if we want to enforce this behavior, it should be done + // inside the priority state manager. 
+ for (const ResolveTargetPtr& target : resolve_targets_) { + priority_state_manager.initializePriorityFor(target->locality_lb_endpoint_); + for (const HostSharedPtr& host : target->hosts_) { + if (target->locality_lb_endpoint_.priority() == current_priority) { + priority_state_manager.registerHostForPriority(host, target->locality_lb_endpoint_); + } + } + } + + // TODO(dio): Add assertion in here. + priority_state_manager.updateClusterPrioritySet( + current_priority, std::move(priority_state_manager.priorityState()[current_priority].first), + hosts_added, hosts_removed, absl::nullopt, overprovisioning_factor_); +} + +StrictDnsClusterImpl::ResolveTarget::ResolveTarget( + StrictDnsClusterImpl& parent, Event::Dispatcher& dispatcher, const std::string& url, + const envoy::api::v2::endpoint::LocalityLbEndpoints& locality_lb_endpoint, + const envoy::api::v2::endpoint::LbEndpoint& lb_endpoint) + : parent_(parent), dns_address_(Network::Utility::hostFromTcpUrl(url)), + port_(Network::Utility::portFromTcpUrl(url)), + resolve_timer_(dispatcher.createTimer([this]() -> void { startResolve(); })), + locality_lb_endpoint_(locality_lb_endpoint), lb_endpoint_(lb_endpoint) {} + +StrictDnsClusterImpl::ResolveTarget::~ResolveTarget() { + if (active_query_) { + active_query_->cancel(); + } +} + +void StrictDnsClusterImpl::ResolveTarget::startResolve() { + ENVOY_LOG(trace, "starting async DNS resolution for {}", dns_address_); + parent_.info_->stats().update_attempt_.inc(); + + active_query_ = parent_.dns_resolver_->resolve( + dns_address_, parent_.dns_lookup_family_, + [this](const std::list&& address_list) -> void { + active_query_ = nullptr; + ENVOY_LOG(trace, "async DNS resolution complete for {}", dns_address_); + parent_.info_->stats().update_success_.inc(); + + std::unordered_map updated_hosts; + HostVector new_hosts; + for (const Network::Address::InstanceConstSharedPtr& address : address_list) { + // TODO(mattklein123): Currently the DNS interface does not consider port. 
We need to + // make a new address that has port in it. We need to both support IPv6 as well as + // potentially move port handling into the DNS interface itself, which would work better + // for SRV. + ASSERT(address != nullptr); + new_hosts.emplace_back(new HostImpl( + parent_.info_, dns_address_, Network::Utility::getAddressWithPort(*address, port_), + lb_endpoint_.metadata(), lb_endpoint_.load_balancing_weight().value(), + locality_lb_endpoint_.locality(), lb_endpoint_.endpoint().health_check_config(), + locality_lb_endpoint_.priority(), lb_endpoint_.health_status())); + } + + HostVector hosts_added; + HostVector hosts_removed; + if (parent_.updateDynamicHostList(new_hosts, hosts_, hosts_added, hosts_removed, + updated_hosts, all_hosts_)) { + ENVOY_LOG(debug, "DNS hosts have changed for {}", dns_address_); + ASSERT(std::all_of(hosts_.begin(), hosts_.end(), [&](const auto& host) { + return host->priority() == locality_lb_endpoint_.priority(); + })); + parent_.updateAllHosts(hosts_added, hosts_removed, locality_lb_endpoint_.priority()); + } else { + parent_.info_->stats().update_no_rebuild_.inc(); + } + + all_hosts_ = std::move(updated_hosts); + + // If there is an initialize callback, fire it now. Note that if the cluster refers to + // multiple DNS names, this will return initialized after a single DNS resolution + // completes. This is not perfect but is easier to code and unclear if the extra + // complexity is needed so will start with this. 
+ parent_.onPreInitComplete(); + resolve_timer_->enableTimer(parent_.dns_refresh_rate_ms_); + }); +} + +ClusterImplBaseSharedPtr StrictDnsClusterFactory::createClusterImpl( + const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) { + auto selected_dns_resolver = selectDnsResolver(cluster, context); + + return std::make_unique(cluster, context.runtime(), selected_dns_resolver, + socket_factory_context, std::move(stats_scope), + context.addedViaApi()); +} + +/** + * Static registration for the strict dns cluster factory. @see RegisterFactory. + */ +REGISTER_FACTORY(StrictDnsClusterFactory, ClusterFactory); + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/upstream/strict_dns_cluster.h b/source/common/upstream/strict_dns_cluster.h new file mode 100644 index 0000000000000..24496dd5a79f3 --- /dev/null +++ b/source/common/upstream/strict_dns_cluster.h @@ -0,0 +1,75 @@ +#pragma once + +#include "common/upstream/cluster_factory_impl.h" +#include "common/upstream/upstream_impl.h" + +namespace Envoy { +namespace Upstream { + +/** + * Implementation of Upstream::Cluster that does periodic DNS resolution and updates the host + * member set if the DNS members change. 
+ */ +class StrictDnsClusterImpl : public BaseDynamicClusterImpl { +public: + StrictDnsClusterImpl(const envoy::api::v2::Cluster& cluster, Runtime::Loader& runtime, + Network::DnsResolverSharedPtr dns_resolver, + Server::Configuration::TransportSocketFactoryContext& factory_context, + Stats::ScopePtr&& stats_scope, bool added_via_api); + + // Upstream::Cluster + InitializePhase initializePhase() const override { return InitializePhase::Primary; } + +private: + struct ResolveTarget { + ResolveTarget(StrictDnsClusterImpl& parent, Event::Dispatcher& dispatcher, + const std::string& url, + const envoy::api::v2::endpoint::LocalityLbEndpoints& locality_lb_endpoint, + const envoy::api::v2::endpoint::LbEndpoint& lb_endpoint); + ~ResolveTarget(); + void startResolve(); + + StrictDnsClusterImpl& parent_; + Network::ActiveDnsQuery* active_query_{}; + std::string dns_address_; + uint32_t port_; + Event::TimerPtr resolve_timer_; + HostVector hosts_; + const envoy::api::v2::endpoint::LocalityLbEndpoints locality_lb_endpoint_; + const envoy::api::v2::endpoint::LbEndpoint lb_endpoint_; + HostMap all_hosts_; + }; + + typedef std::unique_ptr ResolveTargetPtr; + + void updateAllHosts(const HostVector& hosts_added, const HostVector& hosts_removed, + uint32_t priority); + + // ClusterImplBase + void startPreInit() override; + + const LocalInfo::LocalInfo& local_info_; + Network::DnsResolverSharedPtr dns_resolver_; + std::list resolve_targets_; + const std::chrono::milliseconds dns_refresh_rate_ms_; + Network::DnsLookupFamily dns_lookup_family_; + uint32_t overprovisioning_factor_; +}; + +/** + * Factory for StrictDnsClusterImpl + */ +class StrictDnsClusterFactory : public ClusterFactoryImplBase { +public: + StrictDnsClusterFactory() + : ClusterFactoryImplBase(Extensions::Clusters::ClusterTypes::get().StrictDns) {} + +private: + ClusterImplBaseSharedPtr + createClusterImpl(const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + 
Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) override; +}; + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/upstream/subset_lb.cc b/source/common/upstream/subset_lb.cc index f6395881d6725..56e048d265fe7 100644 --- a/source/common/upstream/subset_lb.cc +++ b/source/common/upstream/subset_lb.cc @@ -19,14 +19,14 @@ namespace Upstream { SubsetLoadBalancer::SubsetLoadBalancer( LoadBalancerType lb_type, PrioritySet& priority_set, const PrioritySet* local_priority_set, - ClusterStats& stats, Runtime::Loader& runtime, Runtime::RandomGenerator& random, - const LoadBalancerSubsetInfo& subsets, + ClusterStats& stats, Stats::Scope& scope, Runtime::Loader& runtime, + Runtime::RandomGenerator& random, const LoadBalancerSubsetInfo& subsets, const absl::optional& lb_ring_hash_config, const absl::optional& least_request_config, const envoy::api::v2::Cluster::CommonLbConfig& common_config) : lb_type_(lb_type), lb_ring_hash_config_(lb_ring_hash_config), least_request_config_(least_request_config), common_config_(common_config), stats_(stats), - runtime_(runtime), random_(random), fallback_policy_(subsets.fallbackPolicy()), + scope_(scope), runtime_(runtime), random_(random), fallback_policy_(subsets.fallbackPolicy()), default_subset_metadata_(subsets.defaultSubset().fields().begin(), subsets.defaultSubset().fields().end()), subset_keys_(subsets.subsetKeys()), original_priority_set_(priority_set), @@ -35,6 +35,34 @@ SubsetLoadBalancer::SubsetLoadBalancer( scale_locality_weight_(subsets.scaleLocalityWeight()) { ASSERT(subsets.isEnabled()); + if (fallback_policy_ != envoy::api::v2::Cluster::LbSubsetConfig::NO_FALLBACK) { + HostPredicate predicate; + if (fallback_policy_ == envoy::api::v2::Cluster::LbSubsetConfig::ANY_ENDPOINT) { + predicate = [](const Host&) -> bool { return true; }; + + ENVOY_LOG(debug, "subset lb: creating any-endpoint fallback load balancer"); + } else { + predicate = 
[this](const Host& host) -> bool { + return hostMatches(default_subset_metadata_, host); + }; + + ENVOY_LOG(debug, "subset lb: creating fallback load balancer for {}", + describeMetadata(default_subset_metadata_)); + } + + fallback_subset_ = std::make_unique(); + fallback_subset_->priority_subset_ = std::make_unique( + *this, predicate, locality_weight_aware_, scale_locality_weight_); + } + + if (subsets.panicModeAny()) { + HostPredicate predicate = [](const Host&) -> bool { return true; }; + + panic_mode_subset_ = std::make_unique(); + panic_mode_subset_->priority_subset_ = std::make_unique( + *this, predicate, locality_weight_aware_, scale_locality_weight_); + } + // Create filtered default subset (if necessary) and other subsets based on current hosts. refreshSubsets(); @@ -96,8 +124,21 @@ HostConstSharedPtr SubsetLoadBalancer::chooseHost(LoadBalancerContext* context) return nullptr; } - stats_.lb_subsets_fallback_.inc(); - return fallback_subset_->priority_subset_->lb_->chooseHost(context); + HostConstSharedPtr host = fallback_subset_->priority_subset_->lb_->chooseHost(context); + if (host != nullptr) { + stats_.lb_subsets_fallback_.inc(); + return host; + } + + if (panic_mode_subset_ != nullptr) { + HostConstSharedPtr host = panic_mode_subset_->priority_subset_->lb_->chooseHost(context); + if (host != nullptr) { + stats_.lb_subsets_fallback_panic_.inc(); + return host; + } + } + + return nullptr; } // Find a host from the subsets. 
Sets host_chosen to false and returns nullptr if the context has @@ -165,34 +206,18 @@ SubsetLoadBalancer::LbSubsetEntryPtr SubsetLoadBalancer::findSubset( void SubsetLoadBalancer::updateFallbackSubset(uint32_t priority, const HostVector& hosts_added, const HostVector& hosts_removed) { - if (fallback_policy_ == envoy::api::v2::Cluster::LbSubsetConfig::NO_FALLBACK) { + if (fallback_subset_ == nullptr) { ENVOY_LOG(debug, "subset lb: fallback load balancer disabled"); return; } - if (fallback_subset_ == nullptr) { - // First update: create the default host subset. - HostPredicate predicate; - if (fallback_policy_ == envoy::api::v2::Cluster::LbSubsetConfig::ANY_ENDPOINT) { - predicate = [](const Host&) -> bool { return true; }; - - ENVOY_LOG(debug, "subset lb: creating any-endpoint fallback load balancer"); - } else { - predicate = std::bind(&SubsetLoadBalancer::hostMatches, this, default_subset_metadata_, - std::placeholders::_1); - - ENVOY_LOG(debug, "subset lb: creating fallback load balancer for {}", - describeMetadata(default_subset_metadata_)); - } + // Add/remove hosts. + fallback_subset_->priority_subset_->update(priority, hosts_added, hosts_removed); - fallback_subset_.reset(new LbSubsetEntry()); - fallback_subset_->priority_subset_.reset( - new PrioritySubsetImpl(*this, predicate, locality_weight_aware_, scale_locality_weight_)); - return; + // Same thing for the panic mode subset. + if (panic_mode_subset_ != nullptr) { + panic_mode_subset_->priority_subset_->update(priority, hosts_added, hosts_removed); } - - // Subsequent updates: add/remove hosts. - fallback_subset_->priority_subset_->update(priority, hosts_added, hosts_removed); } // Iterates over the added and removed hosts, looking up an LbSubsetEntryPtr for each. 
For every @@ -228,9 +253,9 @@ void SubsetLoadBalancer::processSubsets( if (entry->initialized()) { update_cb(entry); } else { - HostPredicate predicate = - std::bind(&SubsetLoadBalancer::hostMatches, this, kvs, std::placeholders::_1); - + HostPredicate predicate = [this, kvs](const Host& host) -> bool { + return hostMatches(kvs, host); + }; new_cb(entry, predicate, kvs, adding_hosts); } } @@ -256,35 +281,35 @@ void SubsetLoadBalancer::update(uint32_t priority, const HostVector& hosts_added const HostVector& hosts_removed) { updateFallbackSubset(priority, hosts_added, hosts_removed); - processSubsets(hosts_added, hosts_removed, - [&](LbSubsetEntryPtr entry) { - const bool active_before = entry->active(); - entry->priority_subset_->update(priority, hosts_added, hosts_removed); - - if (active_before && !entry->active()) { - stats_.lb_subsets_active_.dec(); - stats_.lb_subsets_removed_.inc(); - } else if (!active_before && entry->active()) { - stats_.lb_subsets_active_.inc(); - stats_.lb_subsets_created_.inc(); - } - }, - [&](LbSubsetEntryPtr entry, HostPredicate predicate, const SubsetMetadata& kvs, - bool adding_host) { - UNREFERENCED_PARAMETER(kvs); - if (adding_host) { - ENVOY_LOG(debug, "subset lb: creating load balancer for {}", - describeMetadata(kvs)); - - // Initialize new entry with hosts and update stats. (An uninitialized entry - // with only removed hosts is a degenerate case and we leave the entry - // uninitialized.) 
- entry->priority_subset_.reset(new PrioritySubsetImpl( - *this, predicate, locality_weight_aware_, scale_locality_weight_)); - stats_.lb_subsets_active_.inc(); - stats_.lb_subsets_created_.inc(); - } - }); + processSubsets( + hosts_added, hosts_removed, + [&](LbSubsetEntryPtr entry) { + const bool active_before = entry->active(); + entry->priority_subset_->update(priority, hosts_added, hosts_removed); + + if (active_before && !entry->active()) { + stats_.lb_subsets_active_.dec(); + stats_.lb_subsets_removed_.inc(); + } else if (!active_before && entry->active()) { + stats_.lb_subsets_active_.inc(); + stats_.lb_subsets_created_.inc(); + } + }, + [&](LbSubsetEntryPtr entry, HostPredicate predicate, const SubsetMetadata& kvs, + bool adding_host) { + UNREFERENCED_PARAMETER(kvs); + if (adding_host) { + ENVOY_LOG(debug, "subset lb: creating load balancer for {}", describeMetadata(kvs)); + + // Initialize new entry with hosts and update stats. (An uninitialized entry + // with only removed hosts is a degenerate case and we leave the entry + // uninitialized.) + entry->priority_subset_.reset(new PrioritySubsetImpl( + *this, predicate, locality_weight_aware_, scale_locality_weight_)); + stats_.lb_subsets_active_.inc(); + stats_.lb_subsets_created_.inc(); + } + }); } bool SubsetLoadBalancer::hostMatches(const SubsetMetadata& kvs, const Host& host) { @@ -293,7 +318,7 @@ bool SubsetLoadBalancer::hostMatches(const SubsetMetadata& kvs, const Host& host host_metadata.filter_metadata().find(Config::MetadataFilters::get().ENVOY_LB); if (filter_it == host_metadata.filter_metadata().end()) { - return kvs.size() == 0; + return kvs.empty(); } const ProtobufWkt::Struct& data_struct = filter_it->second; @@ -458,7 +483,7 @@ SubsetLoadBalancer::PrioritySubsetImpl::PrioritySubsetImpl(const SubsetLoadBalan // We should make the subset LB thread aware since the calculations are costly, and then we // can also use a thread aware sub-LB properly. The following works fine but is not optimal. 
thread_aware_lb_ = std::make_unique( - *this, subset_lb.stats_, subset_lb.runtime_, subset_lb.random_, + *this, subset_lb.stats_, subset_lb.scope_, subset_lb.runtime_, subset_lb.random_, subset_lb.lb_ring_hash_config_, subset_lb.common_config_); thread_aware_lb_->initialize(); lb_ = thread_aware_lb_->factory()->create(); @@ -469,7 +494,8 @@ SubsetLoadBalancer::PrioritySubsetImpl::PrioritySubsetImpl(const SubsetLoadBalan // We should make the subset LB thread aware since the calculations are costly, and then we // can also use a thread aware sub-LB properly. The following works fine but is not optimal. thread_aware_lb_ = std::make_unique( - *this, subset_lb.stats_, subset_lb.runtime_, subset_lb.random_, subset_lb.common_config_); + *this, subset_lb.stats_, subset_lb.scope_, subset_lb.runtime_, subset_lb.random_, + subset_lb.common_config_); thread_aware_lb_->initialize(); lb_ = thread_aware_lb_->factory()->create(); break; @@ -487,75 +513,96 @@ SubsetLoadBalancer::PrioritySubsetImpl::PrioritySubsetImpl(const SubsetLoadBalan void SubsetLoadBalancer::HostSubsetImpl::update(const HostVector& hosts_added, const HostVector& hosts_removed, std::function predicate) { - std::unordered_set predicate_added; - - HostVector filtered_added; - for (const auto host : hosts_added) { + // We cache the result of matching the host against the predicate. This ensures + // that we maintain a consistent view of the metadata and saves on computation + // since metadata lookups can be expensive. + // + // We use an unordered_set because this can potentially be in the tens of thousands. + std::unordered_set matching_hosts; + + auto cached_predicate = [&matching_hosts](const auto& host) { + return matching_hosts.count(&host) == 1; + }; + + // TODO(snowp): If we had a unhealthyHosts() function we could avoid potentially traversing + // the list of hosts twice. 
+ auto hosts = std::make_shared(); + hosts->reserve(original_host_set_.hosts().size()); + for (const auto& host : original_host_set_.hosts()) { if (predicate(*host)) { - predicate_added.insert(host); - filtered_added.emplace_back(host); + matching_hosts.insert(host.get()); + hosts->emplace_back(host); } } - HostVector filtered_removed; - for (const auto host : hosts_removed) { - if (predicate(*host)) { - filtered_removed.emplace_back(host); + auto healthy_hosts = std::make_shared(); + healthy_hosts->get().reserve(original_host_set_.healthyHosts().size()); + for (const auto& host : original_host_set_.healthyHosts()) { + if (cached_predicate(*host)) { + healthy_hosts->get().emplace_back(host); } } - HostVectorSharedPtr hosts(new HostVector()); - HostVectorSharedPtr healthy_hosts(new HostVector()); - HostVectorSharedPtr degraded_hosts(new HostVector()); + auto degraded_hosts = std::make_shared(); + degraded_hosts->get().reserve(original_host_set_.degradedHosts().size()); + for (const auto& host : original_host_set_.degradedHosts()) { + if (cached_predicate(*host)) { + degraded_hosts->get().emplace_back(host); + } + } - // It's possible that hosts_added == original_host_set_.hosts(), e.g.: when - // calling refreshSubsets() if only metadata change. If so, we can avoid the - // predicate() call. 
- for (const auto host : original_host_set_.hosts()) { - bool host_seen = predicate_added.count(host) == 1; - if (host_seen || predicate(*host)) { - hosts->emplace_back(host); - switch (host->health()) { - case Host::Health::Healthy: - healthy_hosts->emplace_back(host); - break; - case Host::Health::Degraded: - degraded_hosts->emplace_back(host); - break; - case Host::Health::Unhealthy: - break; - } + auto excluded_hosts = std::make_shared(); + excluded_hosts->get().reserve(original_host_set_.excludedHosts().size()); + for (const auto& host : original_host_set_.excludedHosts()) { + if (cached_predicate(*host)) { + excluded_hosts->get().emplace_back(host); } } - // Calling predicate() is expensive since it involves metadata lookups; so we - // avoid it in the 2nd call to filter() by using the result from the first call - // to filter() as the starting point. - // - // Also, if we only have one locality we can avoid the first call to filter() by + // If we only have one locality we can avoid the first call to filter() by // just creating a new HostsPerLocality from the list of all hosts. // // TODO(rgs1): merge these two filter() calls in one loop. 
HostsPerLocalityConstSharedPtr hosts_per_locality; if (original_host_set_.hostsPerLocality().get().size() == 1) { - hosts_per_locality.reset( - new HostsPerLocalityImpl(*hosts, original_host_set_.hostsPerLocality().hasLocalLocality())); + hosts_per_locality = std::make_shared( + *hosts, original_host_set_.hostsPerLocality().hasLocalLocality()); } else { - hosts_per_locality = original_host_set_.hostsPerLocality().filter(predicate); + hosts_per_locality = original_host_set_.hostsPerLocality().filter({cached_predicate})[0]; } - HostsPerLocalityConstSharedPtr healthy_hosts_per_locality = hosts_per_locality->filter( - [](const Host& host) { return host.health() == Host::Health::Healthy; }); - HostsPerLocalityConstSharedPtr degraded_hosts_per_locality = hosts_per_locality->filter( - [](const Host& host) { return host.health() == Host::Health::Degraded; }); + HostsPerLocalityConstSharedPtr healthy_hosts_per_locality = + original_host_set_.healthyHostsPerLocality().filter({cached_predicate})[0]; + HostsPerLocalityConstSharedPtr degraded_hosts_per_locality = + original_host_set_.degradedHostsPerLocality().filter({cached_predicate})[0]; + auto excluded_hosts_per_locality = + original_host_set_.excludedHostsPerLocality().filter({cached_predicate})[0]; + + // We can use the cached predicate here, since we trust that the hosts in hosts_added were also + // present in the list of all hosts. + HostVector filtered_added; + for (const auto& host : hosts_added) { + if (cached_predicate(*host)) { + filtered_added.emplace_back(host); + } + } + + // Since the removed hosts would not be present in the list of all hosts, we need to evaluate the + // predicate directly for these hosts. + HostVector filtered_removed; + for (const auto& host : hosts_removed) { + if (predicate(*host)) { + filtered_removed.emplace_back(host); + } + } - // TODO(snowp): Use partitionHosts here. 
HostSetImpl::updateHosts(HostSetImpl::updateHostsParams( hosts, hosts_per_locality, healthy_hosts, healthy_hosts_per_locality, - degraded_hosts, degraded_hosts_per_locality), + degraded_hosts, degraded_hosts_per_locality, excluded_hosts, + excluded_hosts_per_locality), determineLocalityWeights(*hosts_per_locality), filtered_added, - filtered_removed); + filtered_removed, absl::nullopt); } LocalityWeightsConstSharedPtr SubsetLoadBalancer::HostSubsetImpl::determineLocalityWeights( @@ -563,6 +610,11 @@ LocalityWeightsConstSharedPtr SubsetLoadBalancer::HostSubsetImpl::determineLocal if (locality_weight_aware_) { if (scale_locality_weight_) { const auto& original_hosts_per_locality = original_host_set_.hostsPerLocality().get(); + // E.g. we can be here in static clusters with actual locality weighting before pre-init + // completes. + if (!original_host_set_.localityWeights()) { + return {}; + } const auto& original_weights = *original_host_set_.localityWeights(); auto scaled_locality_weights = std::make_shared(original_weights.size()); @@ -603,10 +655,10 @@ HostSetImplPtr SubsetLoadBalancer::PrioritySubsetImpl::createHostSet( void SubsetLoadBalancer::PrioritySubsetImpl::update(uint32_t priority, const HostVector& hosts_added, const HostVector& hosts_removed) { - HostSubsetImpl* host_subset = getOrCreateHostSubset(priority); - host_subset->update(hosts_added, hosts_removed, predicate_); + const auto& host_subset = getOrCreateHostSet(priority); + updateSubset(priority, hosts_added, hosts_removed, predicate_); - if (host_subset->hosts().empty() != empty_) { + if (host_subset.hosts().empty() != empty_) { empty_ = true; for (auto& host_set : hostSetsPerPriority()) { empty_ &= host_set->hosts().empty(); diff --git a/source/common/upstream/subset_lb.h b/source/common/upstream/subset_lb.h index 4e89de35a8d62..3980cce1330a1 100644 --- a/source/common/upstream/subset_lb.h +++ b/source/common/upstream/subset_lb.h @@ -7,6 +7,7 @@ #include #include "envoy/runtime/runtime.h" 
+#include "envoy/stats/scope.h" #include "envoy/upstream/load_balancer.h" #include "common/common/macros.h" @@ -23,8 +24,8 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable& lb_ring_hash_config, const absl::optional& least_request_config, const envoy::api::v2::Cluster::CommonLbConfig& common_config); @@ -69,16 +70,24 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable(&getOrCreateHostSet(priority)); + const HostSubsetImpl* getOrCreateHostSubset(uint32_t priority) { + return reinterpret_cast(&getOrCreateHostSet(priority)); } void triggerCallbacks() { for (size_t i = 0; i < hostSetsPerPriority().size(); ++i) { - getOrCreateHostSubset(i)->triggerCallbacks(); + runReferenceUpdateCallbacks(i, {}, {}); } } + void updateSubset(uint32_t priority, const HostVector& hosts_added, + const HostVector& hosts_removed, HostPredicate predicate) { + reinterpret_cast(host_sets_[priority].get()) + ->update(hosts_added, hosts_removed, predicate); + + runUpdateCallbacks(hosts_added, hosts_removed); + } + // Thread aware LB if applicable. ThreadAwareLoadBalancerPtr thread_aware_lb_; // Current active LB. 
@@ -153,6 +162,7 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable least_request_config_; const envoy::api::v2::Cluster::CommonLbConfig common_config_; ClusterStats& stats_; + Stats::Scope& scope_; Runtime::Loader& runtime_; Runtime::RandomGenerator& random_; @@ -165,6 +175,7 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggableweight(); + } + + for (const auto& host : hosts) { + const double weight = host->weight() * normalized_locality_weight / sum; + normalized_host_weights.push_back({host, weight}); + min_normalized_weight = std::min(min_normalized_weight, weight); + max_normalized_weight = std::max(max_normalized_weight, weight); + } +} + +void normalizeLocalityWeights(const HostsPerLocality& hosts_per_locality, + const LocalityWeights& locality_weights, + NormalizedHostWeightVector& normalized_host_weights, + double& min_normalized_weight, double& max_normalized_weight) { + ASSERT(locality_weights.size() == hosts_per_locality.get().size()); + + uint32_t sum = 0; + for (const auto weight : locality_weights) { + sum += weight; + } + + // Locality weights (unlike host weights) may be 0. If _all_ locality weights were 0, bail out. + if (sum == 0) { + return; + } + + // Compute normalized weights for all hosts in each locality. If a locality was assigned zero + // weight, all hosts in that locality will be skipped. 
+ for (LocalityWeights::size_type i = 0; i < locality_weights.size(); ++i) { + if (locality_weights[i] != 0) { + const HostVector& hosts = hosts_per_locality.get()[i]; + const double normalized_locality_weight = static_cast(locality_weights[i]) / sum; + normalizeHostWeights(hosts, normalized_locality_weight, normalized_host_weights, + min_normalized_weight, max_normalized_weight); + } + } +} + +void normalizeWeights(const HostSet& host_set, bool in_panic, + NormalizedHostWeightVector& normalized_host_weights, + double& min_normalized_weight, double& max_normalized_weight) { + if (host_set.localityWeights() == nullptr || host_set.localityWeights()->empty()) { + // If we're not dealing with locality weights, just normalize weights for the flat set of hosts. + const auto& hosts = in_panic ? host_set.hosts() : host_set.healthyHosts(); + normalizeHostWeights(hosts, 1.0, normalized_host_weights, min_normalized_weight, + max_normalized_weight); + } else { + // Otherwise, normalize weights across all localities. + const auto& hosts_per_locality = + in_panic ? host_set.hostsPerLocality() : host_set.healthyHostsPerLocality(); + normalizeLocalityWeights(hosts_per_locality, *(host_set.localityWeights()), + normalized_host_weights, min_normalized_weight, max_normalized_weight); + } +} + +} // namespace + void ThreadAwareLoadBalancerBase::initialize() { // TODO(mattklein123): In the future, once initialized and the initial LB is built, it would be // better to use a background thread for computing LB updates. This has the substantial benefit @@ -33,8 +100,15 @@ void ThreadAwareLoadBalancerBase::refresh() { // Copy panic flag from LoadBalancerBase. It is calculated when there is a change // in hosts set or hosts' health. per_priority_state->global_panic_ = per_priority_panic_[priority]; + + // Normalize host and locality weights such that the sum of all normalized weights is 1. 
+ NormalizedHostWeightVector normalized_host_weights; + double min_normalized_weight = 1.0; + double max_normalized_weight = 0.0; + normalizeWeights(*host_set, per_priority_state->global_panic_, normalized_host_weights, + min_normalized_weight, max_normalized_weight); per_priority_state->current_lb_ = - createLoadBalancer(*host_set, per_priority_state->global_panic_); + createLoadBalancer(normalized_host_weights, min_normalized_weight, max_normalized_weight); } { @@ -81,7 +155,7 @@ LoadBalancerPtr ThreadAwareLoadBalancerBase::LoadBalancerFactoryImpl::create() { lb->degraded_per_priority_load_ = degraded_per_priority_load_; lb->per_priority_state_ = per_priority_state_; - return std::move(lb); + return lb; } } // namespace Upstream diff --git a/source/common/upstream/thread_aware_lb_impl.h b/source/common/upstream/thread_aware_lb_impl.h index 7630fc1d45ae5..d8e7de1be87fd 100644 --- a/source/common/upstream/thread_aware_lb_impl.h +++ b/source/common/upstream/thread_aware_lb_impl.h @@ -7,6 +7,8 @@ namespace Envoy { namespace Upstream { +typedef std::vector> NormalizedHostWeightVector; + class ThreadAwareLoadBalancerBase : public LoadBalancerBase, public ThreadAwareLoadBalancer { public: /** @@ -77,8 +79,9 @@ class ThreadAwareLoadBalancerBase : public LoadBalancerBase, public ThreadAwareL std::shared_ptr degraded_per_priority_load_ GUARDED_BY(mutex_); }; - virtual HashingLoadBalancerSharedPtr createLoadBalancer(const HostSet& host_set, - bool in_panic) PURE; + virtual HashingLoadBalancerSharedPtr + createLoadBalancer(const NormalizedHostWeightVector& normalized_host_weights, + double min_normalized_weight, double max_normalized_weight) PURE; void refresh(); std::shared_ptr factory_; diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index e232be55eb270..518218c27731f 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -2,6 +2,7 @@ #include #include +#include #include #include 
#include @@ -101,7 +102,7 @@ parseClusterSocketOptions(const envoy::api::v2::Cluster& config, // Cluster socket_options trump cluster manager wide. if (bind_config.socket_options().size() + config.upstream_bind_config().socket_options().size() > 0) { - auto socket_options = config.upstream_bind_config().socket_options().size() > 0 + auto socket_options = !config.upstream_bind_config().socket_options().empty() ? config.upstream_bind_config().socket_options() : bind_config.socket_options(); Network::Socket::appendOptions( @@ -194,6 +195,23 @@ bool updateHealthFlag(const Host& updated_host, Host& existing_host, Host::Healt return false; } +// Converts a set of hosts into a HostVector, excluding certain hosts. +// @param hosts hosts to convert +// @param excluded_hosts hosts to exclude from the resulting vector. +HostVector filterHosts(const std::unordered_set& hosts, + const std::unordered_set& excluded_hosts) { + HostVector net_hosts; + net_hosts.reserve(hosts.size()); + + for (const auto& host : hosts) { + if (excluded_hosts.find(host) == excluded_hosts.end()) { + net_hosts.emplace_back(host); + } + } + + return net_hosts; +} + } // namespace Host::CreateConnectionData HostImpl::createConnection( @@ -256,26 +274,42 @@ HostImpl::createConnection(Event::Dispatcher& dispatcher, const ClusterInfo& clu void HostImpl::weight(uint32_t new_weight) { weight_ = std::max(1U, std::min(128U, new_weight)); } -HostsPerLocalityConstSharedPtr -HostsPerLocalityImpl::filter(std::function predicate) const { - auto* filtered_clone = new HostsPerLocalityImpl(); - HostsPerLocalityConstSharedPtr shared_filtered_clone{filtered_clone}; +std::vector HostsPerLocalityImpl::filter( + const std::vector>& predicates) const { + // We keep two lists: one for being able to mutate the clone and one for returning to the caller. + // Creating them both at the start avoids iterating over the mutable values at the end to convert + // them to a const pointer. 
+ std::vector> mutable_clones; + std::vector filtered_clones; + + for (size_t i = 0; i < predicates.size(); ++i) { + mutable_clones.emplace_back(std::make_shared()); + filtered_clones.emplace_back(mutable_clones.back()); + mutable_clones.back()->local_ = local_; + } - filtered_clone->local_ = local_; for (const auto& hosts_locality : hosts_per_locality_) { - HostVector current_locality_hosts; + std::vector current_locality_hosts; + current_locality_hosts.resize(predicates.size()); + + // Since # of hosts >> # of predicates, we iterate over the hosts in the outer loop. for (const auto& host : hosts_locality) { - if (predicate(*host)) { - current_locality_hosts.emplace_back(host); + for (size_t i = 0; i < predicates.size(); ++i) { + if (predicates[i](*host)) { + current_locality_hosts[i].emplace_back(host); + } } } - filtered_clone->hosts_per_locality_.push_back(std::move(current_locality_hosts)); + + for (size_t i = 0; i < predicates.size(); ++i) { + mutable_clones[i]->hosts_per_locality_.push_back(std::move(current_locality_hosts[i])); + } } - return shared_filtered_clone; + return filtered_clones; } -void HostSetImpl::updateHosts(UpdateHostsParams&& update_hosts_params, +void HostSetImpl::updateHosts(PrioritySet::UpdateHostsParams&& update_hosts_params, LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added, const HostVector& hosts_removed, absl::optional overprovisioning_factor) { @@ -286,17 +320,21 @@ void HostSetImpl::updateHosts(UpdateHostsParams&& update_hosts_params, hosts_ = std::move(update_hosts_params.hosts); healthy_hosts_ = std::move(update_hosts_params.healthy_hosts); degraded_hosts_ = std::move(update_hosts_params.degraded_hosts); + excluded_hosts_ = std::move(update_hosts_params.excluded_hosts); hosts_per_locality_ = std::move(update_hosts_params.hosts_per_locality); healthy_hosts_per_locality_ = std::move(update_hosts_params.healthy_hosts_per_locality); degraded_hosts_per_locality_ = 
std::move(update_hosts_params.degraded_hosts_per_locality); + excluded_hosts_per_locality_ = std::move(update_hosts_params.excluded_hosts_per_locality); locality_weights_ = std::move(locality_weights); - rebuildLocalityScheduler(locality_scheduler_, locality_entries_, *healthy_hosts_per_locality_, - *healthy_hosts_, hosts_per_locality_, locality_weights_, + rebuildLocalityScheduler(healthy_locality_scheduler_, healthy_locality_entries_, + *healthy_hosts_per_locality_, healthy_hosts_->get(), hosts_per_locality_, + excluded_hosts_per_locality_, locality_weights_, overprovisioning_factor_); rebuildLocalityScheduler(degraded_locality_scheduler_, degraded_locality_entries_, - *degraded_hosts_per_locality_, *degraded_hosts_, hosts_per_locality_, - locality_weights_, overprovisioning_factor_); + *degraded_hosts_per_locality_, degraded_hosts_->get(), + hosts_per_locality_, excluded_hosts_per_locality_, locality_weights_, + overprovisioning_factor_); runUpdateCallbacks(hosts_added, hosts_removed); } @@ -306,6 +344,7 @@ void HostSetImpl::rebuildLocalityScheduler( std::vector>& locality_entries, const HostsPerLocality& eligible_hosts_per_locality, const HostVector& eligible_hosts, HostsPerLocalityConstSharedPtr all_hosts_per_locality, + HostsPerLocalityConstSharedPtr excluded_hosts_per_locality, LocalityWeightsConstSharedPtr locality_weights, uint32_t overprovisioning_factor) { // Rebuild the locality scheduler by computing the effective weight of each // locality in this priority. 
The scheduler is reset by default, and is rebuilt only if we have @@ -328,9 +367,9 @@ void HostSetImpl::rebuildLocalityScheduler( locality_scheduler = std::make_unique>(); locality_entries.clear(); for (uint32_t i = 0; i < all_hosts_per_locality->get().size(); ++i) { - const double effective_weight = - effectiveLocalityWeight(i, eligible_hosts_per_locality, *all_hosts_per_locality, - *locality_weights, overprovisioning_factor); + const double effective_weight = effectiveLocalityWeight( + i, eligible_hosts_per_locality, *excluded_hosts_per_locality, *all_hosts_per_locality, + *locality_weights, overprovisioning_factor); if (effective_weight > 0) { locality_entries.emplace_back(std::make_shared(i, effective_weight)); locality_scheduler->add(effective_weight, locality_entries.back()); @@ -343,78 +382,84 @@ void HostSetImpl::rebuildLocalityScheduler( } } -absl::optional HostSetImpl::chooseLocality() { - if (locality_scheduler_ == nullptr) { +absl::optional HostSetImpl::chooseHealthyLocality() { + return chooseLocality(healthy_locality_scheduler_.get()); +} + +absl::optional HostSetImpl::chooseDegradedLocality() { + return chooseLocality(degraded_locality_scheduler_.get()); +} + +absl::optional +HostSetImpl::chooseLocality(EdfScheduler* locality_scheduler) { + if (locality_scheduler == nullptr) { return {}; } - const std::shared_ptr locality = locality_scheduler_->pick(); + const std::shared_ptr locality = locality_scheduler->pick(); // We don't build a schedule if there are no weighted localities, so we should always succeed. ASSERT(locality != nullptr); // If we picked it before, its weight must have been positive. 
ASSERT(locality->effective_weight_ > 0); - locality_scheduler_->add(locality->effective_weight_, locality); + locality_scheduler->add(locality->effective_weight_, locality); return locality->index_; } -HostSetImpl::UpdateHostsParams -HostSetImpl::updateHostsParams(HostVectorConstSharedPtr hosts, - HostsPerLocalityConstSharedPtr hosts_per_locality) { - return updateHostsParams(std::move(hosts), std::move(hosts_per_locality), - std::make_shared(), HostsPerLocalityImpl::empty()); -} - -HostSetImpl::UpdateHostsParams -HostSetImpl::updateHostsParams(HostVectorConstSharedPtr hosts, - HostsPerLocalityConstSharedPtr hosts_per_locality, - HostVectorConstSharedPtr healthy_hosts, - HostsPerLocalityConstSharedPtr healthy_hosts_per_locality) { - return updateHostsParams(std::move(hosts), std::move(hosts_per_locality), - std::move(healthy_hosts), std::move(healthy_hosts_per_locality), - std::make_shared(), HostsPerLocalityImpl::empty()); -} - -HostSetImpl::UpdateHostsParams +PrioritySet::UpdateHostsParams HostSetImpl::updateHostsParams(HostVectorConstSharedPtr hosts, HostsPerLocalityConstSharedPtr hosts_per_locality, - HostVectorConstSharedPtr healthy_hosts, + HealthyHostVectorConstSharedPtr healthy_hosts, HostsPerLocalityConstSharedPtr healthy_hosts_per_locality, - HostVectorConstSharedPtr degraded_hosts, - HostsPerLocalityConstSharedPtr degraded_hosts_per_locality) { - return UpdateHostsParams{std::move(hosts), - std::move(healthy_hosts), - std::move(degraded_hosts), - std::move(hosts_per_locality), - std::move(healthy_hosts_per_locality), - std::move(degraded_hosts_per_locality)}; + DegradedHostVectorConstSharedPtr degraded_hosts, + HostsPerLocalityConstSharedPtr degraded_hosts_per_locality, + ExcludedHostVectorConstSharedPtr excluded_hosts, + HostsPerLocalityConstSharedPtr excluded_hosts_per_locality) { + return PrioritySet::UpdateHostsParams{std::move(hosts), + std::move(healthy_hosts), + std::move(degraded_hosts), + std::move(excluded_hosts), + 
std::move(hosts_per_locality), + std::move(healthy_hosts_per_locality), + std::move(degraded_hosts_per_locality), + std::move(excluded_hosts_per_locality)}; } -HostSetImpl::UpdateHostsParams +PrioritySet::UpdateHostsParams HostSetImpl::updateHostsParams(const HostSet& host_set) { + return updateHostsParams(host_set.hostsPtr(), host_set.hostsPerLocalityPtr(), + host_set.healthyHostsPtr(), host_set.healthyHostsPerLocalityPtr(), + host_set.degradedHostsPtr(), host_set.degradedHostsPerLocalityPtr(), + host_set.excludedHostsPtr(), host_set.excludedHostsPerLocalityPtr()); +} +PrioritySet::UpdateHostsParams HostSetImpl::partitionHosts(HostVectorConstSharedPtr hosts, HostsPerLocalityConstSharedPtr hosts_per_locality) { - auto healthy_hosts = ClusterImplBase::createHostList(*hosts, Host::Health::Healthy); - auto degraded_hosts = ClusterImplBase::createHostList(*hosts, Host::Health::Degraded); - auto healthy_hosts_per_locality = - ClusterImplBase::createHostLists(*hosts_per_locality, Host::Health::Healthy); - auto degraded_hosts_per_locality = - ClusterImplBase::createHostLists(*hosts_per_locality, Host::Health::Degraded); + auto partitioned_hosts = ClusterImplBase::partitionHostList(*hosts); + auto healthy_degraded_excluded_hosts_per_locality = + ClusterImplBase::partitionHostsPerLocality(*hosts_per_locality); return updateHostsParams(std::move(hosts), std::move(hosts_per_locality), - std::move(healthy_hosts), std::move(healthy_hosts_per_locality), - std::move(degraded_hosts), std::move(degraded_hosts_per_locality)); + std::move(std::get<0>(partitioned_hosts)), + std::move(std::get<0>(healthy_degraded_excluded_hosts_per_locality)), + std::move(std::get<1>(partitioned_hosts)), + std::move(std::get<1>(healthy_degraded_excluded_hosts_per_locality)), + std::move(std::get<2>(partitioned_hosts)), + std::move(std::get<2>(healthy_degraded_excluded_hosts_per_locality))); } double HostSetImpl::effectiveLocalityWeight(uint32_t index, const HostsPerLocality& 
eligible_hosts_per_locality, + const HostsPerLocality& excluded_hosts_per_locality, const HostsPerLocality& all_hosts_per_locality, const LocalityWeights& locality_weights, uint32_t overprovisioning_factor) { - const auto& locality_hosts = all_hosts_per_locality.get()[index]; const auto& locality_eligible_hosts = eligible_hosts_per_locality.get()[index]; - if (locality_hosts.empty()) { + const uint32_t excluded_count = excluded_hosts_per_locality.get().size() > index + ? excluded_hosts_per_locality.get()[index].size() + : 0; + const auto host_count = all_hosts_per_locality.get()[index].size() - excluded_count; + if (host_count == 0) { return 0.0; } - const double locality_availability_ratio = - 1.0 * locality_eligible_hosts.size() / locality_hosts.size(); + const double locality_availability_ratio = 1.0 * locality_eligible_hosts.size() / host_count; const uint32_t weight = locality_weights[index]; // Availability ranges from 0-1.0, and is the ratio of eligible hosts to total hosts, modified by // the overprovisioning factor. 
@@ -423,14 +468,14 @@ double HostSetImpl::effectiveLocalityWeight(uint32_t index, return weight * effective_locality_availability_ratio; } -HostSet& PrioritySetImpl::getOrCreateHostSet(uint32_t priority, - absl::optional overprovisioning_factor) { +const HostSet& +PrioritySetImpl::getOrCreateHostSet(uint32_t priority, + absl::optional overprovisioning_factor) { if (host_sets_.size() < priority + 1) { for (size_t i = host_sets_.size(); i <= priority; ++i) { HostSetImplPtr host_set = createHostSet(i, overprovisioning_factor); host_set->addPriorityUpdateCb([this](uint32_t priority, const HostVector& hosts_added, const HostVector& hosts_removed) { - runUpdateCallbacks(hosts_added, hosts_removed); runReferenceUpdateCallbacks(priority, hosts_added, hosts_removed); }); host_sets_.push_back(std::move(host_set)); @@ -439,6 +484,54 @@ HostSet& PrioritySetImpl::getOrCreateHostSet(uint32_t priority, return *host_sets_[priority]; } +void PrioritySetImpl::updateHosts(uint32_t priority, UpdateHostsParams&& update_hosts_params, + LocalityWeightsConstSharedPtr locality_weights, + const HostVector& hosts_added, const HostVector& hosts_removed, + absl::optional overprovisioning_factor) { + // Ensure that we have a HostSet for the given priority. + getOrCreateHostSet(priority, overprovisioning_factor); + static_cast(host_sets_[priority].get()) + ->updateHosts(std::move(update_hosts_params), std::move(locality_weights), hosts_added, + hosts_removed, overprovisioning_factor); + + if (!batch_update_) { + runUpdateCallbacks(hosts_added, hosts_removed); + } +} + +void PrioritySetImpl::batchHostUpdate(BatchUpdateCb& callback) { + BatchUpdateScope scope(*this); + + // We wrap the update call with a lambda that tracks all the hosts that have been added/removed. + callback.batchUpdate(scope); + + // Now that all the updates have been complete, we can compute the diff. 
+ HostVector net_hosts_added = filterHosts(scope.all_hosts_added_, scope.all_hosts_removed_); + HostVector net_hosts_removed = filterHosts(scope.all_hosts_removed_, scope.all_hosts_added_); + + runUpdateCallbacks(net_hosts_added, net_hosts_removed); +} + +void PrioritySetImpl::BatchUpdateScope::updateHosts( + uint32_t priority, PrioritySet::UpdateHostsParams&& update_hosts_params, + LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added, + const HostVector& hosts_removed, absl::optional overprovisioning_factor) { + // We assume that each call updates a different priority. + ASSERT(priorities_.find(priority) == priorities_.end()); + priorities_.insert(priority); + + for (const auto& host : hosts_added) { + all_hosts_added_.insert(host); + } + + for (const auto& host : hosts_removed) { + all_hosts_removed_.insert(host); + } + + parent_.updateHosts(priority, std::move(update_hosts_params), locality_weights, hosts_added, + hosts_removed, overprovisioning_factor); +} + ClusterStats ClusterInfoImpl::generateStats(Stats::Scope& scope) { return {ALL_CLUSTER_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope), POOL_HISTOGRAM(scope))}; } @@ -475,8 +568,9 @@ ClusterInfoImpl::ClusterInfoImpl(const envoy::api::v2::Cluster& config, metadata_(config.metadata()), typed_metadata_(config.metadata()), common_lb_config_(config.common_lb_config()), cluster_socket_options_(parseClusterSocketOptions(config, bind_config)), - drain_connections_on_host_removal_(config.drain_connections_on_host_removal()) { - + drain_connections_on_host_removal_(config.drain_connections_on_host_removal()), + warm_hosts_(!config.health_checks().empty() && + common_lb_config_.ignore_new_hosts_until_first_hc()) { switch (config.lb_policy()) { case envoy::api::v2::Cluster::ROUND_ROBIN: lb_type_ = LoadBalancerType::RoundRobin; @@ -504,6 +598,13 @@ ClusterInfoImpl::ClusterInfoImpl(const envoy::api::v2::Cluster& config, NOT_REACHED_GCOVR_EXCL_LINE; } + if 
(config.lb_subset_config().locality_weight_aware() && + !config.common_lb_config().has_locality_weighted_lb_config()) { + throw EnvoyException(fmt::format( + "Locality weight aware subset LB requires that a locality_weighted_lb_config be set in {}", + name_)); + } + if (config.protocol_selection() == envoy::api::v2::Cluster::USE_CONFIGURED_PROTOCOL) { // Make sure multiple protocol configurations are not present if (config.has_http_protocol_options() && config.has_http2_protocol_options()) { @@ -516,6 +617,12 @@ ClusterInfoImpl::ClusterInfoImpl(const envoy::api::v2::Cluster& config, idle_timeout_ = std::chrono::milliseconds( DurationUtil::durationToMilliseconds(config.common_http_protocol_options().idle_timeout())); } + if (config.has_eds_cluster_config()) { + if (config.type() != envoy::api::v2::Cluster::EDS) { + throw EnvoyException("eds_cluster_config set in a non-EDS cluster"); + } + eds_service_name_ = config.eds_cluster_config().service_name(); + } // TODO(htuch): Remove this temporary workaround when we have // https://github.com/lyft/protoc-gen-validate/issues/97 resolved. This just provides early @@ -533,15 +640,6 @@ ClusterInfoImpl::extensionProtocolOptions(const std::string& name) const { return nullptr; } -namespace { - -Stats::ScopePtr generateStatsScope(const envoy::api::v2::Cluster& config, Stats::Store& stats) { - return stats.createScope(fmt::format( - "cluster.{}.", config.alt_stat_name().empty() ? 
config.name() : config.alt_stat_name())); -} - -} // namespace - Network::TransportSocketFactoryPtr createTransportSocketFactory( const envoy::api::v2::Cluster& config, Server::Configuration::TransportSocketFactoryContext& factory_context) { @@ -566,98 +664,12 @@ Network::TransportSocketFactoryPtr createTransportSocketFactory( return config_factory.createTransportSocketFactory(*message, factory_context); } -ClusterSharedPtr ClusterImplBase::create( - const envoy::api::v2::Cluster& cluster, ClusterManager& cm, Stats::Store& stats, - ThreadLocal::Instance& tls, Network::DnsResolverSharedPtr dns_resolver, - Ssl::ContextManager& ssl_context_manager, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, - AccessLog::AccessLogManager& log_manager, const LocalInfo::LocalInfo& local_info, - Server::Admin& admin, Singleton::Manager& singleton_manager, - Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api, Api::Api& api) { - std::unique_ptr new_cluster; - - // We make this a shared pointer to deal with the distinct ownership - // scenarios that can exist: in one case, we pass in the "default" - // DNS resolver that is owned by the Server::Instance. In the case - // where 'dns_resolvers' is specified, we have per-cluster DNS - // resolvers that are created here but ownership resides with - // StrictDnsClusterImpl/LogicalDnsCluster. 
- auto selected_dns_resolver = dns_resolver; - if (!cluster.dns_resolvers().empty()) { - const auto& resolver_addrs = cluster.dns_resolvers(); - std::vector resolvers; - resolvers.reserve(resolver_addrs.size()); - for (const auto& resolver_addr : resolver_addrs) { - resolvers.push_back(Network::Address::resolveProtoAddress(resolver_addr)); - } - selected_dns_resolver = dispatcher.createDnsResolver(resolvers); - } - - auto stats_scope = generateStatsScope(cluster, stats); - Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin, ssl_context_manager, *stats_scope, cm, local_info, dispatcher, random, stats, - singleton_manager, tls, api); - - switch (cluster.type()) { - case envoy::api::v2::Cluster::STATIC: - new_cluster = std::make_unique(cluster, runtime, factory_context, - std::move(stats_scope), added_via_api); - break; - case envoy::api::v2::Cluster::STRICT_DNS: - new_cluster = std::make_unique(cluster, runtime, selected_dns_resolver, - factory_context, std::move(stats_scope), - added_via_api); - break; - case envoy::api::v2::Cluster::LOGICAL_DNS: - new_cluster = - std::make_unique(cluster, runtime, selected_dns_resolver, tls, - factory_context, std::move(stats_scope), added_via_api); - break; - case envoy::api::v2::Cluster::ORIGINAL_DST: - if (cluster.lb_policy() != envoy::api::v2::Cluster::ORIGINAL_DST_LB) { - throw EnvoyException(fmt::format( - "cluster: cluster type 'original_dst' may only be used with LB type 'original_dst_lb'")); - } - if (cluster.has_lb_subset_config() && cluster.lb_subset_config().subset_selectors_size() != 0) { - throw EnvoyException(fmt::format( - "cluster: cluster type 'original_dst' may not be used with lb_subset_config")); - } - new_cluster = std::make_unique(cluster, runtime, factory_context, - std::move(stats_scope), added_via_api); - break; - case envoy::api::v2::Cluster::EDS: - if (!cluster.has_eds_cluster_config()) { - throw EnvoyException("cannot create an EDS cluster without an EDS config"); - } - - 
// We map SDS to EDS, since EDS provides backwards compatibility with SDS. - new_cluster = std::make_unique(cluster, runtime, factory_context, - std::move(stats_scope), added_via_api); - break; - default: - NOT_REACHED_GCOVR_EXCL_LINE; - } - - if (!cluster.health_checks().empty()) { - // TODO(htuch): Need to support multiple health checks in v2. - if (cluster.health_checks().size() != 1) { - throw EnvoyException("Multiple health checks not supported"); - } else { - new_cluster->setHealthChecker(HealthCheckerFactory::create( - cluster.health_checks()[0], *new_cluster, runtime, random, dispatcher, log_manager)); - } - } - - new_cluster->setOutlierDetector(Outlier::DetectorImplFactory::createForCluster( - *new_cluster, cluster, dispatcher, runtime, outlier_event_logger)); - return std::move(new_cluster); -} - ClusterImplBase::ClusterImplBase( const envoy::api::v2::Cluster& cluster, Runtime::Loader& runtime, Server::Configuration::TransportSocketFactoryContext& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) - : runtime_(runtime) { + : init_manager_(fmt::format("Cluster {}", cluster.name())), + init_watcher_("ClusterImplBase", [this]() { onInitDone(); }), runtime_(runtime) { factory_context.setInitManager(init_manager_); auto socket_factory = createTransportSocketFactory(cluster, factory_context); info_ = std::make_unique(cluster, factory_context.clusterManager().bindConfig(), @@ -674,33 +686,53 @@ ClusterImplBase::ClusterImplBase( uint32_t healthy_hosts = 0; uint32_t degraded_hosts = 0; + uint32_t excluded_hosts = 0; uint32_t hosts = 0; for (const auto& host_set : prioritySet().hostSetsPerPriority()) { hosts += host_set->hosts().size(); healthy_hosts += host_set->healthyHosts().size(); degraded_hosts += host_set->degradedHosts().size(); + excluded_hosts += host_set->excludedHosts().size(); } info_->stats().membership_total_.set(hosts); info_->stats().membership_healthy_.set(healthy_hosts); info_->stats().membership_degraded_.set(degraded_hosts); + 
info_->stats().membership_excluded_.set(excluded_hosts); }); } -HostVectorConstSharedPtr ClusterImplBase::createHostList(const HostVector& hosts, - Host::Health health) { - HostVectorSharedPtr healthy_list(new HostVector()); +std::tuple +ClusterImplBase::partitionHostList(const HostVector& hosts) { + auto healthy_list = std::make_shared(); + auto degraded_list = std::make_shared(); + auto excluded_list = std::make_shared(); + for (const auto& host : hosts) { - if (host->health() == health) { - healthy_list->emplace_back(host); + if (host->health() == Host::Health::Healthy) { + healthy_list->get().emplace_back(host); + } + if (host->health() == Host::Health::Degraded) { + degraded_list->get().emplace_back(host); + } + if (host->healthFlagGet(Host::HealthFlag::PENDING_ACTIVE_HC)) { + excluded_list->get().emplace_back(host); } } - return healthy_list; + return std::make_tuple(healthy_list, degraded_list, excluded_list); } -HostsPerLocalityConstSharedPtr ClusterImplBase::createHostLists(const HostsPerLocality& hosts, - Host::Health health) { - return hosts.filter([&health](const Host& host) { return host.health() == health; }); +std::tuple +ClusterImplBase::partitionHostsPerLocality(const HostsPerLocality& hosts) { + auto filtered_clones = hosts.filter( + {[](const Host& host) { return host.health() == Host::Health::Healthy; }, + [](const Host& host) { return host.health() == Host::Health::Degraded; }, + [](const Host& host) { return host.healthFlagGet(Host::HealthFlag::PENDING_ACTIVE_HC); }}); + + return std::make_tuple(std::move(filtered_clones[0]), std::move(filtered_clones[1]), + std::move(filtered_clones[2])); } bool ClusterInfoImpl::maintenanceMode() const { @@ -727,7 +759,7 @@ void ClusterImplBase::onPreInitComplete() { initialization_started_ = true; ENVOY_LOG(debug, "initializing secondary cluster {} completed", info()->name()); - init_manager_.initialize([this]() { onInitDone(); }); + init_manager_.initialize(init_watcher_); } void 
ClusterImplBase::onInitDone() { @@ -759,7 +791,7 @@ void ClusterImplBase::finishInitialization() { initialization_complete_callback_ = nullptr; if (health_checker_ != nullptr) { - reloadHealthyHosts(); + reloadHealthyHosts(nullptr); } if (snapped_callback != nullptr) { @@ -772,11 +804,11 @@ void ClusterImplBase::setHealthChecker(const HealthCheckerSharedPtr& health_chec health_checker_ = health_checker; health_checker_->start(); health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr, HealthTransition changed_state) -> void { + [this](const HostSharedPtr& host, HealthTransition changed_state) -> void { // If we get a health check completion that resulted in a state change, signal to // update the host sets on all threads. if (changed_state == HealthTransition::Changed) { - reloadHealthyHosts(); + reloadHealthyHosts(host); } }); } @@ -787,10 +819,11 @@ void ClusterImplBase::setOutlierDetector(const Outlier::DetectorSharedPtr& outli } outlier_detector_ = outlier_detector; - outlier_detector_->addChangedStateCb([this](HostSharedPtr) -> void { reloadHealthyHosts(); }); + outlier_detector_->addChangedStateCb( + [this](const HostSharedPtr& host) -> void { reloadHealthyHosts(host); }); } -void ClusterImplBase::reloadHealthyHosts() { +void ClusterImplBase::reloadHealthyHosts(const HostSharedPtr& host) { // Every time a host changes Health Check state we cause a full healthy host recalculation which // for expensive LBs (ring, subset, etc.) can be quite time consuming. During startup, this // can also block worker threads by doing this repeatedly. 
There is no reason to do this @@ -800,12 +833,20 @@ void ClusterImplBase::reloadHealthyHosts() { return; } - for (auto& host_set : prioritySet().hostSetsPerPriority()) { + reloadHealthyHostsHelper(host); +} + +void ClusterImplBase::reloadHealthyHostsHelper(const HostSharedPtr&) { + const auto& host_sets = prioritySet().hostSetsPerPriority(); + for (size_t priority = 0; priority < host_sets.size(); ++priority) { + const auto& host_set = host_sets[priority]; // TODO(htuch): Can we skip these copies by exporting out const shared_ptr from HostSet? HostVectorConstSharedPtr hosts_copy(new HostVector(host_set->hosts())); + HostsPerLocalityConstSharedPtr hosts_per_locality_copy = host_set->hostsPerLocality().clone(); - host_set->updateHosts(HostSetImpl::partitionHosts(hosts_copy, hosts_per_locality_copy), - host_set->localityWeights(), {}, {}, absl::nullopt); + prioritySet().updateHosts(priority, + HostSetImpl::partitionHosts(hosts_copy, hosts_per_locality_copy), + host_set->localityWeights(), {}, {}, absl::nullopt); } } @@ -835,9 +876,16 @@ ClusterInfoImpl::ResourceManagers::ResourceManagers(const envoy::api::v2::Cluste } ClusterCircuitBreakersStats -ClusterInfoImpl::generateCircuitBreakersStats(Stats::Scope& scope, const std::string& stat_prefix) { +ClusterInfoImpl::generateCircuitBreakersStats(Stats::Scope& scope, const std::string& stat_prefix, + bool track_remaining) { std::string prefix(fmt::format("circuit_breakers.{}.", stat_prefix)); - return {ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(POOL_GAUGE_PREFIX(scope, prefix))}; + if (track_remaining) { + return {ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(POOL_GAUGE_PREFIX(scope, prefix), + POOL_GAUGE_PREFIX(scope, prefix))}; + } else { + return {ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(POOL_GAUGE_PREFIX(scope, prefix), + NULL_POOL_GAUGE(scope))}; + } } ResourceManagerImplPtr @@ -849,6 +897,9 @@ ClusterInfoImpl::ResourceManagers::load(const envoy::api::v2::Cluster& config, uint64_t max_pending_requests = 1024; uint64_t max_requests = 1024; 
uint64_t max_retries = 3; + uint64_t max_connection_pools = std::numeric_limits::max(); + + bool track_remaining = false; std::string priority_name; switch (priority) { @@ -877,15 +928,20 @@ ClusterInfoImpl::ResourceManagers::load(const envoy::api::v2::Cluster& config, PROTOBUF_GET_WRAPPED_OR_DEFAULT(*it, max_pending_requests, max_pending_requests); max_requests = PROTOBUF_GET_WRAPPED_OR_DEFAULT(*it, max_requests, max_requests); max_retries = PROTOBUF_GET_WRAPPED_OR_DEFAULT(*it, max_retries, max_retries); + track_remaining = it->track_remaining(); + max_connection_pools = + PROTOBUF_GET_WRAPPED_OR_DEFAULT(*it, max_connection_pools, max_connection_pools); } return std::make_unique( runtime, runtime_prefix, max_connections, max_pending_requests, max_requests, max_retries, - ClusterInfoImpl::generateCircuitBreakersStats(stats_scope, priority_name)); + max_connection_pools, + ClusterInfoImpl::generateCircuitBreakersStats(stats_scope, priority_name, track_remaining)); } PriorityStateManager::PriorityStateManager(ClusterImplBase& cluster, - const LocalInfo::LocalInfo& local_info) - : parent_(cluster), local_info_node_(local_info.node()) {} + const LocalInfo::LocalInfo& local_info, + PrioritySet::HostUpdateCb* update_cb) + : parent_(cluster), local_info_node_(local_info.node()), update_cb_(update_cb) {} void PriorityStateManager::initializePriorityFor( const envoy::api::v2::endpoint::LocalityLbEndpoints& locality_lb_endpoint) { @@ -988,54 +1044,18 @@ void PriorityStateManager::updateClusterPrioritySet( auto per_locality_shared = std::make_shared(std::move(per_locality), non_empty_local_locality); - auto& host_set = static_cast(parent_.prioritySet()) - .getOrCreateHostSet(priority, overprovisioning_factor); - host_set.updateHosts(HostSetImpl::partitionHosts(hosts, per_locality_shared), - std::move(locality_weights), hosts_added.value_or(*hosts), - hosts_removed.value_or({}), overprovisioning_factor); -} - -StaticClusterImpl::StaticClusterImpl( - const 
envoy::api::v2::Cluster& cluster, Runtime::Loader& runtime, - Server::Configuration::TransportSocketFactoryContext& factory_context, - Stats::ScopePtr&& stats_scope, bool added_via_api) - : ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), added_via_api), - priority_state_manager_(new PriorityStateManager(*this, factory_context.localInfo())) { - // TODO(dio): Use by-reference when cluster.hosts() is removed. - const envoy::api::v2::ClusterLoadAssignment cluster_load_assignment( - cluster.has_load_assignment() ? cluster.load_assignment() - : Config::Utility::translateClusterHosts(cluster.hosts())); - - overprovisioning_factor_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT( - cluster_load_assignment.policy(), overprovisioning_factor, kDefaultOverProvisioningFactor); - - for (const auto& locality_lb_endpoint : cluster_load_assignment.endpoints()) { - priority_state_manager_->initializePriorityFor(locality_lb_endpoint); - for (const auto& lb_endpoint : locality_lb_endpoint.lb_endpoints()) { - priority_state_manager_->registerHostForPriority( - "", resolveProtoAddress(lb_endpoint.endpoint().address()), locality_lb_endpoint, - lb_endpoint); - } - } -} - -void StaticClusterImpl::startPreInit() { - // At this point see if we have a health checker. If so, mark all the hosts unhealthy and - // then fire update callbacks to start the health checking process. - const auto& health_checker_flag = - health_checker_ != nullptr - ? absl::optional(Host::HealthFlag::FAILED_ACTIVE_HC) - : absl::nullopt; - - auto& priority_state = priority_state_manager_->priorityState(); - for (size_t i = 0; i < priority_state.size(); ++i) { - priority_state_manager_->updateClusterPrioritySet( - i, std::move(priority_state[i].first), absl::nullopt, absl::nullopt, health_checker_flag, - overprovisioning_factor_); + // If a batch update callback was provided, use that. Otherwise directly update + // the PrioritySet. 
+ if (update_cb_ != nullptr) { + update_cb_->updateHosts(priority, HostSetImpl::partitionHosts(hosts, per_locality_shared), + std::move(locality_weights), hosts_added.value_or(*hosts), + hosts_removed.value_or({}), overprovisioning_factor); + } else { + parent_.prioritySet().updateHosts( + priority, HostSetImpl::partitionHosts(hosts, per_locality_shared), + std::move(locality_weights), hosts_added.value_or(*hosts), + hosts_removed.value_or({}), overprovisioning_factor); } - priority_state_manager_.reset(); - - onPreInitComplete(); } bool BaseDynamicClusterImpl::updateDynamicHostList(const HostVector& new_hosts, @@ -1082,6 +1102,12 @@ bool BaseDynamicClusterImpl::updateDynamicHostList(const HostVector& new_hosts, auto existing_host = all_hosts.find(host->address()->asString()); const bool existing_host_found = existing_host != all_hosts.end(); + // Clear any pending deletion flag on an existing host in case it came back while it was + // being stabilized. We will set it again below if needed. + if (existing_host_found) { + existing_host->second->healthFlagClear(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL); + } + // Check if in-place host update should be skipped, i.e. when the following criteria are met // (currently there is only one criterion, but we might add more in the future): // - The cluster health checker is activated and a new host is matched with the existing one, @@ -1126,6 +1152,7 @@ bool BaseDynamicClusterImpl::updateDynamicHostList(const HostVector& new_hosts, // Did the priority change? if (host->priority() != existing_host->second->priority()) { existing_host->second->priority(host->priority()); + hosts_added_to_current_priority.emplace_back(existing_host->second); } existing_host->second->weight(host->weight()); @@ -1139,6 +1166,12 @@ bool BaseDynamicClusterImpl::updateDynamicHostList(const HostVector& new_hosts, // If we are depending on a health checker, we initialize to unhealthy. 
if (health_checker_ != nullptr) { host->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); + + // If we want to exclude hosts until they have been health checked, mark them with + // a flag to indicate that they have not been health checked yet. + if (info_->warmHosts()) { + host->healthFlagSet(Host::HealthFlag::PENDING_ACTIVE_HC); + } } updated_hosts[host->address()->asString()] = host; @@ -1169,19 +1202,23 @@ bool BaseDynamicClusterImpl::updateDynamicHostList(const HostVector& new_hosts, // The remaining hosts are hosts that are not referenced in the config update. We remove them from // the priority if any of the following is true: // - Active health checking is not enabled. - // - The removed hosts are failing active health checking. + // - The removed hosts are failing active health checking OR have been explicitly marked as + // unhealthy by a previous EDS update. We do not count outlier as a reason to remove a host + // or any other future health condition that may be added so we do not use the health() API. // - We have explicitly configured the cluster to remove hosts regardless of active health status. 
const bool dont_remove_healthy_hosts = health_checker_ != nullptr && !info()->drainConnectionsOnHostRemoval(); if (!current_priority_hosts.empty() && dont_remove_healthy_hosts) { for (auto i = current_priority_hosts.begin(); i != current_priority_hosts.end();) { - if (!(*i)->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC)) { + if (!((*i)->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC) || + (*i)->healthFlagGet(Host::HealthFlag::FAILED_EDS_HEALTH))) { if ((*i)->weight() > max_host_weight) { max_host_weight = (*i)->weight(); } final_hosts.push_back(*i); updated_hosts[(*i)->address()->asString()] = *i; + (*i)->healthFlagSet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL); i = current_priority_hosts.erase(i); } else { i++; @@ -1215,142 +1252,17 @@ bool BaseDynamicClusterImpl::updateDynamicHostList(const HostVector& new_hosts, return hosts_changed; } -StrictDnsClusterImpl::StrictDnsClusterImpl( - const envoy::api::v2::Cluster& cluster, Runtime::Loader& runtime, - Network::DnsResolverSharedPtr dns_resolver, - Server::Configuration::TransportSocketFactoryContext& factory_context, - Stats::ScopePtr&& stats_scope, bool added_via_api) - : BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope), - added_via_api), - local_info_(factory_context.localInfo()), dns_resolver_(dns_resolver), - dns_refresh_rate_ms_( - std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(cluster, dns_refresh_rate, 5000))) { +Network::DnsLookupFamily getDnsLookupFamilyFromCluster(const envoy::api::v2::Cluster& cluster) { switch (cluster.dns_lookup_family()) { case envoy::api::v2::Cluster::V6_ONLY: - dns_lookup_family_ = Network::DnsLookupFamily::V6Only; - break; + return Network::DnsLookupFamily::V6Only; case envoy::api::v2::Cluster::V4_ONLY: - dns_lookup_family_ = Network::DnsLookupFamily::V4Only; - break; + return Network::DnsLookupFamily::V4Only; case envoy::api::v2::Cluster::AUTO: - dns_lookup_family_ = Network::DnsLookupFamily::Auto; - break; + return 
Network::DnsLookupFamily::Auto; default: NOT_REACHED_GCOVR_EXCL_LINE; } - - const envoy::api::v2::ClusterLoadAssignment load_assignment( - cluster.has_load_assignment() ? cluster.load_assignment() - : Config::Utility::translateClusterHosts(cluster.hosts())); - const auto& locality_lb_endpoints = load_assignment.endpoints(); - for (const auto& locality_lb_endpoint : locality_lb_endpoints) { - for (const auto& lb_endpoint : locality_lb_endpoint.lb_endpoints()) { - const auto& host = lb_endpoint.endpoint().address(); - const std::string& url = fmt::format("tcp://{}:{}", host.socket_address().address(), - host.socket_address().port_value()); - resolve_targets_.emplace_back(new ResolveTarget(*this, factory_context.dispatcher(), url, - locality_lb_endpoint, lb_endpoint)); - } - } - - overprovisioning_factor_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT( - load_assignment.policy(), overprovisioning_factor, kDefaultOverProvisioningFactor); -} - -void StrictDnsClusterImpl::startPreInit() { - for (const ResolveTargetPtr& target : resolve_targets_) { - target->startResolve(); - } -} - -void StrictDnsClusterImpl::updateAllHosts(const HostVector& hosts_added, - const HostVector& hosts_removed, - uint32_t current_priority) { - PriorityStateManager priority_state_manager(*this, local_info_); - // At this point we know that we are different so make a new host list and notify. - // - // TODO(dio): The uniqueness of a host address resolved in STRICT_DNS cluster per priority is not - // guaranteed. Need a clear agreement on the behavior here, whether it is allowable to have - // duplicated hosts inside a priority. And if we want to enforce this behavior, it should be done - // inside the priority state manager. 
- for (const ResolveTargetPtr& target : resolve_targets_) { - priority_state_manager.initializePriorityFor(target->locality_lb_endpoint_); - for (const HostSharedPtr& host : target->hosts_) { - if (target->locality_lb_endpoint_.priority() == current_priority) { - priority_state_manager.registerHostForPriority(host, target->locality_lb_endpoint_); - } - } - } - - // TODO(dio): Add assertion in here. - priority_state_manager.updateClusterPrioritySet( - current_priority, std::move(priority_state_manager.priorityState()[current_priority].first), - hosts_added, hosts_removed, absl::nullopt, overprovisioning_factor_); -} - -StrictDnsClusterImpl::ResolveTarget::ResolveTarget( - StrictDnsClusterImpl& parent, Event::Dispatcher& dispatcher, const std::string& url, - const envoy::api::v2::endpoint::LocalityLbEndpoints& locality_lb_endpoint, - const envoy::api::v2::endpoint::LbEndpoint& lb_endpoint) - : parent_(parent), dns_address_(Network::Utility::hostFromTcpUrl(url)), - port_(Network::Utility::portFromTcpUrl(url)), - resolve_timer_(dispatcher.createTimer([this]() -> void { startResolve(); })), - locality_lb_endpoint_(locality_lb_endpoint), lb_endpoint_(lb_endpoint) {} - -StrictDnsClusterImpl::ResolveTarget::~ResolveTarget() { - if (active_query_) { - active_query_->cancel(); - } -} - -void StrictDnsClusterImpl::ResolveTarget::startResolve() { - ENVOY_LOG(debug, "starting async DNS resolution for {}", dns_address_); - parent_.info_->stats().update_attempt_.inc(); - - active_query_ = parent_.dns_resolver_->resolve( - dns_address_, parent_.dns_lookup_family_, - [this](const std::list&& address_list) -> void { - active_query_ = nullptr; - ENVOY_LOG(debug, "async DNS resolution complete for {}", dns_address_); - parent_.info_->stats().update_success_.inc(); - - std::unordered_map updated_hosts; - HostVector new_hosts; - for (const Network::Address::InstanceConstSharedPtr& address : address_list) { - // TODO(mattklein123): Currently the DNS interface does not consider port. 
We need to - // make a new address that has port in it. We need to both support IPv6 as well as - // potentially move port handling into the DNS interface itself, which would work better - // for SRV. - ASSERT(address != nullptr); - new_hosts.emplace_back(new HostImpl( - parent_.info_, dns_address_, Network::Utility::getAddressWithPort(*address, port_), - lb_endpoint_.metadata(), lb_endpoint_.load_balancing_weight().value(), - locality_lb_endpoint_.locality(), lb_endpoint_.endpoint().health_check_config(), - locality_lb_endpoint_.priority(), lb_endpoint_.health_status())); - } - - HostVector hosts_added; - HostVector hosts_removed; - if (parent_.updateDynamicHostList(new_hosts, hosts_, hosts_added, hosts_removed, - updated_hosts, all_hosts_)) { - ENVOY_LOG(debug, "DNS hosts have changed for {}", dns_address_); - ASSERT(std::all_of(hosts_.begin(), hosts_.end(), [&](const auto& host) { - return host->priority() == locality_lb_endpoint_.priority(); - })); - parent_.updateAllHosts(hosts_added, hosts_removed, locality_lb_endpoint_.priority()); - } else { - parent_.info_->stats().update_no_rebuild_.inc(); - } - - all_hosts_ = std::move(updated_hosts); - - // If there is an initialize callback, fire it now. Note that if the cluster refers to - // multiple DNS names, this will return initialized after a single DNS resolution - // completes. This is not perfect but is easier to code and unclear if the extra - // complexity is needed so will start with this. 
- parent_.onPreInitComplete(); - resolve_timer_->enableTimer(parent_.dns_refresh_rate_ms_); - }); } } // namespace Upstream diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index ba40c584a4aa0..bd49dc9d676d1 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -34,14 +34,13 @@ #include "common/common/logger.h" #include "common/config/metadata.h" #include "common/config/well_known_names.h" +#include "common/init/manager_impl.h" #include "common/network/utility.h" #include "common/stats/isolated_store_impl.h" #include "common/upstream/load_balancer_impl.h" #include "common/upstream/outlier_detection_impl.h" #include "common/upstream/resource_manager_impl.h" -#include "server/init_manager_impl.h" - #include "absl/synchronization/mutex.h" namespace Envoy { @@ -69,17 +68,25 @@ class HostDescriptionImpl : virtual public HostDescription { const envoy::api::v2::endpoint::Endpoint::HealthCheckConfig& health_check_config, uint32_t priority) : cluster_(cluster), hostname_(hostname), address_(dest_address), - health_check_address_(health_check_config.port_value() == 0 - ? dest_address - : Network::Utility::getAddressWithPort( - *dest_address, health_check_config.port_value())), canary_(Config::Metadata::metadataValue(metadata, Config::MetadataFilters::get().ENVOY_LB, Config::MetadataEnvoyLbKeys::get().CANARY) .bool_value()), metadata_(std::make_shared(metadata)), locality_(locality), stats_{ALL_HOST_STATS(POOL_COUNTER(stats_store_), POOL_GAUGE(stats_store_))}, - priority_(priority) {} + priority_(priority) { + if (health_check_config.port_value() != 0 && + dest_address->type() != Network::Address::Type::Ip) { + // Setting the health check port to non-0 only works for IP-type addresses. Setting the port + // for a pipe address is a misconfiguration. Throw an exception. 
+ throw EnvoyException( + fmt::format("Invalid host configuration: non-zero port for non-IP address")); + } + health_check_address_ = + health_check_config.port_value() == 0 + ? dest_address + : Network::Utility::getAddressWithPort(*dest_address, health_check_config.port_value()); + } // Upstream::HostDescription bool canary() const override { return canary_; } @@ -195,10 +202,6 @@ class HostImpl : public HostDescriptionImpl, outlier_detector_ = std::move(outlier_detector); } Host::Health health() const override { - if (!health_flags_) { - return Host::Health::Healthy; - } - // If any of the unhealthy flags are set, host is unhealthy. if (healthFlagGet(HealthFlag::FAILED_ACTIVE_HC) || healthFlagGet(HealthFlag::FAILED_OUTLIER_CHECK) || @@ -206,10 +209,15 @@ class HostImpl : public HostDescriptionImpl, return Host::Health::Unhealthy; } - // Only possible option at this point is that the host is degraded. - ASSERT(healthFlagGet(HealthFlag::DEGRADED_ACTIVE_HC) || - healthFlagGet(HealthFlag::DEGRADED_EDS_HEALTH)); - return Host::Health::Degraded; + // If any of the degraded flags are set, host is degraded. + if (healthFlagGet(HealthFlag::DEGRADED_ACTIVE_HC) || + healthFlagGet(HealthFlag::DEGRADED_EDS_HEALTH)) { + return Host::Health::Degraded; + } + + // The host must have no flags or be pending removal. + ASSERT(health_flags_ == 0 || healthFlagGet(HealthFlag::PENDING_DYNAMIC_REMOVAL)); + return Host::Health::Healthy; } uint32_t weight() const override { return weight_; } @@ -248,7 +256,8 @@ class HostsPerLocalityImpl : public HostsPerLocality { bool hasLocalLocality() const override { return local_; } const std::vector& get() const override { return hosts_per_locality_; } - HostsPerLocalityConstSharedPtr filter(std::function predicate) const override; + std::vector + filter(const std::vector>& predicate) const override; // The const shared pointer for the empty HostsPerLocalityImpl. 
static HostsPerLocalityConstSharedPtr empty() { @@ -272,13 +281,8 @@ class HostSetImpl : public HostSet { : priority_(priority), overprovisioning_factor_(overprovisioning_factor.has_value() ? overprovisioning_factor.value() : kDefaultOverProvisioningFactor), - hosts_(new HostVector()), healthy_hosts_(new HostVector()), - degraded_hosts_(new HostVector()) {} - - void updateHosts(UpdateHostsParams&& update_hosts_params, - LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added, - const HostVector& hosts_removed, - absl::optional overprovisioning_factor = absl::nullopt) override; + hosts_(new HostVector()), healthy_hosts_(new HealthyHostVector()), + degraded_hosts_(new DegradedHostVector()), excluded_hosts_(new ExcludedHostVector()) {} /** * Install a callback that will be invoked when the host set membership changes. @@ -291,37 +295,58 @@ class HostSetImpl : public HostSet { // Upstream::HostSet const HostVector& hosts() const override { return *hosts_; } - const HostVector& healthyHosts() const override { return *healthy_hosts_; } - const HostVector& degradedHosts() const override { return *degraded_hosts_; } + HostVectorConstSharedPtr hostsPtr() const override { return hosts_; } + const HostVector& healthyHosts() const override { return healthy_hosts_->get(); } + HealthyHostVectorConstSharedPtr healthyHostsPtr() const override { return healthy_hosts_; } + const HostVector& degradedHosts() const override { return degraded_hosts_->get(); } + DegradedHostVectorConstSharedPtr degradedHostsPtr() const override { return degraded_hosts_; } + const HostVector& excludedHosts() const override { return excluded_hosts_->get(); } + ExcludedHostVectorConstSharedPtr excludedHostsPtr() const override { return excluded_hosts_; } const HostsPerLocality& hostsPerLocality() const override { return *hosts_per_locality_; } + HostsPerLocalityConstSharedPtr hostsPerLocalityPtr() const override { + return hosts_per_locality_; + } const HostsPerLocality& 
healthyHostsPerLocality() const override { return *healthy_hosts_per_locality_; } + HostsPerLocalityConstSharedPtr healthyHostsPerLocalityPtr() const override { + return healthy_hosts_per_locality_; + } const HostsPerLocality& degradedHostsPerLocality() const override { return *degraded_hosts_per_locality_; } + HostsPerLocalityConstSharedPtr degradedHostsPerLocalityPtr() const override { + return degraded_hosts_per_locality_; + } + const HostsPerLocality& excludedHostsPerLocality() const override { + return *excluded_hosts_per_locality_; + } + HostsPerLocalityConstSharedPtr excludedHostsPerLocalityPtr() const override { + return excluded_hosts_per_locality_; + } LocalityWeightsConstSharedPtr localityWeights() const override { return locality_weights_; } - absl::optional chooseLocality() override; + absl::optional chooseHealthyLocality() override; + absl::optional chooseDegradedLocality() override; uint32_t priority() const override { return priority_; } uint32_t overprovisioningFactor() const override { return overprovisioning_factor_; } - // Utility methods for creating UpdateHostsParams. 
- static UpdateHostsParams updateHostsParams(HostVectorConstSharedPtr hosts, - HostsPerLocalityConstSharedPtr hosts_per_locality); - static UpdateHostsParams + static PrioritySet::UpdateHostsParams updateHostsParams(HostVectorConstSharedPtr hosts, HostsPerLocalityConstSharedPtr hosts_per_locality, - HostVectorConstSharedPtr healthy_hosts, - HostsPerLocalityConstSharedPtr healthy_hosts_per_locality); - static UpdateHostsParams - updateHostsParams(HostVectorConstSharedPtr hosts, - HostsPerLocalityConstSharedPtr hosts_per_locality, - HostVectorConstSharedPtr healthy_hosts, + HealthyHostVectorConstSharedPtr healthy_hosts, HostsPerLocalityConstSharedPtr healthy_hosts_per_locality, - HostVectorConstSharedPtr degraded_hosts, - HostsPerLocalityConstSharedPtr degraded_hosts_per_locality); - static UpdateHostsParams partitionHosts(HostVectorConstSharedPtr hosts, - HostsPerLocalityConstSharedPtr hosts_per_locality); + DegradedHostVectorConstSharedPtr degraded_hosts, + HostsPerLocalityConstSharedPtr degraded_hosts_per_locality, + ExcludedHostVectorConstSharedPtr excluded_hosts, + HostsPerLocalityConstSharedPtr excluded_hosts_per_locality); + static PrioritySet::UpdateHostsParams updateHostsParams(const HostSet& host_set); + static PrioritySet::UpdateHostsParams + partitionHosts(HostVectorConstSharedPtr hosts, HostsPerLocalityConstSharedPtr hosts_per_locality); + + void updateHosts(PrioritySet::UpdateHostsParams&& update_hosts_params, + LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added, + const HostVector& hosts_removed, + absl::optional overprovisioning_factor = absl::nullopt); protected: virtual void runUpdateCallbacks(const HostVector& hosts_added, const HostVector& hosts_removed) { @@ -333,6 +358,7 @@ class HostSetImpl : public HostSet { // locality. 
static double effectiveLocalityWeight(uint32_t index, const HostsPerLocality& eligible_hosts_per_locality, + const HostsPerLocality& excluded_hosts_per_locality, const HostsPerLocality& all_hosts_per_locality, const LocalityWeights& locality_weights, uint32_t overprovisioning_factor); @@ -340,11 +366,13 @@ class HostSetImpl : public HostSet { uint32_t priority_; uint32_t overprovisioning_factor_; HostVectorConstSharedPtr hosts_; - HostVectorConstSharedPtr healthy_hosts_; - HostVectorConstSharedPtr degraded_hosts_; + HealthyHostVectorConstSharedPtr healthy_hosts_; + DegradedHostVectorConstSharedPtr degraded_hosts_; + ExcludedHostVectorConstSharedPtr excluded_hosts_; HostsPerLocalityConstSharedPtr hosts_per_locality_{HostsPerLocalityImpl::empty()}; HostsPerLocalityConstSharedPtr healthy_hosts_per_locality_{HostsPerLocalityImpl::empty()}; HostsPerLocalityConstSharedPtr degraded_hosts_per_locality_{HostsPerLocalityImpl::empty()}; + HostsPerLocalityConstSharedPtr excluded_hosts_per_locality_{HostsPerLocalityImpl::empty()}; // TODO(mattklein123): Remove mutable. mutable Common::CallbackManager member_update_cb_helper_; @@ -376,10 +404,13 @@ class HostSetImpl : public HostSet { std::vector>& locality_entries, const HostsPerLocality& eligible_hosts_per_locality, const HostVector& eligible_hosts, HostsPerLocalityConstSharedPtr all_hosts_per_locality, + HostsPerLocalityConstSharedPtr excluded_hosts_per_locality, LocalityWeightsConstSharedPtr locality_weights, uint32_t overprovisioning_factor); - std::vector> locality_entries_; - std::unique_ptr> locality_scheduler_; + static absl::optional chooseLocality(EdfScheduler* locality_scheduler); + + std::vector> healthy_locality_entries_; + std::unique_ptr> healthy_locality_scheduler_; std::vector> degraded_locality_entries_; std::unique_ptr> degraded_locality_scheduler_; }; @@ -389,9 +420,9 @@ typedef std::unique_ptr HostSetImplPtr; /** * A class for management of the set of hosts in a given cluster. 
*/ - class PrioritySetImpl : public PrioritySet { public: + PrioritySetImpl() : batch_update_(false) {} // From PrioritySet Common::CallbackHandle* addMemberUpdateCb(MemberUpdateCb callback) const override { return member_update_cb_helper_.add(callback); @@ -402,10 +433,17 @@ class PrioritySetImpl : public PrioritySet { const std::vector>& hostSetsPerPriority() const override { return host_sets_; } - std::vector>& hostSetsPerPriority() override { return host_sets_; } // Get the host set for this priority level, creating it if necessary. - HostSet& getOrCreateHostSet(uint32_t priority, - absl::optional overprovisioning_factor = absl::nullopt); + const HostSet& + getOrCreateHostSet(uint32_t priority, + absl::optional overprovisioning_factor = absl::nullopt); + + void updateHosts(uint32_t priority, UpdateHostsParams&& update_hosts_params, + LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added, + const HostVector& hosts_removed, + absl::optional overprovisioning_factor = absl::nullopt) override; + + void batchHostUpdate(BatchUpdateCb& callback) override; protected: // Allows subclasses of PrioritySetImpl to create their own type of HostSetImpl. @@ -414,7 +452,7 @@ class PrioritySetImpl : public PrioritySet { return std::make_unique(priority, overprovisioning_factor); } -private: +protected: virtual void runUpdateCallbacks(const HostVector& hosts_added, const HostVector& hosts_removed) { member_update_cb_helper_.runCallbacks(hosts_added, hosts_removed); } @@ -426,10 +464,38 @@ class PrioritySetImpl : public PrioritySet { // It will expand as host sets are added but currently does not shrink to // avoid any potential lifetime issues. std::vector> host_sets_; + +private: // TODO(mattklein123): Remove mutable. mutable Common::CallbackManager member_update_cb_helper_; mutable Common::CallbackManager priority_update_cb_helper_; + bool batch_update_ : 1; + + // Helper class to maintain state as we perform multiple host updates. 
Keeps track of all hosts + // that have been added/removed throughout the batch update, and ensures that we properly manage + // the batch_update_ flag. + class BatchUpdateScope : public HostUpdateCb { + public: + explicit BatchUpdateScope(PrioritySetImpl& parent) : parent_(parent) { + ASSERT(!parent_.batch_update_); + parent_.batch_update_ = true; + } + ~BatchUpdateScope() { parent_.batch_update_ = false; } + + virtual void updateHosts(uint32_t priority, + PrioritySet::UpdateHostsParams&& update_hosts_params, + LocalityWeightsConstSharedPtr locality_weights, + const HostVector& hosts_added, const HostVector& hosts_removed, + absl::optional overprovisioning_factor) override; + + std::unordered_set all_hosts_added_; + std::unordered_set all_hosts_removed_; + + private: + PrioritySetImpl& parent_; + std::unordered_set priorities_; + }; }; /** @@ -445,7 +511,8 @@ class ClusterInfoImpl : public ClusterInfo { static ClusterStats generateStats(Stats::Scope& scope); static ClusterLoadReportStats generateLoadReportStats(Stats::Scope& scope); static ClusterCircuitBreakersStats generateCircuitBreakersStats(Stats::Scope& scope, - const std::string& stat_prefix); + const std::string& stat_prefix, + bool track_remaining); // Upstream::ClusterInfo bool addedViaApi() const override { return added_via_api_; } @@ -499,6 +566,9 @@ class ClusterInfoImpl : public ClusterInfo { }; bool drainConnectionsOnHostRemoval() const override { return drain_connections_on_host_removal_; } + bool warmHosts() const override { return warm_hosts_; } + + absl::optional eds_service_name() const override { return eds_service_name_; } private: struct ResourceManagers { @@ -542,6 +612,8 @@ class ClusterInfoImpl : public ClusterInfo { const envoy::api::v2::Cluster::CommonLbConfig common_lb_config_; const Network::ConnectionSocket::OptionsSharedPtr cluster_socket_options_; const bool drain_connections_on_host_removal_; + const bool warm_hosts_; + absl::optional eds_service_name_; }; /** @@ -559,14 +631,6 @@ 
createTransportSocketFactory(const envoy::api::v2::Cluster& config, class ClusterImplBase : public Cluster, protected Logger::Loggable { public: - static ClusterSharedPtr - create(const envoy::api::v2::Cluster& cluster, ClusterManager& cm, Stats::Store& stats, - ThreadLocal::Instance& tls, Network::DnsResolverSharedPtr dns_resolver, - Ssl::ContextManager& ssl_context_manager, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, - AccessLog::AccessLogManager& log_manager, const LocalInfo::LocalInfo& local_info, - Server::Admin& admin, Singleton::Manager& singleton_manager, - Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api, Api::Api& api); // Upstream::Cluster PrioritySet& prioritySet() override { return priority_set_; } const PrioritySet& prioritySet() const override { return priority_set_; } @@ -593,9 +657,17 @@ class ClusterImplBase : public Cluster, protected Logger::Loggable + partitionHostList(const HostVector& hosts); + + // Partitions the provided list of hosts per locality into three new lists containing the healthy, + // degraded and excluded hosts respectively. + static std::tuple + partitionHostsPerLocality(const HostsPerLocality& hosts); // Upstream::Cluster HealthChecker* healthChecker() override { return health_checker_.get(); } @@ -629,8 +701,16 @@ class ClusterImplBase : public Cluster, protected Logger::Loggable initialization_complete_callback_; uint64_t pending_initialize_health_checks_{}; }; +using ClusterImplBaseSharedPtr = std::shared_ptr; + /** * Manages PriorityState of a cluster. PriorityState is a per-priority binding of a set of hosts * with its corresponding locality weight map. 
This is useful to store priorities/hosts/localities @@ -655,7 +738,8 @@ class ClusterImplBase : public Cluster, protected Logger::Loggable { public: - PriorityStateManager(ClusterImplBase& cluster, const LocalInfo::LocalInfo& local_info); + PriorityStateManager(ClusterImplBase& cluster, const LocalInfo::LocalInfo& local_info, + PrioritySet::HostUpdateCb* update_cb); // Initializes the PriorityState vector based on the priority specified in locality_lb_endpoint. void @@ -693,31 +777,11 @@ class PriorityStateManager : protected Logger::Loggable { ClusterImplBase& parent_; PriorityState priority_state_; const envoy::api::v2::core::Node& local_info_node_; + PrioritySet::HostUpdateCb* update_cb_; }; typedef std::unique_ptr PriorityStateManagerPtr; -/** - * Implementation of Upstream::Cluster for static clusters (clusters that have a fixed number of - * hosts with resolved IP addresses). - */ -class StaticClusterImpl : public ClusterImplBase { -public: - StaticClusterImpl(const envoy::api::v2::Cluster& cluster, Runtime::Loader& runtime, - Server::Configuration::TransportSocketFactoryContext& factory_context, - Stats::ScopePtr&& stats_scope, bool added_via_api); - - // Upstream::Cluster - InitializePhase initializePhase() const override { return InitializePhase::Primary; } - -private: - // ClusterImplBase - void startPreInit() override; - - PriorityStateManagerPtr priority_state_manager_; - uint32_t overprovisioning_factor_; -}; - /** * Base for all dynamic cluster types. */ @@ -747,54 +811,9 @@ class BaseDynamicClusterImpl : public ClusterImplBase { }; /** - * Implementation of Upstream::Cluster that does periodic DNS resolution and updates the host - * member set if the DNS members change. + * Utility function to get Dns from cluster. 
*/ -class StrictDnsClusterImpl : public BaseDynamicClusterImpl { -public: - StrictDnsClusterImpl(const envoy::api::v2::Cluster& cluster, Runtime::Loader& runtime, - Network::DnsResolverSharedPtr dns_resolver, - Server::Configuration::TransportSocketFactoryContext& factory_context, - Stats::ScopePtr&& stats_scope, bool added_via_api); - - // Upstream::Cluster - InitializePhase initializePhase() const override { return InitializePhase::Primary; } - -private: - struct ResolveTarget { - ResolveTarget(StrictDnsClusterImpl& parent, Event::Dispatcher& dispatcher, - const std::string& url, - const envoy::api::v2::endpoint::LocalityLbEndpoints& locality_lb_endpoint, - const envoy::api::v2::endpoint::LbEndpoint& lb_endpoint); - ~ResolveTarget(); - void startResolve(); - - StrictDnsClusterImpl& parent_; - Network::ActiveDnsQuery* active_query_{}; - std::string dns_address_; - uint32_t port_; - Event::TimerPtr resolve_timer_; - HostVector hosts_; - const envoy::api::v2::endpoint::LocalityLbEndpoints locality_lb_endpoint_; - const envoy::api::v2::endpoint::LbEndpoint lb_endpoint_; - HostMap all_hosts_; - }; - - typedef std::unique_ptr ResolveTargetPtr; - - void updateAllHosts(const HostVector& hosts_added, const HostVector& hosts_removed, - uint32_t priority); - - // ClusterImplBase - void startPreInit() override; - - const LocalInfo::LocalInfo& local_info_; - Network::DnsResolverSharedPtr dns_resolver_; - std::list resolve_targets_; - const std::chrono::milliseconds dns_refresh_rate_ms_; - Network::DnsLookupFamily dns_lookup_family_; - uint32_t overprovisioning_factor_; -}; +Network::DnsLookupFamily getDnsLookupFamilyFromCluster(const envoy::api::v2::Cluster& cluster); } // namespace Upstream } // namespace Envoy diff --git a/source/docs/flow_control.md b/source/docs/flow_control.md index a0b3a46e768e8..e3673ad2fe439 100644 --- a/source/docs/flow_control.md +++ b/source/docs/flow_control.md @@ -33,7 +33,7 @@ Flow control for the upstream path is much the same. 
* The upstream `Network::ConnectionImpl::write_buffer_` buffers too much data. It calls `Network::ConnectionCallbacks::onAboveWriteBufferHighWatermark()`. - * The Network::TcpProxy::UpstreamCallbacks receives + * The `Network::TcpProxy::UpstreamCallbacks` receives `onAboveWriteBufferHighWatermark()` and calls `readDisable(true)` on the downstream connection. * When the upstream buffer is drained, it calls @@ -93,11 +93,13 @@ connection, the connection manager tracks how many outstanding high watermark events have occurred and passes any on to the router filter when it subscribes. It is worth noting that the router does not unwind `readDisable(true)` calls on -destruction. Each codec must ensure that any necessary readDisable calls are -unwound. In the case of HTTP/2 the `Envoy::Http::Http2::ConnectionImpl` will consume +destruction. In the case of HTTP/2 the `Envoy::Http::Http2::ConnectionImpl` will consume any outstanding flow control window on stream deletion to avoid leaking the connection-level -window. In the case of HTTP, the Envoy::Http::ConnectionManagerImpl unwinds any readDisable() -calls to ensure that pipelined requests will be read. +window. In the case of HTTP/1, the Envoy::Http::ConnectionManagerImpl unwinds any readDisable() +calls downstream to ensure that pipelined requests will be read. For HTTP/1 +upstream connections, the `readDisable(true)` calls are unwound in +ClientConnectionImpl::onMessageComplete() to make sure that as connections are +returned to the connection pool they are ready to read. ## HTTP/2 codec recv buffer diff --git a/source/docs/h2_metadata.md b/source/docs/h2_metadata.md index 214ecc3a84f10..e986ff27a2111 100644 --- a/source/docs/h2_metadata.md +++ b/source/docs/h2_metadata.md @@ -114,7 +114,7 @@ The METADATA frame uses a standard frame header, as described in the [HTTP/2 spec](https://httpwg.github.io/specs/rfc7540.html#FrameHeader.)
The payload of the METADATA frame is a block of key-value pairs encoded using the [HPACK Literal Header Field Never Indexed representation]( -http://httpwg.org/specs/rfc7541.html#literal.header.never.indexed). Each +https://httpwg.org/specs/rfc7541.html#literal.header.never.indexed). Each key-value pair represents one piece of metadata. The METADATA frame defines the following flags: diff --git a/source/docs/repokitteh.md b/source/docs/repokitteh.md index d3ee6561721d7..e5986fa9889bf 100644 --- a/source/docs/repokitteh.md +++ b/source/docs/repokitteh.md @@ -37,7 +37,7 @@ Only organization members can assign or unassign other users, who must be organi [Demo PR](https://github.com/envoyproxy/envoybot/pull/6) ### [Review](https://github.com/repokitteh/modules/blob/master/review.star) -Requests a a user to recview a pull request. +Requests a user to review a pull request. Examples: ``` diff --git a/source/docs/stats.md b/source/docs/stats.md index 25352688bf6a3..965dcb569d165 100644 --- a/source/docs/stats.md +++ b/source/docs/stats.md @@ -1,39 +1,20 @@ # Envoy Stats System Envoy statistics track numeric metrics on an Envoy instance, optionally spanning -binary restarts. The metrics are tracked as: +binary program restarts. The metrics are tracked as: * Counters: strictly increasing 64-bit integers. * Gauges: 64-bit integers that can rise and fall. * Histograms: mapping ranges of values to frequency. The ranges are auto-adjusted as data accumulates. Unliked counters and gauges, histogram data is not retained across - binary restarts. - -## Hot-restart: `RawStatData` vs `HeapStatData` - -In order to support restarting the Envoy binary without losing counter and gauge -values, they are stored in a shared-memory block, including stats that are -created dynamically at runtime in response to discovery of new clusters at -runtime. 
To simplify memmory management, each stat is allocated a fixed amount -of storage, controlled via [command-line -flags](https://www.envoyproxy.io/docs/envoy/latest/operations/cli): -`--max-stats` and `--max-obj-name-len`, which determine the size of the pre-allocated -shared-memory block. See -[RawStatData](https://github.com/envoyproxy/envoy/blob/master/source/common/stats/raw_stat_data.h). - -Note in particular that the full stat name is retained in shared-memory, making -it easy to correlate stats across restarts even as the dynamic cluster -configuration changes. - -One challenge with this fixed memory allocation strategy is that it limits -cluster scalabilty. A deployment wishing to use a single Envoy instance to -manage tens of thousands of clusters, each with its own set of scoped stats, -will use more memory than is ideal. - -A flag `--disable-hot-restart` pivots the system toward an alternate heap-based -stat allocator that allocates stats on demand in the heap, with no preset limits -on the number of stats or their length. See -[HeapStatData](https://github.com/envoyproxy/envoy/blob/master/source/common/stats/heap_stat_data.h). + binary program restarts. + +In order to support restarting the Envoy binary program without losing counter and gauge +values, they are passed from parent to child in an RPC protocol. +They were previously held in shared memory, which imposed various restrictions. +Unlike the shared memory implementation, the RPC passing *requires special indication +in source/common/stats/stat_merger.cc when simple addition is not appropriate for +combining two instances of a given stat*. ## Performance and Thread Local Storage @@ -67,10 +48,8 @@ This implementation is complicated so here is a rough overview of the threading reference the old scope which may be about to be cache flushed. * Since it's possible to have overlapping scopes, we de-dup stats when counters() or gauges() is called since these are very uncommon operations. 
- * Though this implementation is designed to work with a fixed shared memory space, it will fall - back to heap allocated stats if needed. NOTE: In this case, overlapping scopes will not share - the same backing store. This is to keep things simple, it could be done in the future if - needed. + * Overlapping scopes will not share the same backing store. This is to keep things simple, + it could be done in the future if needed. ### Histogram threading model @@ -101,7 +80,7 @@ followed. Stat names are replicated in several places in various forms. - * Held fully elaborated next to the values, in `RawStatData` and `HeapStatData` + * Held with the stat values, in `HeapStatData` * In [MetricImpl](https://github.com/envoyproxy/envoy/blob/master/source/common/stats/metric_impl.h) in a transformed state, with tags extracted into vectors of name/value strings. * In static strings across the codebase where stats are referenced @@ -110,10 +89,9 @@ Stat names are replicated in several places in various forms. used to perform tag extraction. There are stat maps in `ThreadLocalStore` for capturing all stats in a scope, -and each per-thread caches. However, they don't duplicate the stat -names. Instead, they reference the `char*` held in the `RawStatData` or -`HeapStatData itself, and thus are relatively cheap; effectively those maps are -all pointer-to-pointer. +and each per-thread caches. However, they don't duplicate the stat names. +Instead, they reference the `char*` held in the `HeapStatData` itself, and thus +are relatively cheap; effectively those maps are all pointer-to-pointer. For this to be safe, cache lookups from locally scoped strings must use `.find` rather than `operator[]`, as the latter would insert a pointer to a temporary as @@ -121,6 +99,58 @@ the key. If the `.find` fails, the actual stat must be constructed first, and then inserted into the map using its key storage. This strategy saves duplication of the keys, but costs an extra map lookup on each miss. 
+### Naming Representation + +When stored as flat strings, stat names can dominate Envoy memory usage when +there are a large number of clusters. Stat names typically combine a small +number of keywords, cluster names, host names, and response codes, separated by +`.`. For example `CLUSTER.upstream_cx_connect_attempts_exceeded`. There may be +thousands of clusters, and roughly 100 stats per cluster. Thus, the number +of combinations can be large. It is significantly more efficient to symbolize +each `.`-delimited token and represent stats as arrays of symbols. + +The transformation between flattened string and symbolized form is CPU-intensive +at scale. It requires parsing, encoding, and lookups in a shared map, which must +be mutex-protected. To avoid adding latency and CPU overhead while serving +requests, the tokens can be symbolized and saved in context classes, such as +[Http::CodeStatsImpl](https://github.com/envoyproxy/envoy/blob/master/source/common/http/codes.h). +Symbolization can occur on startup or when new hosts or clusters are configured +dynamically. Users of stats that are allocated dynamically per cluster, host, +etc, must explicitly store partial stat-names in their class instances, which later +can be composed dynamically at runtime in order to fully elaborate counters, +gauges, etc, without taking symbol-table locks, via `SymbolTable::join()`. + +### Current State and Strategy To Deploy Symbol Tables + +As of April 1, 2019, there are a fairly large number of files that directly +lookup stats by name, e.g. via `Stats::Scope::counter(const std::string&)` in +the request path. In most cases, this runtime lookup concatenates the scope name +with a string literal or other request-dependent token to form the stat name, so +it is not possible to fully memoize the stats at startup; there must be a +runtime name lookup.
+ +If a PR is issued that changes the underlying representation of a stat name to +be a symbol table entry then each stat-name will need to be transformed +whenever names are looked up, which would add CPU overhead and lock contention +in the request-path, violating one of the principles of Envoy's [threading +model](https://blog.envoyproxy.io/envoy-threading-model-a8d44b922310). Before +issuing such a PR we need to first iterate through the codebase memoizing the +symbols that are used to form stat-names. + +To resolve this chicken-and-egg challenge of switching to symbol-table stat-name +representation without suffering a temporary loss of performance, we employ a +["fake" symbol table +implementation](https://github.com/envoyproxy/envoy/blob/master/source/common/stats/fake_symbol_table_impl.h). +This implementation uses elaborated strings as an underlying representation, but +implements the same API as the ["real" +implementation](https://github.com/envoyproxy/envoy/blob/master/source/common/stats/symbol_table_impl.h). +The underlying string representation means that there is minimal runtime +overhead compared to the current state. But once all stat-allocation call-sites +have been converted to use the abstract [SymbolTable +API](https://github.com/envoyproxy/envoy/blob/master/include/envoy/stats/symbol_table.h), +the real implementation can be swapped in, the space savings realized, and the +fake implementation deleted.
+ ## Tags and Tag Extraction TBD diff --git a/source/exe/BUILD b/source/exe/BUILD index 652073d61b9bb..d2774363fbf63 100644 --- a/source/exe/BUILD +++ b/source/exe/BUILD @@ -7,6 +7,7 @@ load( "envoy_cc_platform_dep", "envoy_cc_posix_library", "envoy_cc_win32_library", + "envoy_google_grpc_external_deps", "envoy_package", ) load( @@ -41,7 +42,7 @@ envoy_cc_library( "//source/server:drain_manager_lib", "//source/server:options_lib", "//source/server:server_lib", - "//source/server:test_hooks_lib", + "//source/server:listener_hooks_lib", ] + select({ "//bazel:windows_x86_64": envoy_windows_extensions(), "//bazel:linux_ppc": envoy_all_extensions(PPC_SKIP_TARGETS), @@ -66,13 +67,13 @@ envoy_cc_library( hdrs = ["main_common.h"], deps = [ ":envoy_common_lib", + ":process_wide_lib", "//source/common/api:os_sys_calls_lib", "//source/common/common:compiler_requirements_lib", "//source/common/common:perf_annotation_lib", - "//source/common/thread:thread_factory_singleton_lib", + "//source/common/stats:fake_symbol_table_lib", "//source/server:hot_restart_lib", "//source/server:hot_restart_nop_lib", - "//source/server:proto_descriptors_lib", "//source/server/config_validation:server_lib", ] + select({ "//bazel:disable_signal_trace": [], @@ -83,11 +84,27 @@ envoy_cc_library( }) + envoy_cc_platform_dep("platform_impl_lib"), ) +envoy_cc_library( + name = "process_wide_lib", + srcs = ["process_wide.cc"], + hdrs = ["process_wide.h"], + external_deps = ["ares"], + deps = [ + "//source/common/common:assert_lib", + "//source/common/event:libevent_lib", + "//source/common/http/http2:nghttp2_lib", + "//source/server:proto_descriptors_lib", + ] + envoy_google_grpc_external_deps(), +) + envoy_cc_posix_library( name = "platform_impl_lib", hdrs = ["posix/platform_impl.h"], strip_include_prefix = "posix", - deps = ["//source/common/common:thread_lib"], + deps = [ + "//source/common/common:thread_lib", + "//source/common/filesystem:filesystem_lib", + ], ) envoy_cc_win32_library( @@ -97,6 
+114,7 @@ envoy_cc_win32_library( deps = [ "//source/common/common:assert_lib", "//source/common/common:thread_lib", + "//source/common/filesystem:filesystem_lib", ], ) diff --git a/source/exe/main_common.cc b/source/exe/main_common.cc index ef0d642e7fd7b..200433db3ab98 100644 --- a/source/exe/main_common.cc +++ b/source/exe/main_common.cc @@ -6,17 +6,15 @@ #include "common/common/compiler_requirements.h" #include "common/common/perf_annotation.h" -#include "common/event/libevent.h" #include "common/network/utility.h" #include "common/stats/thread_local_store.h" #include "server/config_validation/server.h" #include "server/drain_manager_impl.h" #include "server/hot_restart_nop_impl.h" +#include "server/listener_hooks.h" #include "server/options_impl.h" -#include "server/proto_descriptors.h" #include "server/server.h" -#include "server/test_hooks.h" #include "absl/strings/str_split.h" @@ -24,8 +22,6 @@ #include "server/hot_restart_impl.h" #endif -#include "ares.h" - namespace Envoy { Server::DrainManagerPtr ProdComponentFactory::createDrainManager(Server::Instance& server) { @@ -42,15 +38,13 @@ Runtime::LoaderPtr ProdComponentFactory::createRuntime(Server::Instance& server, } MainCommonBase::MainCommonBase(const OptionsImpl& options, Event::TimeSystem& time_system, - TestHooks& test_hooks, Server::ComponentFactory& component_factory, + ListenerHooks& listener_hooks, + Server::ComponentFactory& component_factory, std::unique_ptr&& random_generator, - Thread::ThreadFactory& thread_factory) - : options_(options), component_factory_(component_factory), thread_factory_(thread_factory) { - Thread::ThreadFactorySingleton::set(&thread_factory_); - ares_library_init(ARES_LIB_INIT_ALL); - Event::Libevent::Global::initialize(); - RELEASE_ASSERT(Envoy::Server::validateProtoDescriptors(), ""); - + Thread::ThreadFactory& thread_factory, + Filesystem::Instance& file_system) + : options_(options), component_factory_(component_factory), thread_factory_(thread_factory), + 
file_system_(file_system), stats_allocator_(symbol_table_) { switch (options_.mode()) { case Server::Mode::InitOnly: case Server::Mode::Serve: { @@ -76,12 +70,12 @@ MainCommonBase::MainCommonBase(const OptionsImpl& options, Event::TimeSystem& ti // block or not. std::set_new_handler([]() { PANIC("out of memory"); }); - stats_store_ = std::make_unique(options_.statsOptions(), - restarter_->statsAllocator()); + stats_store_ = std::make_unique(stats_allocator_); server_ = std::make_unique( - options_, time_system, local_address, test_hooks, *restarter_, *stats_store_, - access_log_lock, component_factory, std::move(random_generator), *tls_, thread_factory); + options_, time_system, local_address, listener_hooks, *restarter_, *stats_store_, + access_log_lock, component_factory, std::move(random_generator), *tls_, thread_factory_, + file_system_); break; } @@ -93,11 +87,6 @@ MainCommonBase::MainCommonBase(const OptionsImpl& options, Event::TimeSystem& ti } } -MainCommonBase::~MainCommonBase() { - Thread::ThreadFactorySingleton::set(nullptr); - ares_library_cleanup(); -} - void MainCommonBase::configureComponentLogLevels() { for (auto& component_log_level : options_.componentLogLevels()) { Logger::Logger* logger_to_change = Logger::Registry::logger(component_log_level.first); @@ -113,7 +102,8 @@ bool MainCommonBase::run() { return true; case Server::Mode::Validate: { auto local_address = Network::Utility::getLocalAddress(options_.localAddressIpVersion()); - return Server::validateConfig(options_, local_address, component_factory_, thread_factory_); + return Server::validateConfig(options_, local_address, component_factory_, thread_factory_, + file_system_); } case Server::Mode::InitOnly: PERF_DUMP(); @@ -136,19 +126,17 @@ void MainCommonBase::adminRequest(absl::string_view path_and_query, absl::string MainCommon::MainCommon(int argc, const char* const* argv) : options_(argc, argv, &MainCommon::hotRestartVersion, spdlog::level::info), - base_(options_, real_time_system_, 
default_test_hooks_, prod_component_factory_, - std::make_unique(), platform_impl_.threadFactory()) {} + base_(options_, real_time_system_, default_listener_hooks_, prod_component_factory_, + std::make_unique(), platform_impl_.threadFactory(), + platform_impl_.fileSystem()) {} -std::string MainCommon::hotRestartVersion(uint64_t max_num_stats, uint64_t max_stat_name_len, - bool hot_restart_enabled) { +std::string MainCommon::hotRestartVersion(bool hot_restart_enabled) { #ifdef ENVOY_HOT_RESTART if (hot_restart_enabled) { - return Server::HotRestartImpl::hotRestartVersion(max_num_stats, max_stat_name_len); + return Server::HotRestartImpl::hotRestartVersion(); } #else UNREFERENCED_PARAMETER(hot_restart_enabled); - UNREFERENCED_PARAMETER(max_num_stats); - UNREFERENCED_PARAMETER(max_stat_name_len); #endif return "disabled"; } diff --git a/source/exe/main_common.h b/source/exe/main_common.h index 2af78ce27c71a..24b737432e1a6 100644 --- a/source/exe/main_common.h +++ b/source/exe/main_common.h @@ -5,14 +5,16 @@ #include "common/common/thread.h" #include "common/event/real_time_system.h" +#include "common/stats/fake_symbol_table_impl.h" #include "common/stats/thread_local_store.h" #include "common/thread_local/thread_local_impl.h" #include "exe/platform_impl.h" +#include "exe/process_wide.h" +#include "server/listener_hooks.h" #include "server/options_impl.h" #include "server/server.h" -#include "server/test_hooks.h" #ifdef ENVOY_HANDLE_SIGNALS #include "exe/signal_action.h" @@ -33,11 +35,10 @@ class MainCommonBase { public: // Consumer must guarantee that all passed references are alive until this object is // destructed. 
- MainCommonBase(const OptionsImpl& options, Event::TimeSystem& time_system, TestHooks& test_hooks, - Server::ComponentFactory& component_factory, + MainCommonBase(const OptionsImpl& options, Event::TimeSystem& time_system, + ListenerHooks& listener_hooks, Server::ComponentFactory& component_factory, std::unique_ptr&& random_generator, - Thread::ThreadFactory& thread_factory); - ~MainCommonBase(); + Thread::ThreadFactory& thread_factory, Filesystem::Instance& file_system); bool run(); @@ -63,10 +64,13 @@ class MainCommonBase { const AdminRequestFn& handler); protected: + ProcessWide process_wide_; // Process-wide state setup/teardown. const Envoy::OptionsImpl& options_; - + Stats::FakeSymbolTableImpl symbol_table_; Server::ComponentFactory& component_factory_; Thread::ThreadFactory& thread_factory_; + Filesystem::Instance& file_system_; + Stats::HeapStatDataAllocator stats_allocator_; std::unique_ptr tls_; std::unique_ptr restarter_; @@ -84,6 +88,8 @@ class MainCommon { public: MainCommon(int argc, const char* const* argv); bool run() { return base_.run(); } + // Only tests have a legitimate need for this today. + Event::Dispatcher& dispatcherForTest() { return base_.server()->dispatcher(); } // Makes an admin-console request by path, calling handler() when complete. // The caller can initiate this from any thread, but it posts the request @@ -99,8 +105,13 @@ class MainCommon { base_.adminRequest(path_and_query, method, handler); } - static std::string hotRestartVersion(uint64_t max_num_stats, uint64_t max_stat_name_len, - bool hot_restart_enabled); + static std::string hotRestartVersion(bool hot_restart_enabled); + + /** + * @return a pointer to the server instance, or nullptr if initialized into + * validation mode. 
+ */ + Server::Instance* server() { return base_.server(); } private: #ifdef ENVOY_HANDLE_SIGNALS @@ -111,7 +122,7 @@ class MainCommon { PlatformImpl platform_impl_; Envoy::OptionsImpl options_; Event::RealTimeSystem real_time_system_; - DefaultTestHooks default_test_hooks_; + DefaultListenerHooks default_listener_hooks_; ProdComponentFactory prod_component_factory_; MainCommonBase base_; }; diff --git a/source/exe/posix/platform_impl.h b/source/exe/posix/platform_impl.h index f43f765172a19..45fbd7340779c 100644 --- a/source/exe/posix/platform_impl.h +++ b/source/exe/posix/platform_impl.h @@ -1,16 +1,18 @@ #pragma once -#include "common/common/macros.h" #include "common/common/thread_impl.h" +#include "common/filesystem/filesystem_impl.h" namespace Envoy { class PlatformImpl { public: Thread::ThreadFactory& threadFactory() { return thread_factory_; } + Filesystem::Instance& fileSystem() { return file_system_; } private: Thread::ThreadFactoryImplPosix thread_factory_; + Filesystem::InstanceImplPosix file_system_; }; -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/exe/process_wide.cc b/source/exe/process_wide.cc new file mode 100644 index 0000000000000..f709e9785bdcc --- /dev/null +++ b/source/exe/process_wide.cc @@ -0,0 +1,47 @@ +#include "exe/process_wide.h" + +#include "common/common/assert.h" +#include "common/event/libevent.h" +#include "common/http/http2/nghttp2.h" + +#include "server/proto_descriptors.h" + +#include "ares.h" + +#ifdef ENVOY_GOOGLE_GRPC +#include "grpc/grpc.h" +#endif + +namespace Envoy { +namespace { +// Static variable to count initialization pairs. For tests like +// main_common_test, we need to count to avoid double initialization or +// shutdown. 
+uint32_t process_wide_initialized; +} // namespace + +ProcessWide::ProcessWide() : initialization_depth_(process_wide_initialized) { + if (process_wide_initialized++ == 0) { +#ifdef ENVOY_GOOGLE_GRPC + grpc_init(); +#endif + ares_library_init(ARES_LIB_INIT_ALL); + Event::Libevent::Global::initialize(); + RELEASE_ASSERT(Envoy::Server::validateProtoDescriptors(), ""); + Http::Http2::initializeNghttp2Logging(); + } +} + +ProcessWide::~ProcessWide() { + ASSERT(process_wide_initialized > 0); + if (--process_wide_initialized == 0) { + process_wide_initialized = false; + ares_library_cleanup(); +#ifdef ENVOY_GOOGLE_GRPC + grpc_shutdown(); +#endif + } + ASSERT(process_wide_initialized == initialization_depth_); +} + +} // namespace Envoy diff --git a/source/exe/process_wide.h b/source/exe/process_wide.h new file mode 100644 index 0000000000000..2c305e8f1c272 --- /dev/null +++ b/source/exe/process_wide.h @@ -0,0 +1,18 @@ +#pragma once + +#include + +namespace Envoy { + +// Process-wide lifecycle events for global state in third-party dependencies, +// e.g. gRPC, c-ares. There should only ever be a single instance of this. 
+class ProcessWide { +public: + ProcessWide(); + ~ProcessWide(); + +private: + uint32_t initialization_depth_; +}; + +} // namespace Envoy diff --git a/source/exe/win32/platform_impl.h b/source/exe/win32/platform_impl.h index b69b2f5b1be1b..ffb239dd7ebbc 100644 --- a/source/exe/win32/platform_impl.h +++ b/source/exe/win32/platform_impl.h @@ -2,6 +2,7 @@ #include "common/common/assert.h" #include "common/common/thread_impl.h" +#include "common/filesystem/filesystem_impl.h" // clang-format off #include @@ -21,9 +22,11 @@ class PlatformImpl { ~PlatformImpl() { ::WSACleanup(); } Thread::ThreadFactory& threadFactory() { return thread_factory_; } + Filesystem::Instance& fileSystem() { return file_system_; } private: Thread::ThreadFactoryImplWin32 thread_factory_; + Filesystem::InstanceImplWin32 file_system_; }; -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/extensions/access_loggers/file/file_access_log_impl.cc b/source/extensions/access_loggers/file/file_access_log_impl.cc index e2344d2a2806b..75409f34dadcd 100644 --- a/source/extensions/access_loggers/file/file_access_log_impl.cc +++ b/source/extensions/access_loggers/file/file_access_log_impl.cc @@ -30,7 +30,7 @@ void FileAccessLog::log(const Http::HeaderMap* request_headers, } if (filter_) { - if (!filter_->evaluate(stream_info, *request_headers)) { + if (!filter_->evaluate(stream_info, *request_headers, *response_headers, *response_trailers)) { return; } } diff --git a/source/extensions/access_loggers/file/file_access_log_impl.h b/source/extensions/access_loggers/file/file_access_log_impl.h index 93bee65b0f7ad..b14b396befd8e 100644 --- a/source/extensions/access_loggers/file/file_access_log_impl.h +++ b/source/extensions/access_loggers/file/file_access_log_impl.h @@ -21,7 +21,7 @@ class FileAccessLog : public AccessLog::Instance { const StreamInfo::StreamInfo& stream_info) override; private: - Filesystem::FileSharedPtr log_file_; + AccessLog::AccessLogFileSharedPtr 
log_file_; AccessLog::FilterPtr filter_; AccessLog::FormatterPtr formatter_; }; diff --git a/source/extensions/access_loggers/http_grpc/BUILD b/source/extensions/access_loggers/http_grpc/BUILD index b7860b8f8dbb9..fc664d1fd46d9 100644 --- a/source/extensions/access_loggers/http_grpc/BUILD +++ b/source/extensions/access_loggers/http_grpc/BUILD @@ -31,6 +31,16 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "grpc_access_log_proto_descriptors_lib", + srcs = ["grpc_access_log_proto_descriptors.cc"], + hdrs = ["grpc_access_log_proto_descriptors.h"], + deps = [ + "//source/common/protobuf", + "@envoy_api//envoy/service/accesslog/v2:als_cc", + ], +) + envoy_cc_library( name = "config", srcs = ["config.cc"], @@ -38,8 +48,10 @@ envoy_cc_library( deps = [ "//include/envoy/registry", "//include/envoy/server:access_log_config_interface", + "//source/common/common:assert_lib", "//source/common/protobuf", "//source/extensions/access_loggers:well_known_names", "//source/extensions/access_loggers/http_grpc:grpc_access_log_lib", + "//source/extensions/access_loggers/http_grpc:grpc_access_log_proto_descriptors_lib", ], ) diff --git a/source/extensions/access_loggers/http_grpc/config.cc b/source/extensions/access_loggers/http_grpc/config.cc index 526cf65dbbdee..3fb77307ecd56 100644 --- a/source/extensions/access_loggers/http_grpc/config.cc +++ b/source/extensions/access_loggers/http_grpc/config.cc @@ -5,11 +5,13 @@ #include "envoy/registry/registry.h" #include "envoy/server/filter_config.h" +#include "common/common/assert.h" #include "common/common/macros.h" #include "common/grpc/async_client_impl.h" #include "common/protobuf/protobuf.h" #include "extensions/access_loggers/http_grpc/grpc_access_log_impl.h" +#include "extensions/access_loggers/http_grpc/grpc_access_log_proto_descriptors.h" #include "extensions/access_loggers/well_known_names.h" namespace Envoy { diff --git a/source/extensions/access_loggers/http_grpc/grpc_access_log_impl.cc 
b/source/extensions/access_loggers/http_grpc/grpc_access_log_impl.cc index 89b28c0dc2eec..32df710bac18d 100644 --- a/source/extensions/access_loggers/http_grpc/grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/http_grpc/grpc_access_log_impl.cc @@ -87,7 +87,7 @@ void HttpGrpcAccessLog::responseFlagsToAccessLogResponseFlags( envoy::data::accesslog::v2::AccessLogCommon& common_access_log, const StreamInfo::StreamInfo& stream_info) { - static_assert(StreamInfo::ResponseFlag::LastFlag == 0x8000, + static_assert(StreamInfo::ResponseFlag::LastFlag == 0x10000, "A flag has been added. Fix this code."); if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::FailedLocalHealthCheck)) { @@ -155,6 +155,10 @@ void HttpGrpcAccessLog::responseFlagsToAccessLogResponseFlags( if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::UpstreamRetryLimitExceeded)) { common_access_log.mutable_response_flags()->set_upstream_retry_limit_exceeded(true); } + + if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::StreamIdleTimeout)) { + common_access_log.mutable_response_flags()->set_stream_idle_timeout(true); + } } void HttpGrpcAccessLog::log(const Http::HeaderMap* request_headers, @@ -173,7 +177,7 @@ void HttpGrpcAccessLog::log(const Http::HeaderMap* request_headers, } if (filter_) { - if (!filter_->evaluate(stream_info, *request_headers)) { + if (!filter_->evaluate(stream_info, *request_headers, *response_headers, *response_trailers)) { return; } } @@ -183,7 +187,6 @@ void HttpGrpcAccessLog::log(const Http::HeaderMap* request_headers, // Common log properties. // TODO(mattklein123): Populate sample_rate field. - // TODO(mattklein123): Populate tls_properties field. 
auto* common_properties = log_entry->mutable_common_properties(); if (stream_info.downstreamRemoteAddress() != nullptr) { @@ -196,6 +199,28 @@ void HttpGrpcAccessLog::log(const Http::HeaderMap* request_headers, *stream_info.downstreamLocalAddress(), *common_properties->mutable_downstream_local_address()); } + if (stream_info.downstreamSslConnection() != nullptr) { + auto* tls_properties = common_properties->mutable_tls_properties(); + + tls_properties->set_tls_sni_hostname(stream_info.requestedServerName()); + + auto* local_properties = tls_properties->mutable_local_certificate_properties(); + for (const auto& uri_san : stream_info.downstreamSslConnection()->uriSanLocalCertificate()) { + auto* local_san = local_properties->add_subject_alt_name(); + local_san->set_uri(uri_san); + } + local_properties->set_subject(stream_info.downstreamSslConnection()->subjectLocalCertificate()); + + auto* peer_properties = tls_properties->mutable_peer_certificate_properties(); + for (const auto& uri_san : stream_info.downstreamSslConnection()->uriSanPeerCertificate()) { + auto* peer_san = peer_properties->add_subject_alt_name(); + peer_san->set_uri(uri_san); + } + + peer_properties->set_subject(stream_info.downstreamSslConnection()->subjectPeerCertificate()); + + // TODO(snowp): Populate remaining tls_properties fields. 
+ } common_properties->mutable_start_time()->MergeFrom( Protobuf::util::TimeUtil::NanosecondsToTimestamp( std::chrono::duration_cast( @@ -255,6 +280,10 @@ void HttpGrpcAccessLog::log(const Http::HeaderMap* request_headers, *stream_info.upstreamLocalAddress(), *common_properties->mutable_upstream_local_address()); } responseFlagsToAccessLogResponseFlags(*common_properties, stream_info); + if (!stream_info.upstreamTransportFailureReason().empty()) { + common_properties->set_upstream_transport_failure_reason( + stream_info.upstreamTransportFailureReason()); + } if (stream_info.dynamicMetadata().filter_metadata_size() > 0) { common_properties->mutable_metadata()->MergeFrom(stream_info.dynamicMetadata()); } @@ -277,28 +306,34 @@ void HttpGrpcAccessLog::log(const Http::HeaderMap* request_headers, // TODO(mattklein123): Populate port field. auto* request_properties = log_entry->mutable_request(); if (request_headers->Scheme() != nullptr) { - request_properties->set_scheme(request_headers->Scheme()->value().c_str()); + request_properties->set_scheme(std::string(request_headers->Scheme()->value().getStringView())); } if (request_headers->Host() != nullptr) { - request_properties->set_authority(request_headers->Host()->value().c_str()); + request_properties->set_authority( + std::string(request_headers->Host()->value().getStringView())); } if (request_headers->Path() != nullptr) { - request_properties->set_path(request_headers->Path()->value().c_str()); + request_properties->set_path(std::string(request_headers->Path()->value().getStringView())); } if (request_headers->UserAgent() != nullptr) { - request_properties->set_user_agent(request_headers->UserAgent()->value().c_str()); + request_properties->set_user_agent( + std::string(request_headers->UserAgent()->value().getStringView())); } if (request_headers->Referer() != nullptr) { - request_properties->set_referer(request_headers->Referer()->value().c_str()); + request_properties->set_referer( + 
std::string(request_headers->Referer()->value().getStringView())); } if (request_headers->ForwardedFor() != nullptr) { - request_properties->set_forwarded_for(request_headers->ForwardedFor()->value().c_str()); + request_properties->set_forwarded_for( + std::string(request_headers->ForwardedFor()->value().getStringView())); } if (request_headers->RequestId() != nullptr) { - request_properties->set_request_id(request_headers->RequestId()->value().c_str()); + request_properties->set_request_id( + std::string(request_headers->RequestId()->value().getStringView())); } if (request_headers->EnvoyOriginalPath() != nullptr) { - request_properties->set_original_path(request_headers->EnvoyOriginalPath()->value().c_str()); + request_properties->set_original_path( + std::string(request_headers->EnvoyOriginalPath()->value().getStringView())); } request_properties->set_request_headers_bytes(request_headers->byteSize()); request_properties->set_request_body_bytes(stream_info.bytesReceived()); @@ -306,7 +341,7 @@ void HttpGrpcAccessLog::log(const Http::HeaderMap* request_headers, envoy::api::v2::core::RequestMethod method = envoy::api::v2::core::RequestMethod::METHOD_UNSPECIFIED; envoy::api::v2::core::RequestMethod_Parse( - std::string(request_headers->Method()->value().c_str()), &method); + std::string(request_headers->Method()->value().getStringView()), &method); request_properties->set_request_method(method); } if (!request_headers_to_log_.empty()) { @@ -315,7 +350,7 @@ void HttpGrpcAccessLog::log(const Http::HeaderMap* request_headers, for (const auto& header : request_headers_to_log_) { const Http::HeaderEntry* entry = request_headers->get(header); if (entry != nullptr) { - logged_headers->insert({header.get(), ProtobufTypes::String(entry->value().c_str())}); + logged_headers->insert({header.get(), std::string(entry->value().getStringView())}); } } } @@ -325,6 +360,9 @@ void HttpGrpcAccessLog::log(const Http::HeaderMap* request_headers, if (stream_info.responseCode()) { 
response_properties->mutable_response_code()->set_value(stream_info.responseCode().value()); } + if (stream_info.responseCodeDetails()) { + response_properties->set_response_code_details(stream_info.responseCodeDetails().value()); + } response_properties->set_response_headers_bytes(response_headers->byteSize()); response_properties->set_response_body_bytes(stream_info.bytesSent()); if (!response_headers_to_log_.empty()) { @@ -333,7 +371,7 @@ void HttpGrpcAccessLog::log(const Http::HeaderMap* request_headers, for (const auto& header : response_headers_to_log_) { const Http::HeaderEntry* entry = response_headers->get(header); if (entry != nullptr) { - logged_headers->insert({header.get(), ProtobufTypes::String(entry->value().c_str())}); + logged_headers->insert({header.get(), std::string(entry->value().getStringView())}); } } } @@ -344,7 +382,7 @@ void HttpGrpcAccessLog::log(const Http::HeaderMap* request_headers, for (const auto& header : response_trailers_to_log_) { const Http::HeaderEntry* entry = response_trailers->get(header); if (entry != nullptr) { - logged_headers->insert({header.get(), ProtobufTypes::String(entry->value().c_str())}); + logged_headers->insert({header.get(), std::string(entry->value().getStringView())}); } } } diff --git a/source/extensions/access_loggers/http_grpc/grpc_access_log_proto_descriptors.cc b/source/extensions/access_loggers/http_grpc/grpc_access_log_proto_descriptors.cc new file mode 100644 index 0000000000000..3e9b3f39ce2cc --- /dev/null +++ b/source/extensions/access_loggers/http_grpc/grpc_access_log_proto_descriptors.cc @@ -0,0 +1,21 @@ +#include "extensions/access_loggers/http_grpc/grpc_access_log_proto_descriptors.h" + +#include "envoy/service/accesslog/v2/als.pb.h" + +#include "common/common/fmt.h" +#include "common/protobuf/protobuf.h" + +namespace Envoy { +namespace Extensions { +namespace AccessLoggers { +namespace HttpGrpc { + +bool validateProtoDescriptors() { + const auto method = 
"envoy.service.accesslog.v2.AccessLogService.StreamAccessLogs"; + + return Protobuf::DescriptorPool::generated_pool()->FindMethodByName(method) != nullptr; +}; +} // namespace HttpGrpc +} // namespace AccessLoggers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/access_loggers/http_grpc/grpc_access_log_proto_descriptors.h b/source/extensions/access_loggers/http_grpc/grpc_access_log_proto_descriptors.h new file mode 100644 index 0000000000000..55b1eda0d5bc7 --- /dev/null +++ b/source/extensions/access_loggers/http_grpc/grpc_access_log_proto_descriptors.h @@ -0,0 +1,14 @@ +#pragma once + +namespace Envoy { +namespace Extensions { +namespace AccessLoggers { +namespace HttpGrpc { + +// This function validates that the method descriptors for gRPC services and type descriptors that +// are referenced in Any messages are available in the descriptor pool. +bool validateProtoDescriptors(); +} // namespace HttpGrpc +} // namespace AccessLoggers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/access_loggers/wasm/wasm_access_log_impl.h b/source/extensions/access_loggers/wasm/wasm_access_log_impl.h index 03ed960c2c4b2..e302d5882e790 100644 --- a/source/extensions/access_loggers/wasm/wasm_access_log_impl.h +++ b/source/extensions/access_loggers/wasm/wasm_access_log_impl.h @@ -19,8 +19,9 @@ class WasmAccessLog : public AccessLog::Instance { void log(const Http::HeaderMap* request_headers, const Http::HeaderMap* response_headers, const Http::HeaderMap* response_trailers, const StreamInfo::StreamInfo& stream_info) override { - if (filter_ && request_headers) { - if (!filter_->evaluate(stream_info, *request_headers)) { + if (filter_ && request_headers && response_headers && response_trailers) { + if (!filter_->evaluate(stream_info, *request_headers, *response_headers, + *response_trailers)) { return; } } diff --git a/source/common/thread/BUILD b/source/extensions/clusters/BUILD similarity index 50% rename from 
source/common/thread/BUILD rename to source/extensions/clusters/BUILD index 8ba062af10205..7a4780afbdab2 100644 --- a/source/common/thread/BUILD +++ b/source/extensions/clusters/BUILD @@ -9,10 +9,10 @@ load( envoy_package() envoy_cc_library( - name = "thread_factory_singleton_lib", - srcs = ["thread_factory_singleton.cc"], + name = "well_known_names", + hdrs = ["well_known_names.h"], deps = [ - "//include/envoy/thread:thread_interface", - "//source/common/common:assert_lib", + "//source/common/config:well_known_names", + "//source/common/singleton:const_singleton", ], ) diff --git a/source/extensions/clusters/redis/BUILD b/source/extensions/clusters/redis/BUILD new file mode 100644 index 0000000000000..7248e397168b6 --- /dev/null +++ b/source/extensions/clusters/redis/BUILD @@ -0,0 +1,40 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "redis_cluster", + srcs = [ + "redis_cluster.cc", + "redis_cluster.h", + ], + deps = [ + "//include/envoy/api:api_interface", + "//include/envoy/http:codec_interface", + "//include/envoy/upstream:cluster_factory_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//include/envoy/upstream:upstream_interface", + "//source/common/config:metadata_lib", + "//source/common/event:dispatcher_lib", + "//source/common/json:config_schemas_lib", + "//source/common/json:json_loader_lib", + "//source/common/network:utility_lib", + "//source/common/singleton:manager_impl_lib", + "//source/common/upstream:cluster_factory_lib", + "//source/common/upstream:upstream_includes", + "//source/common/upstream:upstream_lib", + "//source/extensions/clusters:well_known_names", + "//source/extensions/filters/network/common/redis:client_interface", + "//source/extensions/filters/network/common/redis:client_lib", + "//source/extensions/filters/network/common/redis:codec_interface", + 
"//source/extensions/transport_sockets/raw_buffer:config", + "//source/server:transport_socket_config_lib", + "@envoy_api//envoy/config/cluster/redis:redis_cluster_cc", + ], +) diff --git a/source/extensions/clusters/redis/redis_cluster.cc b/source/extensions/clusters/redis/redis_cluster.cc new file mode 100644 index 0000000000000..96c64714211c9 --- /dev/null +++ b/source/extensions/clusters/redis/redis_cluster.cc @@ -0,0 +1,313 @@ +#include "redis_cluster.h" + +#include + +namespace Envoy { +namespace Extensions { +namespace Clusters { +namespace Redis { + +RedisCluster::RedisCluster( + const envoy::api::v2::Cluster& cluster, + const envoy::config::cluster::redis::RedisClusterConfig& redisCluster, + NetworkFilters::Common::Redis::Client::ClientFactory& redis_client_factory, + Upstream::ClusterManager& clusterManager, Runtime::Loader& runtime, + Network::DnsResolverSharedPtr dns_resolver, + Server::Configuration::TransportSocketFactoryContext& factory_context, + Stats::ScopePtr&& stats_scope, bool added_via_api) + : Upstream::BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope), + added_via_api), + cluster_manager_(clusterManager), + cluster_refresh_rate_(std::chrono::milliseconds( + PROTOBUF_GET_MS_OR_DEFAULT(redisCluster, cluster_refresh_rate, 5000))), + cluster_refresh_timeout_(std::chrono::milliseconds( + PROTOBUF_GET_MS_OR_DEFAULT(redisCluster, cluster_refresh_timeout, 3000))), + dispatcher_(factory_context.dispatcher()), dns_resolver_(std::move(dns_resolver)), + dns_lookup_family_(Upstream::getDnsLookupFamilyFromCluster(cluster)), + load_assignment_(cluster.has_load_assignment() + ? 
cluster.load_assignment() + : Config::Utility::translateClusterHosts(cluster.hosts())), + local_info_(factory_context.localInfo()), random_(factory_context.random()), + redis_discovery_session_(*this, redis_client_factory) { + const auto& locality_lb_endpoints = load_assignment_.endpoints(); + for (const auto& locality_lb_endpoint : locality_lb_endpoints) { + for (const auto& lb_endpoint : locality_lb_endpoint.lb_endpoints()) { + const auto& host = lb_endpoint.endpoint().address(); + dns_discovery_resolve_targets_.emplace_back(new DnsDiscoveryResolveTarget( + *this, host.socket_address().address(), host.socket_address().port_value(), + locality_lb_endpoint, lb_endpoint)); + } + } +}; + +void RedisCluster::startPreInit() { + for (const DnsDiscoveryResolveTargetPtr& target : dns_discovery_resolve_targets_) { + target->startResolve(); + } +} + +void RedisCluster::updateAllHosts(const Upstream::HostVector& hosts_added, + const Upstream::HostVector& hosts_removed, + uint32_t current_priority) { + Upstream::PriorityStateManager priority_state_manager(*this, local_info_, nullptr); + + auto locality_lb_endpoint = localityLbEndpoint(); + priority_state_manager.initializePriorityFor(locality_lb_endpoint); + for (const Upstream::HostSharedPtr& host : hosts_) { + if (locality_lb_endpoint.priority() == current_priority) { + priority_state_manager.registerHostForPriority(host, locality_lb_endpoint); + } + } + + priority_state_manager.updateClusterPrioritySet( + current_priority, std::move(priority_state_manager.priorityState()[current_priority].first), + hosts_added, hosts_removed, absl::nullopt); +} + +void RedisCluster::onClusterSlotUpdate(const std::vector& slots) { + Upstream::HostVector new_hosts; + SlotArray slots_; + + for (const ClusterSlot& slot : slots) { + new_hosts.emplace_back(new RedisHost(info(), "", slot.master_, *this, true)); + } + + std::unordered_map updated_hosts; + Upstream::HostVector hosts_added; + Upstream::HostVector hosts_removed; + if 
(updateDynamicHostList(new_hosts, hosts_, hosts_added, hosts_removed, updated_hosts, + all_hosts_)) { + ASSERT(std::all_of(hosts_.begin(), hosts_.end(), [&](const auto& host) { + return host->priority() == localityLbEndpoint().priority(); + })); + updateAllHosts(hosts_added, hosts_removed, localityLbEndpoint().priority()); + } else { + info_->stats().update_no_rebuild_.inc(); + } + + for (const ClusterSlot& slot : slots) { + auto host = updated_hosts.find(slot.master_->asString()); + ASSERT(host != updated_hosts.end(), "we expect all address to be found in the updated_hosts"); + for (auto i = slot.start_; i <= slot.end_; ++i) { + slots_[i] = host->second; + } + } + + all_hosts_ = std::move(updated_hosts); + cluster_slots_map_.swap(slots_); + + // TODO(hyang): If there is an initialize callback, fire it now. Note that if the + // cluster refers to multiple DNS names, this will return initialized after a single + // DNS resolution completes. This is not perfect but is easier to code and it is unclear + // if the extra complexity is needed so will start with this. 
+ onPreInitComplete(); +} + +// DnsDiscoveryResolveTarget +RedisCluster::DnsDiscoveryResolveTarget::DnsDiscoveryResolveTarget( + RedisCluster& parent, const std::string& dns_address, const uint32_t port, + const envoy::api::v2::endpoint::LocalityLbEndpoints& locality_lb_endpoint, + const envoy::api::v2::endpoint::LbEndpoint& lb_endpoint) + : parent_(parent), dns_address_(dns_address), port_(port), + locality_lb_endpoint_(locality_lb_endpoint), lb_endpoint_(lb_endpoint) {} + +RedisCluster::DnsDiscoveryResolveTarget::~DnsDiscoveryResolveTarget() { + if (active_query_) { + active_query_->cancel(); + } +} + +void RedisCluster::DnsDiscoveryResolveTarget::startResolve() { + ENVOY_LOG(trace, "starting async DNS resolution for {}", dns_address_); + + active_query_ = parent_.dns_resolver_->resolve( + dns_address_, parent_.dns_lookup_family_, + [this](const std::list&& address_list) -> void { + active_query_ = nullptr; + ENVOY_LOG(trace, "async DNS resolution complete for {}", dns_address_); + parent_.redis_discovery_session_.registerDiscoveryAddress(address_list, port_); + parent_.redis_discovery_session_.startResolve(); + }); +} + +// RedisCluster +RedisCluster::RedisDiscoverySession::RedisDiscoverySession( + Envoy::Extensions::Clusters::Redis::RedisCluster& parent, + NetworkFilters::Common::Redis::Client::ClientFactory& client_factory) + : parent_(parent), dispatcher_(parent.dispatcher_), + resolve_timer_(parent.dispatcher_.createTimer([this]() -> void { startResolve(); })), + client_factory_(client_factory), buffer_timeout_(0) {} + +namespace { +// Convert the cluster slot IP/Port response to and address, return null if the response does not +// match the expected type. 
+Network::Address::InstanceConstSharedPtr +ProcessCluster(const NetworkFilters::Common::Redis::RespValue& value) { + if (value.type() != NetworkFilters::Common::Redis::RespType::Array) { + return nullptr; + } + auto& array = value.asArray(); + + if (array.size() < 2 || array[0].type() != NetworkFilters::Common::Redis::RespType::BulkString || + array[1].type() != NetworkFilters::Common::Redis::RespType::Integer) { + return nullptr; + } + + std::string address = array[0].asString(); + bool ipv6 = (address.find(":") != std::string::npos); + if (ipv6) { + return std::make_shared(address, array[1].asInteger()); + } + return std::make_shared(address, array[1].asInteger()); +} +} // namespace + +RedisCluster::RedisDiscoverySession::~RedisDiscoverySession() { + if (current_request_) { + current_request_->cancel(); + current_request_ = nullptr; + } + + while (!client_map_.empty()) { + client_map_.begin()->second->client_->close(); + } +} + +void RedisCluster::RedisDiscoveryClient::onEvent(Network::ConnectionEvent event) { + if (event == Network::ConnectionEvent::RemoteClose || + event == Network::ConnectionEvent::LocalClose) { + auto client_to_delete = parent_.client_map_.find(host_); + ASSERT(client_to_delete != parent_.client_map_.end()); + parent_.dispatcher_.deferredDelete(std::move(client_to_delete->second->client_)); + parent_.client_map_.erase(client_to_delete); + } +} + +void RedisCluster::RedisDiscoverySession::registerDiscoveryAddress( + const std::list& address_list, + const uint32_t port) { + // Since the address from DNS does not have port, we need to make a new address that has port in + // it. 
+ for (const Network::Address::InstanceConstSharedPtr& address : address_list) { + ASSERT(address != nullptr); + discovery_address_list_.push_back(Network::Utility::getAddressWithPort(*address, port)); + } +} + +void RedisCluster::RedisDiscoverySession::startResolve() { + parent_.info_->stats().update_attempt_.inc(); + // If a resolution is currently in progress, skip it. + if (current_request_) { + return; + } + + // If hosts is empty, we haven't received a successful result from the CLUSTER SLOTS call yet. + // So, pick a random discovery address from dns and make a request. + Upstream::HostSharedPtr host; + if (parent_.hosts_.empty()) { + const int rand_idx = parent_.random_.random() % discovery_address_list_.size(); + auto it = discovery_address_list_.begin(); + std::next(it, rand_idx); + host = Upstream::HostSharedPtr{new RedisHost(parent_.info(), "", *it, parent_, true)}; + } else { + const int rand_idx = parent_.random_.random() % parent_.hosts_.size(); + host = parent_.hosts_[rand_idx]; + } + + current_host_address_ = host->address()->asString(); + RedisDiscoveryClientPtr& client = client_map_[current_host_address_]; + if (!client) { + client = std::make_unique(*this); + client->host_ = current_host_address_; + client->client_ = client_factory_.create(host, dispatcher_, *this); + client->client_->addConnectionCallbacks(*client); + } + + current_request_ = client->client_->makeRequest(ClusterSlotsRequest::instance_, *this); +} + +void RedisCluster::RedisDiscoverySession::onResponse( + NetworkFilters::Common::Redis::RespValuePtr&& value) { + current_request_ = nullptr; + + // Do nothing if the cluster is empty. + if (value->type() != NetworkFilters::Common::Redis::RespType::Array || value->asArray().empty()) { + onUnexpectedResponse(value); + return; + } + + std::vector slots_; + + // Loop through the cluster slot response and error checks for each field. 
+ for (const NetworkFilters::Common::Redis::RespValue& part : value->asArray()) { + if (part.type() != NetworkFilters::Common::Redis::RespType::Array) { + onUnexpectedResponse(value); + return; + } + const std::vector& slot_range = part.asArray(); + if (slot_range.size() < 3 || + slot_range[0].type() != + NetworkFilters::Common::Redis::RespType::Integer || // Start slot range is an integer. + slot_range[1].type() != + NetworkFilters::Common::Redis::RespType::Integer) { // End slot range is an integer. + onUnexpectedResponse(value); + return; + } + + // Field 2: Master address for slot range + // TODO(hyang): For now we're only adding the master node for each slot. When we're ready to + // send requests to replica nodes, we need to add subsequent address in the response as + // replica nodes. + auto master_address = ProcessCluster(slot_range[2]); + if (!master_address) { + onUnexpectedResponse(value); + return; + } + slots_.emplace_back(slot_range[0].asInteger(), slot_range[1].asInteger(), master_address); + } + + parent_.onClusterSlotUpdate(slots_); + resolve_timer_->enableTimer(parent_.cluster_refresh_rate_); +} + +void RedisCluster::RedisDiscoverySession::onUnexpectedResponse( + const NetworkFilters::Common::Redis::RespValuePtr& value) { + ENVOY_LOG(warn, "Unexpected response to cluster slot command: {}", value->toString()); + this->parent_.info_->stats().update_failure_.inc(); + resolve_timer_->enableTimer(parent_.cluster_refresh_rate_); +} + +void RedisCluster::RedisDiscoverySession::onFailure() { + current_request_ = nullptr; + if (!current_host_address_.empty()) { + auto client_to_delete = client_map_.find(current_host_address_); + client_to_delete->second->client_->close(); + } + parent_.info()->stats().update_failure_.inc(); + resolve_timer_->enableTimer(parent_.cluster_refresh_rate_); +} + +RedisCluster::ClusterSlotsRequest RedisCluster::ClusterSlotsRequest::instance_; + +Upstream::ClusterImplBaseSharedPtr RedisClusterFactory::createClusterWithConfig( + 
const envoy::api::v2::Cluster& cluster, + const envoy::config::cluster::redis::RedisClusterConfig& proto_config, + Upstream::ClusterFactoryContext& context, + Envoy::Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Envoy::Stats::ScopePtr&& stats_scope) { + if (!cluster.has_cluster_type() || + cluster.cluster_type().name() != Extensions::Clusters::ClusterTypes::get().Redis) { + throw EnvoyException("Redis cluster can only be created with redis cluster type"); + } + return std::make_shared( + cluster, proto_config, NetworkFilters::Common::Redis::Client::ClientFactoryImpl::instance_, + context.clusterManager(), context.runtime(), selectDnsResolver(cluster, context), + socket_factory_context, std::move(stats_scope), context.addedViaApi()); +} + +REGISTER_FACTORY(RedisClusterFactory, Upstream::ClusterFactory); + +} // namespace Redis +} // namespace Clusters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/clusters/redis/redis_cluster.h b/source/extensions/clusters/redis/redis_cluster.h new file mode 100644 index 0000000000000..9c432298acda9 --- /dev/null +++ b/source/extensions/clusters/redis/redis_cluster.h @@ -0,0 +1,285 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "envoy/api/api.h" +#include "envoy/api/v2/cds.pb.h" +#include "envoy/api/v2/core/base.pb.h" +#include "envoy/api/v2/endpoint/endpoint.pb.h" +#include "envoy/config/cluster/redis/redis_cluster.pb.h" +#include "envoy/config/cluster/redis/redis_cluster.pb.validate.h" +#include "envoy/config/typed_metadata.h" +#include "envoy/event/dispatcher.h" +#include "envoy/event/timer.h" +#include "envoy/http/codec.h" +#include "envoy/local_info/local_info.h" +#include "envoy/network/dns.h" +#include "envoy/runtime/runtime.h" +#include "envoy/secret/secret_manager.h" +#include "envoy/server/transport_socket_config.h" +#include "envoy/ssl/context_manager.h" +#include 
"envoy/stats/scope.h" +#include "envoy/thread_local/thread_local.h" +#include "envoy/upstream/cluster_manager.h" +#include "envoy/upstream/health_checker.h" +#include "envoy/upstream/load_balancer.h" +#include "envoy/upstream/locality.h" +#include "envoy/upstream/upstream.h" + +#include "common/common/callback_impl.h" +#include "common/common/enum_to_int.h" +#include "common/common/logger.h" +#include "common/config/metadata.h" +#include "common/config/well_known_names.h" +#include "common/network/address_impl.h" +#include "common/network/utility.h" +#include "common/stats/isolated_store_impl.h" +#include "common/upstream/cluster_factory_impl.h" +#include "common/upstream/load_balancer_impl.h" +#include "common/upstream/outlier_detection_impl.h" +#include "common/upstream/resource_manager_impl.h" +#include "common/upstream/upstream_impl.h" + +#include "server/transport_socket_config_impl.h" + +#include "extensions/clusters/well_known_names.h" +#include "extensions/filters/network/common/redis/client.h" +#include "extensions/filters/network/common/redis/client_impl.h" +#include "extensions/filters/network/common/redis/codec.h" + +namespace Envoy { +namespace Extensions { +namespace Clusters { +namespace Redis { + +/* + * This class implements support for the topology part of `Redis Cluster + * `_. Specifically, it allows Envoy to maintain an internal + * representation of the topology of a Redis Cluster, and how often the topology should be + * refreshed. + * + * The target Redis Cluster is obtained from the yaml config file as usual, and we choose a random + * discovery address from DNS if there are no existing hosts (our startup condition). Otherwise, we + * choose a random host from our known set of hosts. Then, against this host we make a topology + * request. + * + * Topology requests are handled by RedisDiscoverySession, which handles the initialization of + * the `CLUSTER SLOTS command `_, and the responses and + * failure cases. 
+ * + * The topology is stored in cluster_slots_map_. According to the + * `Redis Cluster Spec SlotArray; + +class RedisCluster : public Upstream::BaseDynamicClusterImpl { +public: + RedisCluster(const envoy::api::v2::Cluster& cluster, + const envoy::config::cluster::redis::RedisClusterConfig& redisCluster, + NetworkFilters::Common::Redis::Client::ClientFactory& client_factory, + Upstream::ClusterManager& clusterManager, Runtime::Loader& runtime, + Network::DnsResolverSharedPtr dns_resolver, + Server::Configuration::TransportSocketFactoryContext& factory_context, + Stats::ScopePtr&& stats_scope, bool added_via_api); + + struct ClusterSlotsRequest : public Extensions::NetworkFilters::Common::Redis::RespValue { + public: + ClusterSlotsRequest() : Extensions::NetworkFilters::Common::Redis::RespValue() { + type(Extensions::NetworkFilters::Common::Redis::RespType::Array); + std::vector values(2); + values[0].type(NetworkFilters::Common::Redis::RespType::BulkString); + values[0].asString() = "CLUSTER"; + values[1].type(NetworkFilters::Common::Redis::RespType::BulkString); + values[1].asString() = "SLOTS"; + asArray().swap(values); + } + static ClusterSlotsRequest instance_; + }; + + InitializePhase initializePhase() const override { return InitializePhase::Primary; } + +private: + friend class RedisClusterTest; + + void startPreInit() override; + + void updateAllHosts(const Upstream::HostVector& hosts_added, + const Upstream::HostVector& hosts_removed, uint32_t priority); + + struct ClusterSlot { + ClusterSlot(int64_t start, int64_t end, Network::Address::InstanceConstSharedPtr master) + : start_(start), end_(end), master_(std::move(master)) {} + + int64_t start_; + int64_t end_; + Network::Address::InstanceConstSharedPtr master_; + }; + + void onClusterSlotUpdate(const std::vector&); + + const envoy::api::v2::endpoint::LocalityLbEndpoints& localityLbEndpoint() const { + // Always use the first endpoint. 
+ return load_assignment_.endpoints()[0]; + } + + const envoy::api::v2::endpoint::LbEndpoint& lbEndpoint() const { + // Always use the first endpoint. + return localityLbEndpoint().lb_endpoints()[0]; + } + + // A redis node in the Redis cluster. + class RedisHost : public Upstream::HostImpl { + public: + RedisHost(Upstream::ClusterInfoConstSharedPtr cluster, const std::string& hostname, + Network::Address::InstanceConstSharedPtr address, RedisCluster& parent, bool master) + : Upstream::HostImpl(cluster, hostname, address, parent.lbEndpoint().metadata(), + parent.lbEndpoint().load_balancing_weight().value(), + parent.localityLbEndpoint().locality(), + parent.lbEndpoint().endpoint().health_check_config(), + parent.localityLbEndpoint().priority(), + parent.lbEndpoint().health_status()), + master_(master) {} + + bool isMaster() const { return master_; } + + private: + const bool master_; + }; + + // Resolves the discovery endpoint. + struct DnsDiscoveryResolveTarget { + DnsDiscoveryResolveTarget( + RedisCluster& parent, const std::string& dns_address, const uint32_t port, + const envoy::api::v2::endpoint::LocalityLbEndpoints& locality_lb_endpoint, + const envoy::api::v2::endpoint::LbEndpoint& lb_endpoint); + + ~DnsDiscoveryResolveTarget(); + + void startResolve(); + + RedisCluster& parent_; + Network::ActiveDnsQuery* active_query_{}; + const std::string dns_address_; + const uint32_t port_; + const envoy::api::v2::endpoint::LocalityLbEndpoints locality_lb_endpoint_; + const envoy::api::v2::endpoint::LbEndpoint lb_endpoint_; + }; + + typedef std::unique_ptr DnsDiscoveryResolveTargetPtr; + + struct RedisDiscoverySession; + + struct RedisDiscoveryClient : public Network::ConnectionCallbacks { + RedisDiscoveryClient(RedisDiscoverySession& parent) : parent_(parent) {} + + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent event) override; + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + + 
RedisDiscoverySession& parent_; + std::string host_; + Extensions::NetworkFilters::Common::Redis::Client::ClientPtr client_; + }; + + typedef std::unique_ptr RedisDiscoveryClientPtr; + + struct RedisDiscoverySession + : public Extensions::NetworkFilters::Common::Redis::Client::Config, + public Extensions::NetworkFilters::Common::Redis::Client::PoolCallbacks { + RedisDiscoverySession(RedisCluster& parent, + NetworkFilters::Common::Redis::Client::ClientFactory& client_factory); + + ~RedisDiscoverySession(); + + void registerDiscoveryAddress( + const std::list& address_list, + const uint32_t port); + + // Start discovery against a random host from existing hosts + void startResolve(); + + // Extensions::NetworkFilters::Common::Redis::Client::Config + bool disableOutlierEvents() const override { return true; } + std::chrono::milliseconds opTimeout() const override { + // Allow the main Health Check infra to control timeout. + return parent_.cluster_refresh_timeout_; + } + bool enableHashtagging() const override { return false; } + bool enableRedirection() const override { return false; } + uint32_t maxBufferSizeBeforeFlush() const override { return 0; } + std::chrono::milliseconds bufferFlushTimeoutInMs() const override { return buffer_timeout_; } + + // Extensions::NetworkFilters::Common::Redis::Client::PoolCallbacks + void onResponse(NetworkFilters::Common::Redis::RespValuePtr&& value) override; + void onFailure() override; + // Note: Below callback isn't used in topology updates + bool onRedirection(const NetworkFilters::Common::Redis::RespValue&) override { return true; } + void onUnexpectedResponse(const NetworkFilters::Common::Redis::RespValuePtr&); + + RedisCluster& parent_; + Event::Dispatcher& dispatcher_; + std::string current_host_address_; + Extensions::NetworkFilters::Common::Redis::Client::PoolRequest* current_request_{}; + std::unordered_map client_map_; + + std::list discovery_address_list_; + + Event::TimerPtr resolve_timer_; + 
NetworkFilters::Common::Redis::Client::ClientFactory& client_factory_; + const std::chrono::milliseconds buffer_timeout_; + }; + + Upstream::ClusterManager& cluster_manager_; + const std::chrono::milliseconds cluster_refresh_rate_; + const std::chrono::milliseconds cluster_refresh_timeout_; + std::list dns_discovery_resolve_targets_; + Event::Dispatcher& dispatcher_; + Network::DnsResolverSharedPtr dns_resolver_; + Network::DnsLookupFamily dns_lookup_family_; + const envoy::api::v2::ClusterLoadAssignment load_assignment_; + const LocalInfo::LocalInfo& local_info_; + Runtime::RandomGenerator& random_; + RedisDiscoverySession redis_discovery_session_; + // The slot to master node map. + SlotArray cluster_slots_map_; + + Upstream::HostVector hosts_; + Upstream::HostMap all_hosts_; +}; + +class RedisClusterFactory : public Upstream::ConfigurableClusterFactoryBase< + envoy::config::cluster::redis::RedisClusterConfig> { +public: + RedisClusterFactory() + : ConfigurableClusterFactoryBase(Extensions::Clusters::ClusterTypes::get().Redis) {} + +private: + friend class RedisClusterTest; + + Upstream::ClusterImplBaseSharedPtr createClusterWithConfig( + const envoy::api::v2::Cluster& cluster, + const envoy::config::cluster::redis::RedisClusterConfig& proto_config, + Upstream::ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) override; +}; +} // namespace Redis +} // namespace Clusters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/clusters/well_known_names.h b/source/extensions/clusters/well_known_names.h new file mode 100644 index 0000000000000..b074bdd644bc9 --- /dev/null +++ b/source/extensions/clusters/well_known_names.h @@ -0,0 +1,40 @@ +#pragma once + +#include "common/config/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace Clusters { + +/** + * Well-known cluster types, these supersede the service discovery types + */ 
+class ClusterTypeValues { +public: + // Static clusters (cluster that have a fixed number of hosts with resolved IP addresses). + const std::string Static = "envoy.cluster.static"; + + // Strict DNS (cluster that periodic DNS resolution and updates the host member set if the DNS + // members change). + const std::string StrictDns = "envoy.cluster.strict_dns"; + + // Logical DNS (cluster that creates a single logical host that wraps an async DNS resolver). + const std::string LogicalDns = "envoy.cluster.logical_dns"; + + // Endpoint Discovery Service (dynamic cluster that reads host information from the Endpoint + // Discovery Service). + const std::string Eds = "envoy.cluster.eds"; + + // Original destination (dynamic cluster that automatically adds hosts as needed based on the + // original destination address of the downstream connection). + const std::string OriginalDst = "envoy.cluster.original_dst"; + + // Redis cluster (cluster that reads host information using the redis cluster protocol). 
+ const std::string Redis = "envoy.clusters.redis"; +}; + +using ClusterTypes = ConstSingleton; + +} // namespace Clusters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/tap/BUILD b/source/extensions/common/tap/BUILD index 7629cb59b365c..92093202cadf0 100644 --- a/source/extensions/common/tap/BUILD +++ b/source/extensions/common/tap/BUILD @@ -12,6 +12,7 @@ envoy_cc_library( name = "tap_interface", hdrs = ["tap.h"], deps = [ + ":tap_matcher", "//include/envoy/http:header_map_interface", "@envoy_api//envoy/data/tap/v2alpha:wrapper_cc", "@envoy_api//envoy/service/tap/v2alpha:common_cc", diff --git a/source/extensions/common/tap/admin.cc b/source/extensions/common/tap/admin.cc index d298251a12e25..56face12bc0b6 100644 --- a/source/extensions/common/tap/admin.cc +++ b/source/extensions/common/tap/admin.cc @@ -4,6 +4,7 @@ #include "envoy/admin/v2alpha/tap.pb.validate.h" #include "common/buffer/buffer_impl.h" +#include "common/protobuf/utility.h" namespace Envoy { namespace Extensions { @@ -98,16 +99,32 @@ void AdminHandler::unregisterConfig(ExtensionConfig& config) { } } -void AdminHandler::submitBufferedTrace( - std::shared_ptr trace, uint64_t) { +void AdminHandler::AdminPerTapSinkHandle::submitTrace( + TraceWrapperPtr&& trace, envoy::service::tap::v2alpha::OutputSink::Format format) { ENVOY_LOG(debug, "admin submitting buffered trace to main thread"); - main_thread_dispatcher_.post([this, trace]() { - if (attached_request_.has_value()) { - ENVOY_LOG(debug, "admin writing buffered trace to response"); - Buffer::OwnedImpl json_trace{MessageUtil::getJsonStringFromMessage(*trace, true, true)}; - attached_request_.value().admin_stream_->getDecoderFilterCallbacks().encodeData(json_trace, - false); + // Convert to a shared_ptr, so we can send it to the main thread. + std::shared_ptr shared_trace{std::move(trace)}; + // The handle can be destroyed before the cross thread post is complete. Thus, we capture a + // reference to our parent. 
+ parent_.main_thread_dispatcher_.post([& parent = parent_, trace = shared_trace, format]() { + if (!parent.attached_request_.has_value()) { + return; } + + std::string output_string; + switch (format) { + case envoy::service::tap::v2alpha::OutputSink::JSON_BODY_AS_STRING: + case envoy::service::tap::v2alpha::OutputSink::JSON_BODY_AS_BYTES: + output_string = MessageUtil::getJsonStringFromMessage(*trace, true, true); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + + ENVOY_LOG(debug, "admin writing buffered trace to response"); + Buffer::OwnedImpl output_buffer{output_string}; + parent.attached_request_.value().admin_stream_->getDecoderFilterCallbacks().encodeData( + output_buffer, false); }); } diff --git a/source/extensions/common/tap/admin.h b/source/extensions/common/tap/admin.h index 0394f242cc0a9..be611ed0d4a6c 100644 --- a/source/extensions/common/tap/admin.h +++ b/source/extensions/common/tap/admin.h @@ -50,10 +50,21 @@ class AdminHandler : public Singleton::Instance, void unregisterConfig(ExtensionConfig& config); // Extensions::Common::Tap::Sink - void submitBufferedTrace(std::shared_ptr trace, - uint64_t trace_id) override; + PerTapSinkHandlePtr createPerTapSinkHandle(uint64_t) override { + return std::make_unique(*this); + } private: + struct AdminPerTapSinkHandle : public PerTapSinkHandle { + AdminPerTapSinkHandle(AdminHandler& parent) : parent_(parent) {} + + // Extensions::Common::Tap::PerTapSinkHandle + void submitTrace(TraceWrapperPtr&& trace, + envoy::service::tap::v2alpha::OutputSink::Format format) override; + + AdminHandler& parent_; + }; + struct AttachedRequest { AttachedRequest(std::string config_id, Server::AdminStream* admin_stream) : config_id_(std::move(config_id)), admin_stream_(admin_stream) {} diff --git a/source/extensions/common/tap/extension_config_base.cc b/source/extensions/common/tap/extension_config_base.cc index 83c514a357c9d..e72ce4458a2b9 100644 --- a/source/extensions/common/tap/extension_config_base.cc +++ 
b/source/extensions/common/tap/extension_config_base.cc @@ -29,7 +29,9 @@ ExtensionConfigBase::ExtensionConfigBase( ENVOY_LOG(debug, "initializing tap extension with static config"); break; } - default: { NOT_REACHED_GCOVR_EXCL_LINE; } + default: { + NOT_REACHED_GCOVR_EXCL_LINE; + } } } diff --git a/source/extensions/common/tap/tap.h b/source/extensions/common/tap/tap.h index 79a3270ed7c4e..30505f2d750d1 100644 --- a/source/extensions/common/tap/tap.h +++ b/source/extensions/common/tap/tap.h @@ -5,6 +5,8 @@ #include "envoy/http/header_map.h" #include "envoy/service/tap/v2alpha/common.pb.h" +#include "extensions/common/tap/tap_matcher.h" + #include "absl/strings/string_view.h" namespace Envoy { @@ -12,6 +14,48 @@ namespace Extensions { namespace Common { namespace Tap { +using TraceWrapperPtr = std::unique_ptr; +inline TraceWrapperPtr makeTraceWrapper() { + return std::make_unique(); +} + +/** + * A handle for a per-tap sink. This allows submitting either a single buffered trace, or a series + * of trace segments that the sink can aggregate in whatever way it chooses. + */ +class PerTapSinkHandle { +public: + virtual ~PerTapSinkHandle() = default; + + /** + * Send a trace wrapper to the sink. This may be a fully buffered trace or a segment of a larger + * trace depending on the contents of the wrapper. + * @param trace supplies the trace to send. + * @param format supplies the output format to use. + */ + virtual void submitTrace(TraceWrapperPtr&& trace, + envoy::service::tap::v2alpha::OutputSink::Format format) PURE; +}; + +using PerTapSinkHandlePtr = std::unique_ptr; + +/** + * Wraps potentially multiple PerTapSinkHandle instances and any common pre-submit functionality. + * Each active tap will have a reference to one of these, which in turn may have references to + * one or more PerTapSinkHandle. 
+ */ +class PerTapSinkHandleManager { +public: + virtual ~PerTapSinkHandleManager() = default; + + /** + * Submit a buffered or streamed trace segment to all managed per-tap sink handles. + */ + virtual void submitTrace(TraceWrapperPtr&& trace) PURE; +}; + +using PerTapSinkHandleManagerPtr = std::unique_ptr; + /** * Sink for sending tap messages. */ @@ -20,14 +64,10 @@ class Sink { virtual ~Sink() = default; /** - * Send a fully buffered trace to the sink. - * @param trace supplies the trace to send. The trace message is a discrete trace message (as - * opposed to a portion of a larger trace that should be aggregated). + * Create a per tap sink handle for use in submitting either buffered traces or trace segments. * @param trace_id supplies a locally unique trace ID. Some sinks use this for output generation. */ - virtual void - submitBufferedTrace(std::shared_ptr trace, - uint64_t trace_id) PURE; + virtual PerTapSinkHandlePtr createPerTapSinkHandle(uint64_t trace_id) PURE; }; using SinkPtr = std::unique_ptr; @@ -63,11 +103,45 @@ class ExtensionConfig { }; /** - * Abstract tap configuration base class. Used for type safety. + * Abstract tap configuration base class. */ class TapConfig { public: virtual ~TapConfig() = default; + + /** + * Return a per-tap sink handle manager for use by a tap session. + * @param trace_id supplies a locally unique trace ID. Some sinks use this for output generation. + */ + virtual PerTapSinkHandleManagerPtr createPerTapSinkHandleManager(uint64_t trace_id) PURE; + + /** + * Return the maximum received bytes that can be buffered in memory. Streaming taps are still + * subject to this limit depending on match status. + */ + virtual uint32_t maxBufferedRxBytes() const PURE; + + /** + * Return the maximum transmitted bytes that can be buffered in memory. Streaming taps are still + * subject to this limit depending on match status. 
+ */ + virtual uint32_t maxBufferedTxBytes() const PURE; + + /** + * Return a new match status vector that is correctly sized for the number of matchers that are in + * the configuration. + */ + virtual Matcher::MatchStatusVector createMatchStatusVector() const PURE; + + /** + * Return the root matcher for use in updating a match status vector. + */ + virtual const Matcher& rootMatcher() const PURE; + + /** + * Return whether the tap session should run in streaming or buffering mode. + */ + virtual bool streaming() const PURE; }; using TapConfigSharedPtr = std::shared_ptr; diff --git a/source/extensions/common/tap/tap_config_base.cc b/source/extensions/common/tap/tap_config_base.cc index e20bd37cec008..85aa6bf1f69ce 100644 --- a/source/extensions/common/tap/tap_config_base.cc +++ b/source/extensions/common/tap/tap_config_base.cc @@ -1,8 +1,8 @@ #include "extensions/common/tap/tap_config_base.h" -#include - #include "common/common/assert.h" +#include "common/common/stack_array.h" +#include "common/protobuf/utility.h" #include "extensions/common/tap/tap_matcher.h" @@ -11,14 +11,54 @@ namespace Extensions { namespace Common { namespace Tap { +bool Utility::addBufferToProtoBytes(envoy::data::tap::v2alpha::Body& output_body, + uint32_t max_buffered_bytes, const Buffer::Instance& data, + uint32_t buffer_start_offset, uint32_t buffer_length_to_copy) { + // TODO(mattklein123): Figure out if we can use the buffer API here directly in some way. This is + // is not trivial if we want to avoid extra copies since we end up appending to the existing + // protobuf string. + + // Note that max_buffered_bytes is assumed to include any data already contained in output_bytes. + // This is to account for callers that may be tracking this over multiple body objects. 
+ ASSERT(buffer_start_offset + buffer_length_to_copy <= data.length()); + const uint32_t final_bytes_to_copy = std::min(max_buffered_bytes, buffer_length_to_copy); + + const uint64_t num_slices = data.getRawSlices(nullptr, 0); + STACK_ARRAY(slices, Buffer::RawSlice, num_slices); + data.getRawSlices(slices.begin(), num_slices); + trimSlices(slices, buffer_start_offset, final_bytes_to_copy); + for (const Buffer::RawSlice& slice : slices) { + output_body.mutable_as_bytes()->append(static_cast(slice.mem_), slice.len_); + } + + if (final_bytes_to_copy < buffer_length_to_copy) { + output_body.set_truncated(true); + return true; + } else { + return false; + } +} + TapConfigBaseImpl::TapConfigBaseImpl(envoy::service::tap::v2alpha::TapConfig&& proto_config, - Common::Tap::Sink* admin_streamer) { + Common::Tap::Sink* admin_streamer) + : max_buffered_rx_bytes_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( + proto_config.output_config(), max_buffered_rx_bytes, DefaultMaxBufferedBytes)), + max_buffered_tx_bytes_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( + proto_config.output_config(), max_buffered_tx_bytes, DefaultMaxBufferedBytes)), + streaming_(proto_config.output_config().streaming()) { ASSERT(proto_config.output_config().sinks().size() == 1); + // TODO(mattklein123): Add per-sink checks to make sure format makes sense. I.e., when using + // streaming, we should require the length delimited version of binary proto, etc. + sink_format_ = proto_config.output_config().sinks()[0].format(); switch (proto_config.output_config().sinks()[0].output_sink_type_case()) { case envoy::service::tap::v2alpha::OutputSink::kStreamingAdmin: // TODO(mattklein123): Graceful failure, error message, and test if someone specifies an - // admin stream output without configuring via /tap. + // admin stream output without configuring via /tap or the wrong format. 
RELEASE_ASSERT(admin_streamer != nullptr, "admin output must be configured via admin"); + RELEASE_ASSERT(sink_format_ == envoy::service::tap::v2alpha::OutputSink::JSON_BODY_AS_BYTES || + sink_format_ == + envoy::service::tap::v2alpha::OutputSink::JSON_BODY_AS_STRING, + "admin output only supports JSON formats"); sink_to_use_ = admin_streamer; break; case envoy::service::tap::v2alpha::OutputSink::kFilePerTap: @@ -33,25 +73,125 @@ TapConfigBaseImpl::TapConfigBaseImpl(envoy::service::tap::v2alpha::TapConfig&& p buildMatcher(proto_config.match_config(), matchers_); } -Matcher& TapConfigBaseImpl::rootMatcher() { +const Matcher& TapConfigBaseImpl::rootMatcher() const { ASSERT(matchers_.size() >= 1); return *matchers_[0]; } -void FilePerTapSink::submitBufferedTrace( - std::shared_ptr trace, uint64_t trace_id) { - // TODO(mattklein123): Add JSON format. - const bool text_format = - config_.format() == envoy::service::tap::v2alpha::FilePerTapSink::PROTO_TEXT; - const std::string path = - fmt::format("{}_{}.{}", config_.path_prefix(), trace_id, text_format ? "pb_text" : "pb"); - ENVOY_LOG_MISC(debug, "Writing tap for [id={}] to {}", trace_id, path); - ENVOY_LOG_MISC(trace, "Tap for [id={}]: {}", trace_id, trace->DebugString()); - std::ofstream proto_stream(path); - if (text_format) { - proto_stream << trace->DebugString(); - } else { - trace->SerializeToOstream(&proto_stream); +namespace { +void swapBytesToString(envoy::data::tap::v2alpha::Body& body) { + body.set_allocated_as_string(body.release_as_bytes()); +} +} // namespace + +void Utility::bodyBytesToString(envoy::data::tap::v2alpha::TraceWrapper& trace, + envoy::service::tap::v2alpha::OutputSink::Format sink_format) { + // Swap the "bytes" string into the "string" string. This is done purely so that JSON + // serialization will serialize as a string vs. doing base64 encoding. 
+ if (sink_format != envoy::service::tap::v2alpha::OutputSink::JSON_BODY_AS_STRING) { + return; + } + + switch (trace.trace_case()) { + case envoy::data::tap::v2alpha::TraceWrapper::kHttpBufferedTrace: { + auto* http_trace = trace.mutable_http_buffered_trace(); + if (http_trace->has_request() && http_trace->request().has_body()) { + swapBytesToString(*http_trace->mutable_request()->mutable_body()); + } + if (http_trace->has_response() && http_trace->response().has_body()) { + swapBytesToString(*http_trace->mutable_response()->mutable_body()); + } + break; + } + case envoy::data::tap::v2alpha::TraceWrapper::kHttpStreamedTraceSegment: { + auto* http_trace = trace.mutable_http_streamed_trace_segment(); + if (http_trace->has_request_body_chunk()) { + swapBytesToString(*http_trace->mutable_request_body_chunk()); + } + if (http_trace->has_response_body_chunk()) { + swapBytesToString(*http_trace->mutable_response_body_chunk()); + } + break; + } + case envoy::data::tap::v2alpha::TraceWrapper::kSocketBufferedTrace: { + auto* socket_trace = trace.mutable_socket_buffered_trace(); + for (auto& event : *socket_trace->mutable_events()) { + if (event.has_read()) { + swapBytesToString(*event.mutable_read()->mutable_data()); + } else { + ASSERT(event.has_write()); + swapBytesToString(*event.mutable_write()->mutable_data()); + } + } + break; + } + case envoy::data::tap::v2alpha::TraceWrapper::kSocketStreamedTraceSegment: { + auto& event = *trace.mutable_socket_streamed_trace_segment()->mutable_event(); + if (event.has_read()) { + swapBytesToString(*event.mutable_read()->mutable_data()); + } else if (event.has_write()) { + swapBytesToString(*event.mutable_write()->mutable_data()); + } + break; + } + case envoy::data::tap::v2alpha::TraceWrapper::TRACE_NOT_SET: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +void TapConfigBaseImpl::PerTapSinkHandleManagerImpl::submitTrace(TraceWrapperPtr&& trace) { + Utility::bodyBytesToString(*trace, parent_.sink_format_); + 
handle_->submitTrace(std::move(trace), parent_.sink_format_); +} + +void FilePerTapSink::FilePerTapSinkHandle::submitTrace( + TraceWrapperPtr&& trace, envoy::service::tap::v2alpha::OutputSink::Format format) { + if (!output_file_.is_open()) { + std::string path = fmt::format("{}_{}", parent_.config_.path_prefix(), trace_id_); + switch (format) { + case envoy::service::tap::v2alpha::OutputSink::PROTO_BINARY: + path += MessageUtil::FileExtensions::get().ProtoBinary; + break; + case envoy::service::tap::v2alpha::OutputSink::PROTO_BINARY_LENGTH_DELIMITED: + path += MessageUtil::FileExtensions::get().ProtoBinaryLengthDelimited; + break; + case envoy::service::tap::v2alpha::OutputSink::PROTO_TEXT: + path += MessageUtil::FileExtensions::get().ProtoText; + break; + case envoy::service::tap::v2alpha::OutputSink::JSON_BODY_AS_BYTES: + case envoy::service::tap::v2alpha::OutputSink::JSON_BODY_AS_STRING: + path += MessageUtil::FileExtensions::get().Json; + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + + ENVOY_LOG_MISC(debug, "Opening tap file for [id={}] to {}", trace_id_, path); + output_file_.open(path); + } + + ENVOY_LOG_MISC(trace, "Tap for [id={}]: {}", trace_id_, trace->DebugString()); + + switch (format) { + case envoy::service::tap::v2alpha::OutputSink::PROTO_BINARY: + trace->SerializeToOstream(&output_file_); + break; + case envoy::service::tap::v2alpha::OutputSink::PROTO_BINARY_LENGTH_DELIMITED: { + Protobuf::io::OstreamOutputStream stream(&output_file_); + Protobuf::io::CodedOutputStream coded_stream(&stream); + coded_stream.WriteVarint32(trace->ByteSize()); + trace->SerializeWithCachedSizes(&coded_stream); + break; + } + case envoy::service::tap::v2alpha::OutputSink::PROTO_TEXT: + output_file_ << trace->DebugString(); + break; + case envoy::service::tap::v2alpha::OutputSink::JSON_BODY_AS_BYTES: + case envoy::service::tap::v2alpha::OutputSink::JSON_BODY_AS_STRING: + output_file_ << MessageUtil::getJsonStringFromMessage(*trace, true, true); + break; + 
default: + NOT_REACHED_GCOVR_EXCL_LINE; } } diff --git a/source/extensions/common/tap/tap_config_base.h b/source/extensions/common/tap/tap_config_base.h index 8333d4115cf82..36b6f8fca5567 100644 --- a/source/extensions/common/tap/tap_config_base.h +++ b/source/extensions/common/tap/tap_config_base.h @@ -1,5 +1,8 @@ #pragma once +#include + +#include "envoy/buffer/buffer.h" #include "envoy/service/tap/v2alpha/common.pb.h" #include "extensions/common/tap/tap.h" @@ -10,23 +13,101 @@ namespace Extensions { namespace Common { namespace Tap { +/** + * Common utilities for tapping. + */ +class Utility { +public: + /** + * Add body data to a tapped body message, taking into account the maximum bytes to buffer. + * @param output_body supplies the body message to buffer to. + * @param max_buffered_bytes supplies the maximum bytes to store, if truncation occurs the + * truncation flag will be set. + * @param data supplies the data to buffer. + * @param buffer_start_offset supplies the offset within data to start buffering. + * @param buffer_length_to_copy supplies the length of the data to buffer. + * @return whether the buffered data was truncated or not. + */ + static bool addBufferToProtoBytes(envoy::data::tap::v2alpha::Body& output_body, + uint32_t max_buffered_bytes, const Buffer::Instance& data, + uint32_t buffer_start_offset, uint32_t buffer_length_to_copy); + + /** + * Swap body as bytes to body as string if necessary in a trace wrapper. + */ + static void bodyBytesToString(envoy::data::tap::v2alpha::TraceWrapper& trace, + envoy::service::tap::v2alpha::OutputSink::Format sink_format); + + /** + * Trim a container that contains buffer raw slices so that the slices start at an offset and + * only contain a specific length. No slices are removed from the container, but their length + * may be reduced to 0. + * TODO(mattklein123): This is split out to ease testing and also because we should ultimately + * move this directly into the buffer API. 
I would rather wait until the new buffer code merges + * before we do that. + */ + template static void trimSlices(T& slices, uint32_t start_offset, uint32_t length) { + for (auto& slice : slices) { + const uint32_t start_offset_trim = std::min(start_offset, slice.len_); + slice.len_ -= start_offset_trim; + start_offset -= start_offset_trim; + slice.mem_ = static_cast(slice.mem_) + start_offset_trim; + + const uint32_t final_length = std::min(length, slice.len_); + slice.len_ = final_length; + length -= final_length; + } + } +}; + /** * Base class for all tap configurations. * TODO(mattklein123): This class will handle common functionality such as rate limiting, etc. */ -class TapConfigBaseImpl { +class TapConfigBaseImpl : public virtual TapConfig { public: - size_t numMatchers() { return matchers_.size(); } - Matcher& rootMatcher(); - Extensions::Common::Tap::Sink& sink() { return *sink_to_use_; } + // A wrapper for a per tap sink handle and trace submission. If in the future we support + // multiple sinks we can easily do it here. 
+ class PerTapSinkHandleManagerImpl : public PerTapSinkHandleManager { + public: + PerTapSinkHandleManagerImpl(TapConfigBaseImpl& parent, uint64_t trace_id) + : parent_(parent), handle_(parent.sink_to_use_->createPerTapSinkHandle(trace_id)) {} + + // PerTapSinkHandleManager + void submitTrace(TraceWrapperPtr&& trace) override; + + private: + TapConfigBaseImpl& parent_; + PerTapSinkHandlePtr handle_; + }; + + // TapConfig + PerTapSinkHandleManagerPtr createPerTapSinkHandleManager(uint64_t trace_id) override { + return std::make_unique(*this, trace_id); + } + uint32_t maxBufferedRxBytes() const override { return max_buffered_rx_bytes_; } + uint32_t maxBufferedTxBytes() const override { return max_buffered_tx_bytes_; } + Matcher::MatchStatusVector createMatchStatusVector() const override { + return Matcher::MatchStatusVector(matchers_.size()); + } + const Matcher& rootMatcher() const override; + bool streaming() const override { return streaming_; } protected: TapConfigBaseImpl(envoy::service::tap::v2alpha::TapConfig&& proto_config, Common::Tap::Sink* admin_streamer); private: + // This is the default setting for both RX/TX max buffered bytes. (This means that per tap, the + // maximum amount that can be buffered is 2x this value). 
+ static constexpr uint32_t DefaultMaxBufferedBytes = 1024; + + const uint32_t max_buffered_rx_bytes_; + const uint32_t max_buffered_tx_bytes_; + const bool streaming_; Sink* sink_to_use_; SinkPtr sink_; + envoy::service::tap::v2alpha::OutputSink::Format sink_format_; std::vector matchers_; }; @@ -38,10 +119,24 @@ class FilePerTapSink : public Sink { FilePerTapSink(const envoy::service::tap::v2alpha::FilePerTapSink& config) : config_(config) {} // Sink - void submitBufferedTrace(std::shared_ptr trace, - uint64_t trace_id) override; + PerTapSinkHandlePtr createPerTapSinkHandle(uint64_t trace_id) override { + return std::make_unique(*this, trace_id); + } private: + struct FilePerTapSinkHandle : public PerTapSinkHandle { + FilePerTapSinkHandle(FilePerTapSink& parent, uint64_t trace_id) + : parent_(parent), trace_id_(trace_id) {} + + // PerTapSinkHandle + void submitTrace(TraceWrapperPtr&& trace, + envoy::service::tap::v2alpha::OutputSink::Format format) override; + + FilePerTapSink& parent_; + const uint64_t trace_id_; + std::ofstream output_file_; + }; + const envoy::service::tap::v2alpha::FilePerTapSink config_; }; diff --git a/source/extensions/common/tap/tap_matcher.cc b/source/extensions/common/tap/tap_matcher.cc index 39114a1229943..1b0d53bfa6143 100644 --- a/source/extensions/common/tap/tap_matcher.cc +++ b/source/extensions/common/tap/tap_matcher.cc @@ -66,21 +66,29 @@ SetLogicMatcher::SetLogicMatcher( } } -bool SetLogicMatcher::updateLocalStatus(std::vector& statuses, +void SetLogicMatcher::updateLocalStatus(MatchStatusVector& statuses, const UpdateFunctor& functor) const { + if (!statuses[my_index_].might_change_status_) { + return; + } + for (size_t index : indexes_) { - statuses[index] = functor(*matchers_[index], statuses); + functor(*matchers_[index], statuses); } - auto predicate = [&statuses](size_t index) { return statuses[index]; }; + auto predicate = [&statuses](size_t index) { return statuses[index].matches_; }; if (type_ == Type::And) { - 
statuses[my_index_] = std::all_of(indexes_.begin(), indexes_.end(), predicate); + statuses[my_index_].matches_ = std::all_of(indexes_.begin(), indexes_.end(), predicate); } else { ASSERT(type_ == Type::Or); - statuses[my_index_] = std::any_of(indexes_.begin(), indexes_.end(), predicate); + statuses[my_index_].matches_ = std::any_of(indexes_.begin(), indexes_.end(), predicate); } - return statuses[my_index_]; + // TODO(mattklein123): We can potentially short circuit this even further if we git a single false + // in an AND set or a single true in an OR set. + statuses[my_index_].might_change_status_ = + std::any_of(indexes_.begin(), indexes_.end(), + [&statuses](size_t index) { return statuses[index].might_change_status_; }); } NotMatcher::NotMatcher(const envoy::service::tap::v2alpha::MatchPredicate& config, @@ -89,10 +97,15 @@ NotMatcher::NotMatcher(const envoy::service::tap::v2alpha::MatchPredicate& confi buildMatcher(config, matchers); } -bool NotMatcher::updateLocalStatus(std::vector& statuses, +void NotMatcher::updateLocalStatus(MatchStatusVector& statuses, const UpdateFunctor& functor) const { - statuses[my_index_] = !functor(*matchers_[not_index_], statuses); - return statuses[my_index_]; + if (!statuses[my_index_].might_change_status_) { + return; + } + + functor(*matchers_[not_index_], statuses); + statuses[my_index_].matches_ = !statuses[not_index_].matches_; + statuses[my_index_].might_change_status_ = statuses[not_index_].might_change_status_; } HttpHeaderMatcherBase::HttpHeaderMatcherBase( @@ -104,10 +117,11 @@ HttpHeaderMatcherBase::HttpHeaderMatcherBase( } } -bool HttpHeaderMatcherBase::matchHeaders(const Http::HeaderMap& headers, - std::vector& statuses) const { - statuses[my_index_] = Http::HeaderUtility::matchHeaders(headers, headers_to_match_); - return statuses[my_index_]; +void HttpHeaderMatcherBase::matchHeaders(const Http::HeaderMap& headers, + MatchStatusVector& statuses) const { + ASSERT(statuses[my_index_].might_change_status_); + 
statuses[my_index_].matches_ = Http::HeaderUtility::matchHeaders(headers, headers_to_match_); + statuses[my_index_].might_change_status_ = false; } } // namespace Tap diff --git a/source/extensions/common/tap/tap_matcher.h b/source/extensions/common/tap/tap_matcher.h index 622933a96d658..b80ff24256dba 100644 --- a/source/extensions/common/tap/tap_matcher.h +++ b/source/extensions/common/tap/tap_matcher.h @@ -24,20 +24,26 @@ using MatcherPtr = std::unique_ptr; * - In order to make this computationally efficient, the matching tree is kept in a vector, with * all references to other matchers implemented using an index into the vector. The vector is * effectively a preorder traversal flattened N-ary tree. - * - The previous point allows the creation of a per-stream/request vector of booleans of the same - * size as the matcher vector. Then, when match status is updated given new information, the - * vector of booleans can be easily updated using the same indexes as in the constant match - * configuration. + * - The previous point allows the creation of a per-stream/request vector of match statuses of + * the same size as the matcher vector. Then, when match status is updated given new + * information, the vector of match statuses can be easily updated using the same indexes as in + * the constant match configuration. * - Finally, a matches() function can be trivially implemented by looking in the status vector at * the index position that the current matcher is located in. - * - * TODO(mattklein123): Currently, any match updates perform a recursive call on any child match - * nodes. It's possible that we can short circuit this in certain cases but this needs more - * thinking (e.g., if an OR matcher already has one match and it's not possible for a matcher to - * flip from true to false). 
*/ class Matcher { public: + struct MatchStatus { + bool operator==(const MatchStatus& rhs) const { + return matches_ == rhs.matches_ && might_change_status_ == rhs.might_change_status_; + } + + bool matches_{false}; // Does the matcher currently match? + bool might_change_status_{true}; // Is it possible for matches_ to change in subsequent updates? + }; + + using MatchStatusVector = std::vector; + /** * Base class constructor for a matcher. * @param matchers supplies the match tree vector being built. @@ -59,7 +65,7 @@ class Matcher { * Update match status when a stream is created. This might be an HTTP stream, a TCP connection, * etc. This allows any matchers to flip to an initial state of true if applicable. */ - virtual bool onNewStream(std::vector& statuses) const PURE; + virtual void onNewStream(MatchStatusVector& statuses) const PURE; /** * Update match status given HTTP request headers. @@ -67,8 +73,8 @@ class Matcher { * @param statuses supplies the per-stream-request match status vector which must be the same * size as the match tree vector (see above). */ - virtual bool onHttpRequestHeaders(const Http::HeaderMap& request_headers, - std::vector& statuses) const PURE; + virtual void onHttpRequestHeaders(const Http::HeaderMap& request_headers, + MatchStatusVector& statuses) const PURE; /** * Update match status given HTTP request trailers. @@ -76,8 +82,8 @@ class Matcher { * @param statuses supplies the per-stream-request match status vector which must be the same * size as the match tree vector (see above). */ - virtual bool onHttpRequestTrailers(const Http::HeaderMap& request_trailers, - std::vector& statuses) const PURE; + virtual void onHttpRequestTrailers(const Http::HeaderMap& request_trailers, + MatchStatusVector& statuses) const PURE; /** * Update match status given HTTP response headers. 
@@ -85,8 +91,8 @@ class Matcher { * @param statuses supplies the per-stream-request match status vector which must be the same * size as the match tree vector (see above). */ - virtual bool onHttpResponseHeaders(const Http::HeaderMap& response_headers, - std::vector& statuses) const PURE; + virtual void onHttpResponseHeaders(const Http::HeaderMap& response_headers, + MatchStatusVector& statuses) const PURE; /** * Update match status given HTTP response trailers. @@ -94,15 +100,15 @@ class Matcher { * @param statuses supplies the per-stream-request match status vector which must be the same * size as the match tree vector (see above). */ - virtual bool onHttpResponseTrailers(const Http::HeaderMap& response_trailers, - std::vector& statuses) const PURE; + virtual void onHttpResponseTrailers(const Http::HeaderMap& response_trailers, + MatchStatusVector& statuses) const PURE; /** * @return whether given currently available information, the matcher matches. * @param statuses supplies the per-stream-request match status vector which must be the same * size as the match tree vector (see above). 
*/ - bool matches(const std::vector& statuses) const { return statuses[my_index_]; } + MatchStatus matchStatus(const MatchStatusVector& statuses) const { return statuses[my_index_]; } protected: const size_t my_index_; @@ -124,41 +130,38 @@ class LogicMatcherBase : public Matcher { using Matcher::Matcher; // Extensions::Common::Tap::Matcher - bool onNewStream(std::vector& statuses) const override { - return updateLocalStatus( - statuses, [](Matcher& m, std::vector& statuses) { return m.onNewStream(statuses); }); + void onNewStream(MatchStatusVector& statuses) const override { + updateLocalStatus(statuses, + [](Matcher& m, MatchStatusVector& statuses) { m.onNewStream(statuses); }); } - bool onHttpRequestHeaders(const Http::HeaderMap& request_headers, - std::vector& statuses) const override { - return updateLocalStatus(statuses, [&request_headers](Matcher& m, std::vector& statuses) { - return m.onHttpRequestHeaders(request_headers, statuses); + void onHttpRequestHeaders(const Http::HeaderMap& request_headers, + MatchStatusVector& statuses) const override { + updateLocalStatus(statuses, [&request_headers](Matcher& m, MatchStatusVector& statuses) { + m.onHttpRequestHeaders(request_headers, statuses); }); } - bool onHttpRequestTrailers(const Http::HeaderMap& request_trailers, - std::vector& statuses) const override { - return updateLocalStatus(statuses, - [&request_trailers](Matcher& m, std::vector& statuses) { - return m.onHttpRequestTrailers(request_trailers, statuses); - }); + void onHttpRequestTrailers(const Http::HeaderMap& request_trailers, + MatchStatusVector& statuses) const override { + updateLocalStatus(statuses, [&request_trailers](Matcher& m, MatchStatusVector& statuses) { + m.onHttpRequestTrailers(request_trailers, statuses); + }); } - bool onHttpResponseHeaders(const Http::HeaderMap& response_headers, - std::vector& statuses) const override { - return updateLocalStatus(statuses, - [&response_headers](Matcher& m, std::vector& statuses) { - return 
m.onHttpResponseHeaders(response_headers, statuses); - }); + void onHttpResponseHeaders(const Http::HeaderMap& response_headers, + MatchStatusVector& statuses) const override { + updateLocalStatus(statuses, [&response_headers](Matcher& m, MatchStatusVector& statuses) { + m.onHttpResponseHeaders(response_headers, statuses); + }); } - bool onHttpResponseTrailers(const Http::HeaderMap& response_trailers, - std::vector& statuses) const override { - return updateLocalStatus(statuses, - [&response_trailers](Matcher& m, std::vector& statuses) { - return m.onHttpResponseTrailers(response_trailers, statuses); - }); + void onHttpResponseTrailers(const Http::HeaderMap& response_trailers, + MatchStatusVector& statuses) const override { + updateLocalStatus(statuses, [&response_trailers](Matcher& m, MatchStatusVector& statuses) { + m.onHttpResponseTrailers(response_trailers, statuses); + }); } protected: - using UpdateFunctor = std::function&)>; - virtual bool updateLocalStatus(std::vector& statuses, + using UpdateFunctor = std::function; + virtual void updateLocalStatus(MatchStatusVector& statuses, const UpdateFunctor& functor) const PURE; }; @@ -173,7 +176,7 @@ class SetLogicMatcher : public LogicMatcherBase { std::vector& matchers, Type type); private: - bool updateLocalStatus(std::vector& statuses, const UpdateFunctor& functor) const override; + void updateLocalStatus(MatchStatusVector& statuses, const UpdateFunctor& functor) const override; std::vector& matchers_; std::vector indexes_; @@ -189,7 +192,7 @@ class NotMatcher : public LogicMatcherBase { std::vector& matchers); private: - bool updateLocalStatus(std::vector& statuses, const UpdateFunctor& functor) const override; + void updateLocalStatus(MatchStatusVector& statuses, const UpdateFunctor& functor) const override; std::vector& matchers_; const size_t not_index_; @@ -204,19 +207,11 @@ class SimpleMatcher : public Matcher { using Matcher::Matcher; // Extensions::Common::Tap::Matcher - bool onNewStream(std::vector& 
statuses) const { return statuses[my_index_]; } - bool onHttpRequestHeaders(const Http::HeaderMap&, std::vector& statuses) const { - return statuses[my_index_]; - } - bool onHttpRequestTrailers(const Http::HeaderMap&, std::vector& statuses) const { - return statuses[my_index_]; - } - bool onHttpResponseHeaders(const Http::HeaderMap&, std::vector& statuses) const { - return statuses[my_index_]; - } - bool onHttpResponseTrailers(const Http::HeaderMap&, std::vector& statuses) const { - return statuses[my_index_]; - } + void onNewStream(MatchStatusVector&) const override {} + void onHttpRequestHeaders(const Http::HeaderMap&, MatchStatusVector&) const override {} + void onHttpRequestTrailers(const Http::HeaderMap&, MatchStatusVector&) const override {} + void onHttpResponseHeaders(const Http::HeaderMap&, MatchStatusVector&) const override {} + void onHttpResponseTrailers(const Http::HeaderMap&, MatchStatusVector&) const override {} }; /** @@ -227,9 +222,9 @@ class AnyMatcher : public SimpleMatcher { using SimpleMatcher::SimpleMatcher; // Extensions::Common::Tap::Matcher - bool onNewStream(std::vector& statuses) const override { - statuses[my_index_] = true; - return true; + void onNewStream(MatchStatusVector& statuses) const override { + statuses[my_index_].matches_ = true; + statuses[my_index_].might_change_status_ = false; } }; @@ -242,7 +237,7 @@ class HttpHeaderMatcherBase : public SimpleMatcher { const std::vector& matchers); protected: - bool matchHeaders(const Http::HeaderMap& headers, std::vector& statuses) const; + void matchHeaders(const Http::HeaderMap& headers, MatchStatusVector& statuses) const; std::vector headers_to_match_; }; @@ -255,9 +250,9 @@ class HttpRequestHeadersMatcher : public HttpHeaderMatcherBase { using HttpHeaderMatcherBase::HttpHeaderMatcherBase; // Extensions::Common::Tap::Matcher - bool onHttpRequestHeaders(const Http::HeaderMap& request_headers, - std::vector& statuses) const override { - return matchHeaders(request_headers, statuses); + 
void onHttpRequestHeaders(const Http::HeaderMap& request_headers, + MatchStatusVector& statuses) const override { + matchHeaders(request_headers, statuses); } }; @@ -269,9 +264,9 @@ class HttpRequestTrailersMatcher : public HttpHeaderMatcherBase { using HttpHeaderMatcherBase::HttpHeaderMatcherBase; // Extensions::Common::Tap::Matcher - bool onHttpRequestTrailers(const Http::HeaderMap& request_trailers, - std::vector& statuses) const override { - return matchHeaders(request_trailers, statuses); + void onHttpRequestTrailers(const Http::HeaderMap& request_trailers, + MatchStatusVector& statuses) const override { + matchHeaders(request_trailers, statuses); } }; @@ -283,9 +278,9 @@ class HttpResponseHeadersMatcher : public HttpHeaderMatcherBase { using HttpHeaderMatcherBase::HttpHeaderMatcherBase; // Extensions::Common::Tap::Matcher - bool onHttpResponseHeaders(const Http::HeaderMap& response_headers, - std::vector& statuses) const override { - return matchHeaders(response_headers, statuses); + void onHttpResponseHeaders(const Http::HeaderMap& response_headers, + MatchStatusVector& statuses) const override { + matchHeaders(response_headers, statuses); } }; @@ -297,9 +292,9 @@ class HttpResponseTrailersMatcher : public HttpHeaderMatcherBase { using HttpHeaderMatcherBase::HttpHeaderMatcherBase; // Extensions::Common::Tap::Matcher - bool onHttpResponseTrailers(const Http::HeaderMap& response_trailers, - std::vector& statuses) const override { - return matchHeaders(response_trailers, statuses); + void onHttpResponseTrailers(const Http::HeaderMap& response_trailers, + MatchStatusVector& statuses) const override { + matchHeaders(response_trailers, statuses); } }; diff --git a/source/extensions/common/wasm/BUILD b/source/extensions/common/wasm/BUILD index 6c5af58703a58..5af6e13ef659c 100644 --- a/source/extensions/common/wasm/BUILD +++ b/source/extensions/common/wasm/BUILD @@ -23,6 +23,7 @@ envoy_cc_library( ":well_known_names", "//include/envoy/http:codes_interface", 
"//include/envoy/http:filter_interface", + "//include/envoy/server:wasm_interface", "//include/envoy/upstream:cluster_manager_interface", "//source/common/common:stack_array", "//source/extensions/filters/http:well_known_names", @@ -42,6 +43,8 @@ envoy_cc_library( "//source/common/http:message_lib", "//source/common/http:utility_lib", "//source/common/tracing:http_tracer_lib", + "//source/extensions/common/wasm/null:null_lib", + "//source/extensions/common/wasm/v8:v8_lib", "//source/extensions/common/wasm/wavm:wavm_lib", ], ) diff --git a/source/extensions/common/wasm/null/BUILD b/source/extensions/common/wasm/null/BUILD new file mode 100644 index 0000000000000..e8cdb44b4974b --- /dev/null +++ b/source/extensions/common/wasm/null/BUILD @@ -0,0 +1,55 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "null_plugin_hdr", + hdrs = [ + "wasm_api_impl.h", + ], + deps = [ + "//api/wasm/cpp:intrinsics_hdr", + ], +) + +envoy_cc_library( + name = "null_lib", + srcs = ["null.cc"], + hdrs = ["null.h"], + deps = [ + ":null_plugin_hdr", + "//external:abseil_node_hash_map", + "//source/common/common:assert_lib", + "//source/common/common:c_smart_ptr_lib", + "//source/common/protobuf", + "//source/extensions/common/wasm:wasm_hdr", + "//source/extensions/common/wasm:well_known_names", + "@envoy_api//envoy/config/wasm/v2:wasm_cc", + ], +) + +# Sample builtin Null VM/Sandbox plugin, also used in tests. +envoy_cc_library( + name = "plugin", + srcs = [ + "null_plugin_wrapper.cc", # The wrapper, copy and update the name. + "plugin.cc", # The plugin code. 
+ ], + copts = ["-DNULL_PLUGIN=1"], + deps = [ + ":null_lib", + "//external:abseil_node_hash_map", + "//source/common/common:assert_lib", + "//source/common/common:c_smart_ptr_lib", + "//source/common/protobuf", + "//source/extensions/common/wasm:wasm_hdr", + "//source/extensions/common/wasm:well_known_names", + "@envoy_api//envoy/config/wasm/v2:wasm_cc", + ], +) diff --git a/source/extensions/common/wasm/null/null.cc b/source/extensions/common/wasm/null/null.cc new file mode 100644 index 0000000000000..38dd9352020a4 --- /dev/null +++ b/source/extensions/common/wasm/null/null.cc @@ -0,0 +1,546 @@ +#include "extensions/common/wasm/null/null.h" + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "envoy/common/exception.h" +#include "envoy/server/wasm.h" + +#include "common/common/assert.h" +#include "common/common/logger.h" + +#include "extensions/common/wasm/wasm.h" +#include "extensions/common/wasm/well_known_names.h" + +#include "absl/container/node_hash_map.h" +#include "absl/strings/match.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Wasm { + +extern thread_local Envoy::Extensions::Common::Wasm::Context* current_context_; + +namespace Null { + +namespace { + +struct SaveRestoreContext { + explicit SaveRestoreContext(Context* context) { + saved_context = current_context_; + current_context_ = context; + } + ~SaveRestoreContext() { current_context_ = saved_context; } + Context* saved_context; +}; + +} // namespace + +using Plugin::Context; +using Plugin::WasmData; + +struct NullVm : public WasmVm { + NullVm() = default; + NullVm(const NullVm& other) : plugin_(std::make_unique(*other.plugin_)) {} + ~NullVm() override; + + // WasmVm + absl::string_view vm() override { return WasmVmNames::get().Null; } + bool clonable() override { return true; }; + std::unique_ptr clone() override; + bool load(const std::string& code, bool allow_precompiled) override; + void link(absl::string_view 
debug_name, bool needs_emscripten) override; + void start(Common::Wasm::Context* context) override; + absl::string_view getMemory(uint64_t pointer, uint64_t size) override; + bool getMemoryOffset(void* host_pointer, uint64_t* vm_pointer) override; + bool setMemory(uint64_t pointer, uint64_t size, void* data) override; + bool setWord(uint64_t pointer, uint64_t data) override; + void makeModule(absl::string_view name) override; + absl::string_view getUserSection(absl::string_view name) override; + + void getFunction(absl::string_view functionName, WasmCall0Void* f) override; + void getFunction(absl::string_view functionName, WasmCall1Void* f) override; + void getFunction(absl::string_view functionName, WasmCall2Void* f) override; + void getFunction(absl::string_view functionName, WasmCall3Void* f) override; + void getFunction(absl::string_view functionName, WasmCall4Void* f) override; + void getFunction(absl::string_view functionName, WasmCall5Void* f) override; + void getFunction(absl::string_view functionName, WasmCall8Void* f) override; + void getFunction(absl::string_view functionName, WasmCall1Int* f) override; + void getFunction(absl::string_view functionName, WasmCall3Int* f) override; + + // These are noops for NullVm. 
+#define _REGISTER_CALLBACK(_type) \ + void registerCallback(absl::string_view, absl::string_view, _type, \ + typename ConvertFunctionTypeWordToUint32<_type>::type) override{}; + _REGISTER_CALLBACK(WasmCallback0Void); + _REGISTER_CALLBACK(WasmCallback1Void); + _REGISTER_CALLBACK(WasmCallback2Void); + _REGISTER_CALLBACK(WasmCallback3Void); + _REGISTER_CALLBACK(WasmCallback4Void); + _REGISTER_CALLBACK(WasmCallback5Void); + _REGISTER_CALLBACK(WasmCallback0Int); + _REGISTER_CALLBACK(WasmCallback1Int); + _REGISTER_CALLBACK(WasmCallback2Int); + _REGISTER_CALLBACK(WasmCallback3Int); + _REGISTER_CALLBACK(WasmCallback4Int); + _REGISTER_CALLBACK(WasmCallback5Int); + _REGISTER_CALLBACK(WasmCallback6Int); + _REGISTER_CALLBACK(WasmCallback7Int); + _REGISTER_CALLBACK(WasmCallback8Int); + _REGISTER_CALLBACK(WasmCallback9Int); + _REGISTER_CALLBACK(WasmCallback_ZWl); + _REGISTER_CALLBACK(WasmCallback_ZWm); + _REGISTER_CALLBACK(WasmCallback_m); + _REGISTER_CALLBACK(WasmCallback_mW); +#undef _REGISTER_CALLBACK + + // NullVm does not advertize code as emscripten so this will not get called. + std::unique_ptr> makeGlobal(absl::string_view, absl::string_view, + double) override { + NOT_REACHED_GCOVR_EXCL_LINE; + }; + std::unique_ptr> makeGlobal(absl::string_view, absl::string_view, Word) override { + NOT_REACHED_GCOVR_EXCL_LINE; + }; + + std::unique_ptr plugin_; +}; + +NullVm::~NullVm() {} + +std::unique_ptr NullVm::clone() { return std::make_unique(*this); } + +bool NullVm::load(const std::string& name, bool /* allow_precompiled */) { + auto factory = Registry::FactoryRegistry::getFactory(name); + if (!factory) { + return false; + } + plugin_ = factory->create(); + return true; +} + +void NullVm::link(absl::string_view /* name */, bool /* needs_emscripten */) {} + +void NullVm::makeModule(absl::string_view /* name */) { + // NullVm does not advertize code as emscripten so this will not get called. 
+ NOT_REACHED_GCOVR_EXCL_LINE; +} + +void NullVm::start(Common::Wasm::Context* context) { + SaveRestoreContext saved_context(context); + plugin_->start(); +} + +absl::string_view NullVm::getMemory(uint64_t pointer, uint64_t size) { + return {reinterpret_cast(pointer), static_cast(size)}; +} + +bool NullVm::getMemoryOffset(void* host_pointer, uint64_t* vm_pointer) { + *vm_pointer = reinterpret_cast(host_pointer); + return true; +} + +bool NullVm::setMemory(uint64_t pointer, uint64_t size, void* data) { + auto p = reinterpret_cast(pointer); + memcpy(p, data, size); + return true; +} + +bool NullVm::setWord(uint64_t pointer, uint64_t data) { + auto p = reinterpret_cast(pointer); + memcpy(p, &data, sizeof(data)); + return true; +} + +absl::string_view NullVm::getUserSection(absl::string_view /* name */) { + // Return nothing: there is no WASM file. + return {}; +} + +std::unique_ptr createVm() { return std::make_unique(); } + +void NullVm::getFunction(absl::string_view functionName, WasmCall0Void* f) { + if (functionName == "_proxy_onStart") { + auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context) { + SaveRestoreContext saved_context(context); + plugin->onStart(); + }; + } else if (functionName == "_proxy_onTick") { + auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context) { + SaveRestoreContext saved_context(context); + plugin->onTick(); + }; + } else { + throw WasmVmException(fmt::format("Missing getFunction for: {}", functionName)); + } +} + +void NullVm::getFunction(absl::string_view functionName, WasmCall1Void* f) { + if (functionName == "_free") { + *f = [](Common::Wasm::Context*, Word ptr) { return ::free(reinterpret_cast(ptr.u64)); }; + } else if (functionName == "_proxy_onCreate") { + auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context, Word context_id) { + SaveRestoreContext saved_context(context); + plugin->onCreate(context_id.u64); + }; + } else if (functionName == "_proxy_onDone") { + 
auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context, Word context_id) { + SaveRestoreContext saved_context(context); + plugin->onDone(context_id.u64); + }; + } else if (functionName == "_proxy_onLog") { + auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context, Word context_id) { + SaveRestoreContext saved_context(context); + plugin->onLog(context_id.u64); + }; + } else if (functionName == "_proxy_onDelete") { + auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context, Word context_id) { + SaveRestoreContext saved_context(context); + plugin->onDelete(context_id.u64); + }; + } else { + throw WasmVmException(fmt::format("Missing getFunction for: {}", functionName)); + } +} + +void NullVm::getFunction(absl::string_view functionName, WasmCall2Void* f) { + if (functionName == "_proxy_onConfigure") { + auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context, Word ptr, Word size) { + SaveRestoreContext saved_context(context); + plugin->onConfigure(ptr.u64, size.u64); + }; + } else if (functionName == "_proxy_onGrpcCreateInitialMetadata") { + auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context, Word context_id, Word token) { + SaveRestoreContext saved_context(context); + plugin->onGrpcCreateInitialMetadata(context_id.u64, token.u64); + }; + } else if (functionName == "_proxy_onGrpcReceiveInitialMetadata") { + auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context, Word context_id, Word token) { + SaveRestoreContext saved_context(context); + plugin->onGrpcReceiveInitialMetadata(context_id.u64, token.u64); + }; + } else if (functionName == "_proxy_onGrpcReceiveTrailingMetadata") { + auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context, Word context_id, Word token) { + SaveRestoreContext saved_context(context); + plugin->onGrpcReceiveTrailingMetadata(context_id.u64, token.u64); + }; + } else { + throw 
WasmVmException(fmt::format("Missing getFunction for: {}", functionName)); + } +} + +void NullVm::getFunction(absl::string_view functionName, WasmCall3Void* /* f */) { + throw WasmVmException(fmt::format("Missing getFunction for: {}", functionName)); +} + +void NullVm::getFunction(absl::string_view functionName, WasmCall4Void* f) { + if (functionName == "_proxy_onGrpcReceive") { + auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context, Word context_id, Word token, Word response_ptr, + Word response_size) { + SaveRestoreContext saved_context(context); + plugin->onGrpcReceive(context_id.u64, token.u64, response_ptr.u64, response_size.u64); + }; + } else { + throw WasmVmException(fmt::format("Missing getFunction for: {}", functionName)); + } +} + +void NullVm::getFunction(absl::string_view functionName, WasmCall5Void* f) { + if (functionName == "_proxy_onGrpcClose") { + auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context, Word context_id, Word token, Word status_code, + Word status_message_ptr, Word status_message_size) { + SaveRestoreContext saved_context(context); + plugin->onGrpcClose(context_id.u64, token.u64, status_code.u64, status_message_ptr.u64, + status_message_size.u64); + }; + } else { + throw WasmVmException(fmt::format("Missing getFunction for: {}", functionName)); + } +} + +void NullVm::getFunction(absl::string_view functionName, WasmCall8Void* f) { + if (functionName == "_proxy_onHttpCallResponse") { + auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context, Word context_id, Word token, + Word header_pairs_ptr, Word header_pairs_size, Word body_ptr, Word body_size, + Word trailer_pairs_ptr, Word trailer_pairs_size) { + SaveRestoreContext saved_context(context); + plugin->onHttpCallResponse(context_id.u64, token.u64, header_pairs_ptr.u64, + header_pairs_size.u64, body_ptr.u64, body_size.u64, + trailer_pairs_ptr.u64, trailer_pairs_size.u64); + }; + } else { + throw 
WasmVmException(fmt::format("Missing getFunction for: {}", functionName)); + } +} + +void NullVm::getFunction(absl::string_view functionName, WasmCall1Int* f) { + if (functionName == "_malloc") { + *f = [](Common::Wasm::Context*, Word size) -> Word { + return Word(reinterpret_cast(::malloc(size.u64))); + }; + } else if (functionName == "_proxy_onRequestHeaders") { + auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context, Word context_id) -> Word { + SaveRestoreContext saved_context(context); + return Word(plugin->onRequestHeaders(context_id.u64)); + }; + } else if (functionName == "_proxy_onRequestTrailers") { + auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context, Word context_id) -> Word { + SaveRestoreContext saved_context(context); + return Word(plugin->onRequestTrailers(context_id.u64)); + }; + } else if (functionName == "_proxy_onRequestMetadata") { + auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context, Word context_id) -> Word { + SaveRestoreContext saved_context(context); + return Word(plugin->onRequestMetadata(context_id.u64)); + }; + } else if (functionName == "_proxy_onResponseHeaders") { + auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context, Word context_id) -> Word { + SaveRestoreContext saved_context(context); + return Word(plugin->onResponseHeaders(context_id.u64)); + }; + } else if (functionName == "_proxy_onResponseTrailers") { + auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context, Word context_id) -> Word { + SaveRestoreContext saved_context(context); + return Word(plugin->onResponseTrailers(context_id.u64)); + }; + } else if (functionName == "_proxy_onResponseMetadata") { + auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context, Word context_id) -> Word { + SaveRestoreContext saved_context(context); + return Word(plugin->onResponseMetadata(context_id.u64)); + }; + } else { + throw 
WasmVmException(fmt::format("Missing getFunction for: {}", functionName)); + } +} + +void NullVm::getFunction(absl::string_view functionName, WasmCall3Int* f) { + if (functionName == "_proxy_onRequestBody") { + auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context, Word context_id, Word body_buffer_length, + Word end_of_stream) -> Word { + SaveRestoreContext saved_context(context); + return Word(plugin->onRequestBody(context_id.u64, body_buffer_length.u64, end_of_stream.u64)); + }; + } else if (functionName == "_proxy_onResponseBody") { + auto plugin = plugin_.get(); + *f = [plugin](Common::Wasm::Context* context, Word context_id, Word body_buffer_length, + Word end_of_stream) -> Word { + SaveRestoreContext saved_context(context); + return Word( + plugin->onResponseBody(context_id.u64, body_buffer_length.u64, end_of_stream.u64)); + }; + } else { + throw WasmVmException(fmt::format("Missing getFunction for: {}", functionName)); + } +} + +Context* NullVmPlugin::ensureContext(uint64_t context_id) { + auto e = context_map_.insert(std::make_pair(context_id, nullptr)); + if (e.second) { + e.first->second = newContext(context_id); + } + return e.first->second.get(); +} + +Context* NullVmPlugin::getContext(uint64_t context_id) { + auto it = context_map_.find(context_id); + if (it == context_map_.end()) { + return nullptr; + } + return it->second.get(); +} + +void NullVmPlugin::onStart() { ensureContext(0)->onStart(); } + +void NullVmPlugin::onConfigure(uint64_t ptr, uint64_t size) { + ensureContext(0)->onConfigure(std::make_unique(reinterpret_cast(ptr), size)); +} + +void NullVmPlugin::onTick() { ensureContext(0)->onTick(); } + +void NullVmPlugin::onCreate(uint64_t context_id) { ensureContext(context_id)->onCreate(); } + +uint64_t NullVmPlugin::onRequestHeaders(uint64_t context_id) { + auto c = getContext(context_id); + if (!c) { + return static_cast(Plugin::FilterHeadersStatus::Continue); + } + return static_cast(c->onRequestHeaders()); +} + 
+uint64_t NullVmPlugin::onRequestBody(uint64_t context_id, uint64_t body_buffer_length, + uint64_t end_of_stream) { + auto c = getContext(context_id); + if (!c) { + return static_cast(Plugin::FilterDataStatus::Continue); + } + return static_cast( + c->onRequestBody(static_cast(body_buffer_length), end_of_stream != 0)); +} + +uint64_t NullVmPlugin::onRequestTrailers(uint64_t context_id) { + auto c = getContext(context_id); + if (!c) { + return static_cast(Plugin::FilterTrailersStatus::Continue); + } + return static_cast(c->onRequestTrailers()); +} + +uint64_t NullVmPlugin::onRequestMetadata(uint64_t context_id) { + auto c = getContext(context_id); + if (!c) { + return static_cast(Plugin::FilterMetadataStatus::Continue); + } + return static_cast(c->onRequestMetadata()); +} + +uint64_t NullVmPlugin::onResponseHeaders(uint64_t context_id) { + auto c = getContext(context_id); + if (!c) { + return static_cast(Plugin::FilterHeadersStatus::Continue); + } + return static_cast(c->onResponseHeaders()); +} + +uint64_t NullVmPlugin::onResponseBody(uint64_t context_id, uint64_t body_buffer_length, + uint64_t end_of_stream) { + auto c = getContext(context_id); + if (!c) { + return static_cast(Plugin::FilterDataStatus::Continue); + } + return static_cast( + c->onResponseBody(static_cast(body_buffer_length), end_of_stream != 0)); +} + +uint64_t NullVmPlugin::onResponseTrailers(uint64_t context_id) { + auto c = getContext(context_id); + if (!c) { + return static_cast(Plugin::FilterTrailersStatus::Continue); + } + return static_cast(c->onResponseTrailers()); +} + +uint64_t NullVmPlugin::onResponseMetadata(uint64_t context_id) { + auto c = getContext(context_id); + if (!c) { + return static_cast(Plugin::FilterMetadataStatus::Continue); + } + return static_cast(c->onResponseMetadata()); +} + +void NullVmPlugin::onHttpCallResponse(uint64_t context_id, uint64_t token, + uint64_t header_pairs_ptr, uint64_t header_pairs_size, + uint64_t body_ptr, uint64_t body_size, + uint64_t 
trailer_pairs_ptr, uint64_t trailer_pairs_size) { + auto c = getContext(context_id); + if (!c) { + return; + } + c->onHttpCallResponse( + token, + std::make_unique(reinterpret_cast(header_pairs_ptr), header_pairs_size), + std::make_unique(reinterpret_cast(body_ptr), body_size), + std::make_unique(reinterpret_cast(trailer_pairs_ptr), trailer_pairs_size)); +} + +void NullVmPlugin::onGrpcReceive(uint64_t context_id, uint64_t token, uint64_t response_ptr, + uint64_t response_size) { + auto c = getContext(context_id); + if (!c) { + return; + } + c->onGrpcReceive( + token, std::make_unique(reinterpret_cast(response_ptr), response_size)); +} + +void NullVmPlugin::onGrpcClose(uint64_t context_id, uint64_t token, uint64_t status_code, + uint64_t status_message_ptr, uint64_t status_message_size) { + auto c = getContext(context_id); + if (!c) + return; + c->onGrpcClose( + token, static_cast(status_code), + std::make_unique(reinterpret_cast(status_message_ptr), status_message_size)); +} + +void NullVmPlugin::onGrpcCreateInitialMetadata(uint64_t context_id, uint64_t token) { + auto c = getContext(context_id); + if (!c) { + return; + } + c->onGrpcCreateInitialMetadata(token); +} + +void NullVmPlugin::onGrpcReceiveInitialMetadata(uint64_t context_id, uint64_t token) { + auto c = getContext(context_id); + if (!c) { + return; + } + c->onGrpcReceiveInitialMetadata(token); +} + +void NullVmPlugin::onGrpcReceiveTrailingMetadata(uint64_t context_id, uint64_t token) { + auto c = getContext(context_id); + if (!c) { + return; + } + c->onGrpcReceiveTrailingMetadata(token); +} + +void NullVmPlugin::onLog(uint64_t context_id) { + auto c = getContext(context_id); + if (!c) { + return; + } + c->onLog(); +} + +void NullVmPlugin::onDone(uint64_t context_id) { + auto c = getContext(context_id); + if (!c) { + return; + } + c->onDone(); +} + +void NullVmPlugin::onDelete(uint64_t context_id) { + auto c = getContext(context_id); + if (!c) { + return; + } + c->onDelete(); + 
context_map_.erase(context_id); +} + +} // namespace Null +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/wasm/null/null.h b/source/extensions/common/wasm/null/null.h new file mode 100644 index 0000000000000..cff681f40a85d --- /dev/null +++ b/source/extensions/common/wasm/null/null.h @@ -0,0 +1,115 @@ +#pragma once + +#include + +#include "extensions/common/wasm/wasm.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Wasm { +namespace Null { +namespace Plugin { +using LogLevel = Envoy::Logger::Logger::levels; +using FilterHeadersStatus = Http::FilterHeadersStatus; +using FilterMetadataStatus = Http::FilterMetadataStatus; +using FilterTrailersStatus = Http::FilterTrailersStatus; +using FilterDataStatus = Http::FilterDataStatus; +using GrpcStatus = ProtobufUtil::error::Code; +using MetricType = Envoy::Extensions::Common::Wasm::Context::MetricType; +using StringView = absl::string_view; +} // namespace Plugin +} // namespace Null +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy + +#include "extensions/common/wasm/null/wasm_api_impl.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Wasm { +namespace Null { +namespace Plugin { +#include "api/wasm/cpp/proxy_wasm_impl.h" +} // namespace Plugin + +class NullVmPlugin { +public: + using Context = Plugin::Context; + using NewContextFnPtr = std::unique_ptr (*)(uint32_t /* id */); + + NullVmPlugin(NewContextFnPtr new_context) : new_context_(new_context) {} + NullVmPlugin(const NullVmPlugin& other) : new_context_(other.new_context_) {} + + std::unique_ptr newContext(uint64_t context_id) { return new_context_(context_id); } + + void start() {} + void onStart(); + void onConfigure(uint64_t ptr, uint64_t size); + void onTick(); + + void onCreate(uint64_t context_id); + + uint64_t onRequestHeaders(uint64_t context_id); + uint64_t onRequestBody(uint64_t 
context_id, uint64_t body_buffer_length, uint64_t end_of_stream); + uint64_t onRequestTrailers(uint64_t context_id); + uint64_t onRequestMetadata(uint64_t context_id); + + uint64_t onResponseHeaders(uint64_t context_id); + uint64_t onResponseBody(uint64_t context_id, uint64_t body_buffer_length, uint64_t end_of_stream); + uint64_t onResponseTrailers(uint64_t context_id); + uint64_t onResponseMetadata(uint64_t context_id); + + void onHttpCallResponse(uint64_t context_id, uint64_t token, uint64_t header_pairs_ptr, + uint64_t header_pairs_size, uint64_t body_ptr, uint64_t body_size, + uint64_t trailer_pairs_ptr, uint64_t trailer_pairs_size); + + void onGrpcReceive(uint64_t context_id, uint64_t token, uint64_t response_ptr, + uint64_t response_size); + void onGrpcClose(uint64_t context_id, uint64_t token, uint64_t status_code, + uint64_t status_message_ptr, uint64_t status_message_size); + void onGrpcCreateInitialMetadata(uint64_t context_id, uint64_t token); + void onGrpcReceiveInitialMetadata(uint64_t context_id, uint64_t token); + void onGrpcReceiveTrailingMetadata(uint64_t context_id, uint64_t token); + + void onLog(uint64_t context_id); + void onDone(uint64_t context_id); + void onDelete(uint64_t context_id); + +private: + Context* ensureContext(uint64_t context_id); + Context* getContext(uint64_t context_id); + + NewContextFnPtr new_context_; + std::unordered_map> context_map_; +}; + +/** + * Pseudo-WASM plugins using the NullVM should implement this factory and register via + * Registry::registerFactory or the convenience class RegisterFactory. + */ +class NullVmPluginFactory { +public: + virtual ~NullVmPluginFactory() {} + + /** + * Name of the plugin. + */ + virtual const std::string name() const PURE; + + /** + * Create an instance of the plugin. 
+ */ + virtual std::unique_ptr create() const PURE; +}; + +std::unique_ptr createVm(); + +} // namespace Null +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/wasm/null/null_plugin_wrapper.cc b/source/extensions/common/wasm/null/null_plugin_wrapper.cc new file mode 100644 index 0000000000000..39b5aa414088d --- /dev/null +++ b/source/extensions/common/wasm/null/null_plugin_wrapper.cc @@ -0,0 +1,37 @@ +#include "extensions/common/wasm/null/null.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Wasm { +namespace Null { +namespace Plugin { +namespace ExamplePlugin { +std::unique_ptr NewContext(uint32_t id); +} // namespace ExamplePlugin + +/** + * Config registration for a Wasm filter plugin. @see NamedHttpFilterConfigFactory. + */ +class PluginFactory : public NullVmPluginFactory { +public: + PluginFactory() {} + + const std::string name() const override { return "null_vm_plugin"; } + std::unique_ptr create() const override { + return std::make_unique( + &Envoy::Extensions::Common::Wasm::Null::Plugin::ExamplePlugin::NewContext); + } +}; + +/** + * Static registration for the null Wasm filter. @see RegisterFactory. 
+ */ +static Registry::RegisterFactory register_; + +} // namespace Plugin +} // namespace Null +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/wasm/null/plugin.cc b/source/extensions/common/wasm/null/plugin.cc new file mode 100644 index 0000000000000..17b90072d077c --- /dev/null +++ b/source/extensions/common/wasm/null/plugin.cc @@ -0,0 +1,63 @@ +#include +#include + +#ifndef NULL_PLUGIN +#include "proxy_wasm_intrinsics.h" +#else + +#include "extensions/common/wasm/null/null.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Wasm { +namespace Null { +namespace Plugin { +namespace ExamplePlugin { +#endif + +class PluginContext : public Context { +public: + explicit PluginContext(uint32_t id) : Context(id) {} + + FilterHeadersStatus onRequestHeaders() override; + FilterDataStatus onRequestBody(size_t body_buffer_length, bool end_of_stream) override; + void onLog() override; + void onDone() override; +}; + +std::unique_ptr NewContext(uint32_t id) { + return std::unique_ptr(new PluginContext(id)); +} + +FilterHeadersStatus PluginContext::onRequestHeaders() { + logDebug(std::string("onRequestHeaders ") + std::to_string(id())); + auto path = getRequestHeader(":path"); + logInfo(std::string("header path ") + std::string(path->view())); + addRequestHeader("newheader", "newheadervalue"); + replaceRequestHeader("server", "envoy-wasm"); + return FilterHeadersStatus::Continue; +} + +FilterDataStatus PluginContext::onRequestBody(size_t body_buffer_length, bool /* end_of_stream */) { + auto body = getRequestBodyBufferBytes(0, body_buffer_length); + logError(std::string("onRequestBody ") + std::string(body->view())); + return FilterDataStatus::Continue; +} + +void PluginContext::onLog() { + auto path = getRequestHeader(":path"); + logWarn("onLog " + std::to_string(id()) + " " + std::string(path->view())); +} + +void PluginContext::onDone() { logWarn("onDone " + 
std::to_string(id())); } + +#ifdef NULL_PLUGIN +} // namespace ExamplePlugin +} // namespace Plugin +} // namespace Null +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy +#endif diff --git a/source/extensions/common/wasm/null/wasm_api_impl.h b/source/extensions/common/wasm/null/wasm_api_impl.h new file mode 100644 index 0000000000000..763a7608560be --- /dev/null +++ b/source/extensions/common/wasm/null/wasm_api_impl.h @@ -0,0 +1,171 @@ +#pragma once + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Wasm { + +extern thread_local Envoy::Extensions::Common::Wasm::Context* current_context_; + +namespace Null { +namespace Plugin { + +#define WS(_x) Word(static_cast(_x)) +#define WR(_x) Word(reinterpret_cast(_x)) + +// Logging +inline void proxy_log(LogLevel level, const char* logMessage, size_t messageSize) { + logHandler(current_context_, WS(level), WR(logMessage), WS(messageSize)); +} + +// Timer +inline void proxy_setTickPeriodMilliseconds(uint64_t millisecond) { + setTickPeriodMillisecondsHandler(current_context_, Word(millisecond)); +} +inline uint64_t proxy_getCurrentTimeNanoseconds() { + return getCurrentTimeNanosecondsHandler(current_context_); +} + +// Stream Info +inline void proxy_getProtocol(StreamType type, const char** value_ptr, size_t* value_size) { + getProtocolHandler(current_context_, WS(type), WR(value_ptr), WR(value_size)); +} + +// Metadata +inline void proxy_getMetadata(MetadataType type, const char* key_ptr, size_t key_size, + const char** value_ptr_ptr, size_t* value_size_ptr) { + getMetadataHandler(current_context_, WS(type), WR(key_ptr), WS(key_size), WR(value_ptr_ptr), + WR(value_size_ptr)); +} +inline void proxy_setMetadata(MetadataType type, const char* key_ptr, size_t key_size, + const char* value_ptr, size_t value_size) { + setMetadataHandler(current_context_, WS(type), WR(key_ptr), WS(key_size), WR(value_ptr), + WS(value_size)); +} +inline void 
proxy_getMetadataPairs(MetadataType type, const char** value_ptr, size_t* value_size) { + getMetadataPairsHandler(current_context_, WS(type), WR(value_ptr), WR(value_size)); +} +inline void proxy_getMetadataStruct(MetadataType type, const char* name_ptr, size_t name_size, + const char** value_ptr_ptr, size_t* value_size_ptr) { + getMetadataStructHandler(current_context_, WS(type), WR(name_ptr), WS(name_size), + WR(value_ptr_ptr), WR(value_size_ptr)); +} +inline void proxy_setMetadataStruct(MetadataType type, const char* name_ptr, size_t name_size, + const char* value_ptr, size_t value_size) { + setMetadataStructHandler(current_context_, WS(type), WR(name_ptr), WS(name_size), WR(value_ptr), + WS(value_size)); +} + +// Continue +inline void proxy_continueRequest() { continueRequestHandler(current_context_); } +inline void proxy_continueResponse() { continueResponseHandler(current_context_); } + +// SharedData +inline void proxy_getSharedData(const char* key_ptr, size_t key_size, const char** value_ptr, + size_t* value_size, uint32_t* cas) { + getSharedDataHandler(current_context_, WR(key_ptr), WS(key_size), WR(value_ptr), WR(value_size), + WR(cas)); +} +// If cas != 0 and cas != the current cas for 'key' return false, otherwise set the value and +// return true. 
+inline bool proxy_setSharedData(const char* key_ptr, size_t key_size, const char* value_ptr, + size_t value_size, uint64_t cas) { + return setSharedDataHandler(current_context_, WR(key_ptr), WS(key_size), WR(value_ptr), + WS(value_size), WS(cas)); +} + +// Headers/Trailers/Metadata Maps +inline void proxy_addHeaderMapValue(HeaderMapType type, const char* key_ptr, size_t key_size, + const char* value_ptr, size_t value_size) { + addHeaderMapValueHandler(current_context_, WS(type), WR(key_ptr), WS(key_size), WR(value_ptr), + WS(value_size)); +} +inline void proxy_getHeaderMapValue(HeaderMapType type, const char* key_ptr, size_t key_size, + const char** value_ptr, size_t* value_size) { + getHeaderMapValueHandler(current_context_, WS(type), WR(key_ptr), WS(key_size), WR(value_ptr), + WR(value_size)); +} +inline void proxy_getHeaderMapPairs(HeaderMapType type, const char** ptr, size_t* size) { + getHeaderMapPairsHandler(current_context_, WS(type), WR(ptr), WR(size)); +} +inline void proxy_setHeaderMapPairs(HeaderMapType type, const char* ptr, size_t size) { + setHeaderMapPairsHandler(current_context_, WS(type), WR(ptr), WS(size)); +} +inline void proxy_replaceHeaderMapValue(HeaderMapType type, const char* key_ptr, size_t key_size, + const char* value_ptr, size_t value_size) { + replaceHeaderMapValueHandler(current_context_, WS(type), WR(key_ptr), WS(key_size), WR(value_ptr), + WS(value_size)); +} +inline void proxy_removeHeaderMapValue(HeaderMapType type, const char* key_ptr, size_t key_size) { + removeHeaderMapValueHandler(current_context_, WS(type), WR(key_ptr), WS(key_size)); +} + +// Body +inline void proxy_getRequestBodyBufferBytes(uint64_t start, uint64_t length, const char** ptr, + size_t* size) { + getRequestBodyBufferBytesHandler(current_context_, Word(start), Word(length), WR(ptr), WR(size)); +} +inline void proxy_getResponseBodyBufferBytes(uint64_t start, uint64_t length, const char** ptr, + size_t* size) { + 
getResponseBodyBufferBytesHandler(current_context_, WS(start), WS(length), WR(ptr), WR(size)); +} + +// HTTP +// Returns token, used in callback onHttpCallResponse +inline uint64_t proxy_httpCall(const char* uri_ptr, size_t uri_size, void* header_pairs_ptr, + size_t header_pairs_size, const char* body_ptr, size_t body_size, + void* trailer_pairs_ptr, size_t trailer_pairs_size, + uint64_t timeout_milliseconds) { + return httpCallHandler(current_context_, WR(uri_ptr), WS(uri_size), WR(header_pairs_ptr), + WS(header_pairs_size), WR(body_ptr), WS(body_size), WR(trailer_pairs_ptr), + WS(trailer_pairs_size), WS(timeout_milliseconds)); +} +// gRPC +// Returns token, used in gRPC callbacks (onGrpc...) +inline uint64_t proxy_grpcCall(const char* service_ptr, size_t service_size, + const char* service_name_ptr, size_t service_name_size, + const char* method_name_ptr, size_t method_name_size, + const char* request_ptr, size_t request_size, + uint64_t timeout_milliseconds) { + return grpcCallHandler(current_context_, WR(service_ptr), WS(service_size), WR(service_name_ptr), + WS(service_name_size), WR(method_name_ptr), WS(method_name_size), + WR(request_ptr), WS(request_size), WS(timeout_milliseconds)); +} +inline uint64_t proxy_grpcStream(const char* service_ptr, size_t service_size, + const char* service_name_ptr, size_t service_name_size, + const char* method_name_ptr, size_t method_name_size) { + return grpcStreamHandler(current_context_, WR(service_ptr), WS(service_size), + WR(service_name_ptr), WS(service_name_size), WR(method_name_ptr), + WS(method_name_size)); +} +inline void proxy_grpcCancel(uint64_t token) { grpcCancelHandler(current_context_, WS(token)); } +inline void proxy_grpcClose(uint64_t token) { grpcCloseHandler(current_context_, WS(token)); } +inline void proxy_grpcSend(uint64_t token, const char* message_ptr, size_t message_size, + uint64_t end_stream) { + grpcSendHandler(current_context_, WS(token), WR(message_ptr), WS(message_size), WS(end_stream)); +} + 
+// Metrics +// Returns a metric_id which can be used to report a metric. On error returns 0. +inline uint32_t proxy_defineMetric(MetricType type, const char* name_ptr, size_t name_size) { + return defineMetricHandler(current_context_, WS(type), WR(name_ptr), WS(name_size)); +} +inline void proxy_incrementMetric(uint32_t metric_id, int64_t offset) { + incrementMetricHandler(current_context_, WS(metric_id), WS(offset)); +} +inline void proxy_recordMetric(uint32_t metric_id, uint64_t value) { + recordMetricHandler(current_context_, WS(metric_id), WS(value)); +} +inline uint64_t proxy_getMetric(uint32_t metric_id) { + return getMetricHandler(current_context_, WS(metric_id)); +} + +#undef WS +#undef WR + +} // namespace Plugin +} // namespace Null +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/wasm/v8/BUILD b/source/extensions/common/wasm/v8/BUILD new file mode 100644 index 0000000000000..322a37ad6452c --- /dev/null +++ b/source/extensions/common/wasm/v8/BUILD @@ -0,0 +1,28 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "v8_lib", + srcs = ["v8.cc"], + hdrs = ["v8.h"], + external_deps = [ + "v8", + ], + deps = [ + "//include/envoy/server:wasm_interface", + "//include/envoy/thread_local:thread_local_interface", + "//source/common/common:assert_lib", + "//source/common/common:c_smart_ptr_lib", + "//source/common/protobuf", + "//source/extensions/common/wasm:wasm_hdr", + "//source/extensions/common/wasm:well_known_names", + "@envoy_api//envoy/config/wasm/v2:wasm_cc", + ], +) diff --git a/source/extensions/common/wasm/v8/v8.cc b/source/extensions/common/wasm/v8/v8.cc new file mode 100644 index 0000000000000..30b8c32d45778 --- /dev/null +++ b/source/extensions/common/wasm/v8/v8.cc @@ -0,0 +1,646 @@ +#include "extensions/common/wasm/v8/v8.h" + +#include +#include + 
+#include +#include +#include +#include +#include + +#include "envoy/common/exception.h" +#include "envoy/server/wasm.h" + +#include "common/common/assert.h" +#include "common/common/logger.h" + +#include "extensions/common/wasm/wasm.h" +#include "extensions/common/wasm/well_known_names.h" + +#include "absl/strings/match.h" +#include "absl/types/span.h" +#include "absl/utility/utility.h" +#include "wasm-c-api/wasm.hh" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Wasm { + +extern thread_local Envoy::Extensions::Common::Wasm::Context* current_context_; + +namespace V8 { + +wasm::Engine* engine() { + static const auto engine = wasm::Engine::make(); + return engine.get(); +} + +class V8 : public WasmVm { +public: + V8() = default; + + // Extensions::Common::Wasm::WasmVm + absl::string_view vm() override { return WasmVmNames::get().v8; } + + bool load(const std::string& code, bool allow_precompiled) override; + absl::string_view getUserSection(absl::string_view name) override; + void link(absl::string_view debug_name, bool needs_emscripten) override; + + // We don't care about this. + void makeModule(absl::string_view) override {} + + // v8 is currently not clonable. 
+ bool clonable() override { return false; } + std::unique_ptr clone() override { return nullptr; } + + void start(Context* context) override; + absl::string_view getMemory(uint64_t pointer, uint64_t size) override; + bool getMemoryOffset(void* host_pointer, uint64_t* vm_pointer) override; + bool setMemory(uint64_t pointer, uint64_t size, void* data) override; + bool setWord(uint64_t pointer, uint64_t word) override; + +#define _REGISTER_HOST_GLOBAL(_type) \ + std::unique_ptr> makeGlobal(absl::string_view moduleName, absl::string_view name, \ + _type initialValue) override { \ + return registerHostGlobalImpl(moduleName, name, initialValue); \ + }; + _REGISTER_HOST_GLOBAL(Word); + _REGISTER_HOST_GLOBAL(double); +#undef _REGISTER_HOST_GLOBAL + +#define _REGISTER_HOST_FUNCTION(_type) \ + void registerCallback(absl::string_view moduleName, absl::string_view functionName, _type, \ + typename ConvertFunctionTypeWordToUint32<_type>::type f) override { \ + registerHostFunctionImpl(moduleName, functionName, f); \ + }; + _REGISTER_HOST_FUNCTION(WasmCallback0Void); + _REGISTER_HOST_FUNCTION(WasmCallback1Void); + _REGISTER_HOST_FUNCTION(WasmCallback2Void); + _REGISTER_HOST_FUNCTION(WasmCallback3Void); + _REGISTER_HOST_FUNCTION(WasmCallback4Void); + _REGISTER_HOST_FUNCTION(WasmCallback5Void); + _REGISTER_HOST_FUNCTION(WasmCallback0Int); + _REGISTER_HOST_FUNCTION(WasmCallback1Int); + _REGISTER_HOST_FUNCTION(WasmCallback2Int); + _REGISTER_HOST_FUNCTION(WasmCallback3Int); + _REGISTER_HOST_FUNCTION(WasmCallback4Int); + _REGISTER_HOST_FUNCTION(WasmCallback5Int); + _REGISTER_HOST_FUNCTION(WasmCallback6Int); + _REGISTER_HOST_FUNCTION(WasmCallback7Int); + _REGISTER_HOST_FUNCTION(WasmCallback8Int); + _REGISTER_HOST_FUNCTION(WasmCallback9Int); + _REGISTER_HOST_FUNCTION(WasmCallback_ZWl); + _REGISTER_HOST_FUNCTION(WasmCallback_ZWm); + _REGISTER_HOST_FUNCTION(WasmCallback_m); + _REGISTER_HOST_FUNCTION(WasmCallback_mW); +#undef _REGISTER_HOST_FUNCTION + +#define _GET_MODULE_FUNCTION(_type) 
\ + void getFunction(absl::string_view functionName, _type* f) override { \ + getModuleFunctionImpl(functionName, f); \ + }; + _GET_MODULE_FUNCTION(WasmCall0Void); + _GET_MODULE_FUNCTION(WasmCall1Void); + _GET_MODULE_FUNCTION(WasmCall2Void); + _GET_MODULE_FUNCTION(WasmCall3Void); + _GET_MODULE_FUNCTION(WasmCall4Void); + _GET_MODULE_FUNCTION(WasmCall5Void); + _GET_MODULE_FUNCTION(WasmCall8Void); + _GET_MODULE_FUNCTION(WasmCall1Int); + _GET_MODULE_FUNCTION(WasmCall3Int); +#undef _GET_MODULE_FUNCTION + +private: + void callModuleFunction(Context* context, absl::string_view functionName, const wasm::Val args[], + wasm::Val results[]); + + template + std::unique_ptr> registerHostGlobalImpl(absl::string_view moduleName, + absl::string_view name, T initialValue); + + template + void registerHostFunctionImpl(absl::string_view moduleName, absl::string_view functionName, + void (*function)(void*, Args...)); + + template + void registerHostFunctionImpl(absl::string_view moduleName, absl::string_view functionName, + R (*function)(void*, Args...)); + + template + void getModuleFunctionImpl(absl::string_view functionName, + std::function* function); + + template + void getModuleFunctionImpl(absl::string_view functionName, + std::function* function); + + wasm::vec source_ = wasm::vec::invalid(); + wasm::own store_; + wasm::own module_; + wasm::own instance_; + wasm::own memory_; + wasm::own table_; + + absl::flat_hash_map> host_globals_; + absl::flat_hash_map> host_functions_; + absl::flat_hash_map> module_functions_; + bool module_needs_emscripten_{}; +}; + +// Helper functions. 
+ +static const char* printValKind(wasm::ValKind kind) { + switch (kind) { + case wasm::I32: + return "i32"; + case wasm::I64: + return "i64"; + case wasm::F32: + return "f32"; + case wasm::F64: + return "f64"; + case wasm::ANYREF: + return "anyref"; + case wasm::FUNCREF: + return "funcref"; + default: + return "unknown"; + } +} + +static std::string printValTypes(const wasm::vec& types) { + if (types.size() == 0) { + return "void"; + } + + std::string s; + s.reserve(types.size() * 8 /* max size + " " */ - 1); + for (size_t i = 0; i < types.size(); i++) { + if (i) { + s.append(" "); + } + s.append(printValKind(types[i]->kind())); + } + return s; +} + +static bool equalValTypes(const wasm::vec& left, + const wasm::vec& right) { + if (left.size() != right.size()) { + return false; + } + for (size_t i = 0; i < left.size(); i++) { + if (left[i]->kind() != right[i]->kind()) { + return false; + } + } + return true; +} + +static uint32_t parseVarint(const byte_t*& pos, const byte_t* end) { + uint32_t n = 0; + uint32_t shift = 0; + byte_t b; + do { + if (pos + 1 > end) { + throw WasmException("Failed to parse corrupted WASM module"); + } + b = *pos++; + n += (b & 0x7f) << shift; + shift += 7; + } while ((b & 0x80) != 0); + return n; +} + +// Template magic. 
+ +template struct ConvertWordType { using type = T; }; +template <> struct ConvertWordType { using type = uint32_t; }; + +template wasm::Val makeVal(T t) { return wasm::Val::make(t); } +template <> wasm::Val makeVal(Word t) { return wasm::Val::make(static_cast(t.u64)); } + +template constexpr auto convertArgToValKind(); +template <> constexpr auto convertArgToValKind() { return wasm::I32; }; +template <> constexpr auto convertArgToValKind() { return wasm::I32; }; +template <> constexpr auto convertArgToValKind() { return wasm::I32; }; +template <> constexpr auto convertArgToValKind() { return wasm::I64; }; +template <> constexpr auto convertArgToValKind() { return wasm::I64; }; +template <> constexpr auto convertArgToValKind() { return wasm::F32; }; +template <> constexpr auto convertArgToValKind() { return wasm::F64; }; + +template struct V8ProxyForGlobal : Global { + V8ProxyForGlobal(wasm::Global* value) : global_(value) {} + + T get() override { return global_->get().get::type>(); }; + void set(const T& value) override { global_->set(makeVal(static_cast(value))); }; + + wasm::Global* global_; +}; + +template +constexpr auto convertArgsTupleToValTypesImpl(absl::index_sequence) { + return wasm::vec::make( + wasm::ValType::make(convertArgToValKind::type>())...); +} + +template constexpr auto convertArgsTupleToValTypes() { + return convertArgsTupleToValTypesImpl(absl::make_index_sequence::value>()); +} + +template +constexpr T convertValTypesToArgsTupleImpl(const U& arr, absl::index_sequence) { + return std::make_tuple( + (arr[I] + .template get< + typename ConvertWordType::type>::type>())...); +} + +template constexpr T convertValTypesToArgsTuple(const U& arr) { + return convertValTypesToArgsTupleImpl(arr, + absl::make_index_sequence::value>()); +} + +// V8 implementation. 
+ +bool V8::load(const std::string& code, bool /* allow_precompiled */) { + ENVOY_LOG(trace, "[wasm] load()"); + store_ = wasm::Store::make(engine()); + RELEASE_ASSERT(store_ != nullptr, ""); + + source_ = wasm::vec::make_uninitialized(code.size()); + ::memcpy(source_.get(), code.data(), code.size()); + + module_ = wasm::Module::make(store_.get(), source_); + return module_ != nullptr; +} + +absl::string_view V8::getUserSection(absl::string_view name) { + ENVOY_LOG(trace, "[wasm] getUserSection(\"{}\")", name); + ASSERT(source_.get() != nullptr); + + const byte_t* end = source_.get() + source_.size(); + const byte_t* pos = source_.get() + 8; // skip header + while (pos < end) { + if (pos + 1 > end) { + throw WasmException("Failed to parse corrupted WASM module"); + } + auto type = *pos++; + auto rest = parseVarint(pos, end); + if (pos + rest > end) { + throw WasmException("Failed to parse corrupted WASM module"); + } + if (type == 0 /* custom section */) { + auto start = pos; + auto len = parseVarint(pos, end); + if (pos + len > end) { + throw WasmException("Failed to parse corrupted WASM module"); + } + pos += len; + rest -= (pos - start); + if (len == name.size() && ::memcmp(pos - len, name.data(), len) == 0) { + ENVOY_LOG(trace, "[wasm] getUserSection(\"{}\") found, size: {}", name, rest); + return absl::string_view(pos, rest); + } + } + pos += rest; + } + return ""; +} + +void V8::link(absl::string_view debug_name, bool needs_emscripten) { + ENVOY_LOG(trace, "[wasm] link(\"{}\"), emscripten: {}", debug_name, needs_emscripten); + ASSERT(module_ != nullptr); + + const auto import_types = module_.get()->imports(); + std::vector imports; + + for (size_t i = 0; i < import_types.size(); i++) { + absl::string_view module(import_types[i]->module().get(), import_types[i]->module().size()); + absl::string_view name(import_types[i]->name().get(), import_types[i]->name().size()); + auto import_type = import_types[i]->type(); + + switch (import_type->kind()) { + + case 
wasm::EXTERN_FUNC: { + ENVOY_LOG(trace, "[wasm] link(), export host func: {}.{} ({} -> {})", module, name, + printValTypes(import_type->func()->params()), + printValTypes(import_type->func()->results())); + + const wasm::Func* func = nullptr; + auto it = host_functions_.find(absl::StrCat(module, ".", name)); + if (it != host_functions_.end()) { + func = it->second.get(); + } else { + it = host_functions_.find(absl::StrCat("envoy", ".", name)); + if (it != host_functions_.end()) { + func = it->second.get(); + } + } + if (func) { + if (equalValTypes(import_type->func()->params(), func->type()->params()) && + equalValTypes(import_type->func()->results(), func->type()->results())) { + imports.push_back(func); + } else { + throw WasmException(fmt::format( + "Failed to load WASM module due to an import type mismatch: {}.{}, " + "want: {} -> {}, but host exports: {} -> {}", + module, name, printValTypes(import_type->func()->params()), + printValTypes(import_type->func()->results()), printValTypes(func->type()->params()), + printValTypes(func->type()->results()))); + } + } else { + throw WasmException( + fmt::format("Failed to load WASM module due to a missing import: {}.{}", module, name)); + } + } break; + + case wasm::EXTERN_GLOBAL: { + ENVOY_LOG(trace, "[wasm] link(), export host global: {}.{} ({})", module, name, + printValKind(import_type->global()->content()->kind())); + + const wasm::Global* global = nullptr; + auto it = host_globals_.find(absl::StrCat(module, ".", name)); + if (it != host_globals_.end()) { + global = it->second.get(); + } else { + it = host_globals_.find(absl::StrCat("envoy", ".", name)); + if (it != host_globals_.end()) { + global = it->second.get(); + } + } + if (global) { + imports.push_back(global); + } else { + throw WasmException( + fmt::format("Failed to load WASM module due to a missing import: {}.{}", module, name)); + } + } break; + + case wasm::EXTERN_MEMORY: { + ENVOY_LOG(trace, "[wasm] link(), export host memory: {}.{} (min: {} max: 
{})", module, name, + import_type->memory()->limits().min, import_type->memory()->limits().max); + + ASSERT(memory_ == nullptr); + auto type = wasm::MemoryType::make(import_type->memory()->limits()); + memory_ = wasm::Memory::make(store_.get(), type.get()); + imports.push_back(memory_.get()); + } break; + + case wasm::EXTERN_TABLE: { + ENVOY_LOG(trace, "[wasm] link(), export host table: {}.{} (min: {} max: {})", module, name, + import_type->table()->limits().min, import_type->table()->limits().max); + + ASSERT(table_ == nullptr); + auto type = + wasm::TableType::make(wasm::ValType::make(import_type->table()->element()->kind()), + import_type->table()->limits()); + table_ = wasm::Table::make(store_.get(), type.get()); + imports.push_back(table_.get()); + } break; + } + } + + ASSERT(import_types.size() == imports.size()); + + instance_ = wasm::Instance::make(store_.get(), module_.get(), imports.data()); + RELEASE_ASSERT(instance_ != nullptr, ""); + module_needs_emscripten_ = needs_emscripten; + + const auto export_types = module_.get()->exports(); + const auto exports = instance_.get()->exports(); + ASSERT(export_types.size() == exports.size()); + + for (size_t i = 0; i < export_types.size(); i++) { + absl::string_view name(export_types[i]->name().get(), export_types[i]->name().size()); + auto export_type = export_types[i]->type(); + auto export_item = exports[i]; + ASSERT(export_type->kind() == export_item->kind()); + + switch (export_type->kind()) { + + case wasm::EXTERN_FUNC: { + ENVOY_LOG(trace, "[wasm] link(), import module func: {} ({} -> {})", name, + printValTypes(export_type->func()->params()), + printValTypes(export_type->func()->results())); + + ASSERT(export_item->func() != nullptr); + module_functions_.emplace(name, export_item->func()->copy()); + } break; + + case wasm::EXTERN_GLOBAL: { + // TODO(PiotrSikora): add support when/if needed. 
+ ENVOY_LOG(trace, "[wasm] link(), import module global: {} ({}) --- IGNORED", name, + printValKind(export_type->global()->content()->kind())); + } break; + + case wasm::EXTERN_MEMORY: { + ENVOY_LOG(trace, "[wasm] link(), import module memory: {} (min: {} max: {})", name, + export_type->memory()->limits().min, export_type->memory()->limits().max); + + ASSERT(export_item->memory() != nullptr); + ASSERT(memory_ == nullptr); + memory_ = exports[i]->memory()->copy(); + } break; + + case wasm::EXTERN_TABLE: { + // TODO(PiotrSikora): add support when/if needed. + ENVOY_LOG(trace, "[wasm] link(), import module table: {} (min: {} max: {}) --- IGNORED", name, + export_type->table()->limits().min, export_type->table()->limits().max); + } break; + } + } +} + +void V8::start(Context* context) { + ENVOY_LOG(trace, "[wasm] start()"); + + if (module_needs_emscripten_) { + const wasm::Val args[] = {wasm::Val::make(static_cast(64 * 64 * 1024 /* 4MB */)), + wasm::Val::make(static_cast(128 * 64 * 1024 /* 8MB */))}; + callModuleFunction(context, "establishStackSpace", args, nullptr); + callModuleFunction(context, "globalCtors", nullptr, nullptr); + + for (const auto& kv : module_functions_) { + if (absl::StartsWith(kv.first, "__GLOBAL__")) { + const wasm::Func* func = kv.second.get(); + auto trap = func->call(nullptr, nullptr); + if (trap) { + throw WasmVmException( + fmt::format("Function: {} failed: {}", kv.first, + absl::string_view(trap->message().get(), trap->message().size()))); + } + } + } + } + + callModuleFunction(context, "__post_instantiate", nullptr, nullptr); +} + +void V8::callModuleFunction(Context* context, absl::string_view functionName, + const wasm::Val args[], wasm::Val results[]) { + ENVOY_LOG(trace, "[wasm] callModuleFunction(\"{}\")", functionName); + current_context_ = context; + + auto it = module_functions_.find(functionName); + if (it != module_functions_.end()) { + const wasm::Func* func = it->second.get(); + auto trap = func->call(args, results); + if 
(trap) { + throw WasmVmException( + fmt::format("Function: {} failed: {}", functionName, + absl::string_view(trap->message().get(), trap->message().size()))); + } + } +} + +absl::string_view V8::getMemory(uint64_t pointer, uint64_t size) { + ENVOY_LOG(trace, "[wasm] getMemory({}, {})", pointer, size); + ASSERT(memory_ != nullptr); + RELEASE_ASSERT(pointer + size <= memory_->data_size(), ""); + return absl::string_view(memory_->data() + pointer, size); +} + +bool V8::getMemoryOffset(void* host_pointer, uint64_t* vm_pointer) { + ENVOY_LOG(trace, "[wasm] getMemoryOffset({})", host_pointer); + ASSERT(memory_ != nullptr); + RELEASE_ASSERT(static_cast(host_pointer) >= memory_->data(), ""); + RELEASE_ASSERT(static_cast(host_pointer) <= memory_->data() + memory_->data_size(), ""); + *vm_pointer = static_cast(host_pointer) - memory_->data(); + return true; +} + +bool V8::setMemory(uint64_t pointer, uint64_t size, void* data) { + ENVOY_LOG(trace, "[wasm] setMemory({}, {})", pointer, size); + ASSERT(memory_ != nullptr); + RELEASE_ASSERT(pointer + size <= memory_->data_size(), ""); + ::memcpy(memory_->data() + pointer, data, size); + return true; +} + +bool V8::setWord(uint64_t pointer, uint64_t word) { + ENVOY_LOG(trace, "[wasm] setWord({}, {})", pointer, word); + ASSERT(memory_ != nullptr); + RELEASE_ASSERT(pointer + sizeof(uint32_t) <= memory_->data_size(), ""); + uint32_t word32 = static_cast(word); + ::memcpy(memory_->data() + pointer, &word32, sizeof(uint32_t)); + return true; +} + +template +std::unique_ptr> V8::registerHostGlobalImpl(absl::string_view moduleName, + absl::string_view name, T initialValue) { + ENVOY_LOG(trace, "[wasm] registerHostGlobal(\"{}.{}\", {})", moduleName, name, initialValue); + auto value = makeVal(initialValue); + auto type = wasm::GlobalType::make(wasm::ValType::make(value.kind()), wasm::CONST); + auto global = wasm::Global::make(store_.get(), type.get(), value); + auto proxy = std::make_unique>(global.get()); + 
host_globals_.emplace(absl::StrCat(moduleName, ".", name), std::move(global)); + return proxy; +} + +template +void V8::registerHostFunctionImpl(absl::string_view moduleName, absl::string_view functionName, + void (*function)(void*, Args...)) { + ENVOY_LOG(trace, "[wasm] registerHostFunction(\"{}.{}\")", moduleName, functionName); + auto type = wasm::FuncType::make(convertArgsTupleToValTypes>(), + convertArgsTupleToValTypes>()); + auto func = wasm::Func::make( + store_.get(), type.get(), + [](void* data, const wasm::Val params[], wasm::Val[]) -> wasm::own { + auto args_tuple = convertValTypesToArgsTuple>(params); + auto args = std::tuple_cat(std::make_tuple(current_context_), args_tuple); + auto function = reinterpret_cast(data); + absl::apply(function, args); + return nullptr; + }, + reinterpret_cast(function)); + host_functions_.emplace(absl::StrCat(moduleName, ".", functionName), std::move(func)); +} + +template +void V8::registerHostFunctionImpl(absl::string_view moduleName, absl::string_view functionName, + R (*function)(void*, Args...)) { + ENVOY_LOG(trace, "[wasm] registerHostFunction(\"{}.{}\")", moduleName, functionName); + auto type = wasm::FuncType::make(convertArgsTupleToValTypes>(), + convertArgsTupleToValTypes>()); + auto func = wasm::Func::make( + store_.get(), type.get(), + [](void* data, const wasm::Val params[], wasm::Val results[]) -> wasm::own { + auto args_tuple = convertValTypesToArgsTuple>(params); + auto args = std::tuple_cat(std::make_tuple(current_context_), args_tuple); + auto function = reinterpret_cast(data); + R rvalue = absl::apply(function, args); + results[0] = makeVal(rvalue); + return nullptr; + }, + reinterpret_cast(function)); + host_functions_.emplace(absl::StrCat(moduleName, ".", functionName), std::move(func)); +} + +template +void V8::getModuleFunctionImpl(absl::string_view functionName, + std::function* function) { + ENVOY_LOG(trace, "[wasm] getModuleFunction(\"{}\")", functionName); + auto it = 
module_functions_.find(functionName); + if (it == module_functions_.end()) { + *function = nullptr; + return; + } + const wasm::Func* func = it->second.get(); + if (!equalValTypes(func->type()->params(), convertArgsTupleToValTypes>()) || + !equalValTypes(func->type()->results(), convertArgsTupleToValTypes>())) { + throw WasmVmException(fmt::format("Bad function signature for: {}", functionName)); + } + *function = [func, functionName](Context* context, Args... args) -> void { + ENVOY_LOG(trace, "[wasm] callModuleFunction(\"{}\")", functionName); + current_context_ = context; + wasm::Val params[] = {makeVal(args)...}; + auto trap = func->call(params, nullptr); + if (trap) { + throw WasmVmException( + fmt::format("Function: {} failed: {}", functionName, + absl::string_view(trap->message().get(), trap->message().size()))); + } + }; +} + +template +void V8::getModuleFunctionImpl(absl::string_view functionName, + std::function* function) { + ENVOY_LOG(trace, "[wasm] getModuleFunction(\"{}\")", functionName); + auto it = module_functions_.find(functionName); + if (it == module_functions_.end()) { + *function = nullptr; + return; + } + const wasm::Func* func = it->second.get(); + if (!equalValTypes(func->type()->params(), convertArgsTupleToValTypes>()) || + !equalValTypes(func->type()->results(), convertArgsTupleToValTypes>())) { + throw WasmVmException(fmt::format("Bad function signature for: {}", functionName)); + } + *function = [func, functionName](Context* context, Args... 
args) -> R { + ENVOY_LOG(trace, "[wasm] callModuleFunction(\"{}\")", functionName); + current_context_ = context; + wasm::Val params[] = {makeVal(args)...}; + wasm::Val results[1]; + auto trap = func->call(params, results); + if (trap) { + throw WasmVmException( + fmt::format("Function: {} failed: {}", functionName, + absl::string_view(trap->message().get(), trap->message().size()))); + } + R rvalue = results[0].get::type>(); + return rvalue; + }; +} + +std::unique_ptr createVm() { return std::make_unique(); } + +} // namespace V8 +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/wasm/v8/v8.h b/source/extensions/common/wasm/v8/v8.h new file mode 100644 index 0000000000000..7a22f6dbcbe4c --- /dev/null +++ b/source/extensions/common/wasm/v8/v8.h @@ -0,0 +1,19 @@ +#pragma once + +#include + +#include "extensions/common/wasm/wasm.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Wasm { +namespace V8 { + +std::unique_ptr createVm(); + +} // namespace V8 +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/wasm/wasm.cc b/source/extensions/common/wasm/wasm.cc index 4951a5f87005f..4a598fe87f5d0 100644 --- a/source/extensions/common/wasm/wasm.cc +++ b/source/extensions/common/wasm/wasm.cc @@ -26,6 +26,8 @@ #include "common/http/utility.h" #include "common/tracing/http_tracer_impl.h" +#include "extensions/common/wasm/null/null.h" +#include "extensions/common/wasm/v8/v8.h" #include "extensions/common/wasm/wavm/wavm.h" #include "extensions/common/wasm/well_known_names.h" @@ -157,26 +159,26 @@ Pairs toPairs(absl::string_view buffer) { } template -void getPairs(Context* context, const Pairs& result, uint32_t ptr_ptr, uint32_t size_ptr) { +void getPairs(Context* context, const Pairs& result, uint64_t ptr_ptr, uint64_t size_ptr) { if (result.empty()) { context->wasm()->copyToPointerSize("", 
ptr_ptr, size_ptr); return; } - uint32_t size = pairsSize(result); - uint32_t ptr; + uint64_t size = pairsSize(result); + uint64_t ptr; char* buffer = static_cast(context->wasm()->allocMemory(size, &ptr)); marshalPairs(result, buffer); - context->wasmVm()->setMemory(ptr_ptr, sizeof(int32_t), &ptr); - context->wasmVm()->setMemory(size_ptr, sizeof(int32_t), &size); + context->wasmVm()->setWord(ptr_ptr, ptr); + context->wasmVm()->setWord(size_ptr, size); } -void exportPairs(Context* context, const Pairs& pairs, uint32_t* ptr_ptr, uint32_t* size_ptr) { +void exportPairs(Context* context, const Pairs& pairs, uint64_t* ptr_ptr, uint64_t* size_ptr) { if (pairs.empty()) { *ptr_ptr = 0; *size_ptr = 0; return; } - uint32_t size = pairsSize(pairs); + uint64_t size = pairsSize(pairs); char* buffer = static_cast(context->wasm()->allocMemory(size, ptr_ptr)); marshalPairs(pairs, buffer); *size_ptr = size; @@ -194,67 +196,105 @@ Http::HeaderMapPtr buildHeaderMapFromPairs(const Pairs& pairs) { return map; } +const ProtobufWkt::Struct* +getStructProtoFromMetadata(const envoy::api::v2::core::Metadata& metadata, + absl::string_view name = "") { + if (name.empty()) { + name = HttpFilters::HttpFilterNames::get().Wasm; + } + const auto filter_it = metadata.filter_metadata().find(std::string(name)); + if (filter_it == metadata.filter_metadata().end()) { + return nullptr; + } + return &filter_it->second; +} + +const ProtobufWkt::Struct* getRouteMetadataStructProto(Http::StreamFilterCallbacks* callbacks) { + if (callbacks == nullptr || callbacks->route() == nullptr || + callbacks->route()->routeEntry() == nullptr) { + return nullptr; + } + return getStructProtoFromMetadata(callbacks->route()->routeEntry()->metadata()); +} + +const uint8_t* decodeVarint(const uint8_t* pos, const uint8_t* end, uint32_t* out) { + uint32_t ret = 0; + int shift = 0; + while (pos < end && (*pos & 0x80)) { + ret |= (*pos & 0x7f) << shift; + shift += 7; + pos++; + } + if (pos < end) { + ret |= *pos << shift; + 
pos++; + } + *out = ret; + return pos; +} + +} // namespace + // // HTTP Handlers // // StreamInfo -void getProtocolHandler(void* raw_context, uint32_t type, uint32_t value_ptr_ptr, - uint32_t value_size_ptr) { +void getProtocolHandler(void* raw_context, Word type, Word value_ptr_ptr, Word value_size_ptr) { if (type > static_cast(StreamType::MAX)) return; auto context = WASM_CONTEXT(raw_context); - context->wasm()->copyToPointerSize(context->getProtocol(static_cast(type)), + context->wasm()->copyToPointerSize(context->getProtocol(static_cast(type.u64)), value_ptr_ptr, value_size_ptr); } // Metadata -void getMetadataHandler(void* raw_context, uint32_t type, uint32_t key_ptr, uint32_t key_size, - uint32_t value_ptr_ptr, uint32_t value_size_ptr) { +void getMetadataHandler(void* raw_context, Word type, Word key_ptr, Word key_size, + Word value_ptr_ptr, Word value_size_ptr) { if (type > static_cast(MetadataType::MAX)) return; auto context = WASM_CONTEXT(raw_context); context->wasm()->copyToPointerSize( - context->getMetadata(static_cast(type), + context->getMetadata(static_cast(type.u64), context->wasmVm()->getMemory(key_ptr, key_size)), value_ptr_ptr, value_size_ptr); } -void setMetadataHandler(void* raw_context, uint32_t type, uint32_t key_ptr, uint32_t key_size, - uint32_t value_ptr, uint32_t value_size) { +void setMetadataHandler(void* raw_context, Word type, Word key_ptr, Word key_size, Word value_ptr, + Word value_size) { if (type > static_cast(MetadataType::MAX)) return; auto context = WASM_CONTEXT(raw_context); - context->setMetadata(static_cast(type), + context->setMetadata(static_cast(type.u64), context->wasmVm()->getMemory(key_ptr, key_size), context->wasmVm()->getMemory(value_ptr, value_size)); } -void getMetadataPairsHandler(void* raw_context, uint32_t type, uint32_t ptr_ptr, - uint32_t size_ptr) { +void getMetadataPairsHandler(void* raw_context, Word type, Word ptr_ptr, Word size_ptr) { if (type > static_cast(MetadataType::MAX)) return; auto context = 
WASM_CONTEXT(raw_context); - getPairs(context, context->getMetadataPairs(static_cast(type)), ptr_ptr, size_ptr); + getPairs(context, context->getMetadataPairs(static_cast(type.u64)), ptr_ptr, + size_ptr); } -void getMetadataStructHandler(void* raw_context, uint32_t type, uint32_t name_ptr, - uint32_t name_size, uint32_t value_ptr_ptr, uint32_t value_size_ptr) { +void getMetadataStructHandler(void* raw_context, Word type, Word name_ptr, Word name_size, + Word value_ptr_ptr, Word value_size_ptr) { if (type > static_cast(MetadataType::MAX)) return; auto context = WASM_CONTEXT(raw_context); context->wasm()->copyToPointerSize( - context->getMetadataStruct(static_cast(type), + context->getMetadataStruct(static_cast(type.u64), context->wasmVm()->getMemory(name_ptr, name_size)), value_ptr_ptr, value_size_ptr); } -void setMetadataStructHandler(void* raw_context, uint32_t type, uint32_t name_ptr, - uint32_t name_size, uint32_t value_ptr, uint32_t value_size) { +void setMetadataStructHandler(void* raw_context, Word type, Word name_ptr, Word name_size, + Word value_ptr, Word value_size) { if (type > static_cast(MetadataType::MAX)) return; auto context = WASM_CONTEXT(raw_context); - context->setMetadataStruct(static_cast(type), + context->setMetadataStruct(static_cast(type.u64), context->wasmVm()->getMemory(name_ptr, name_size), context->wasmVm()->getMemory(value_ptr, value_size)); } @@ -271,8 +311,8 @@ void continueResponseHandler(void* raw_context) { } // SharedData -void getSharedDataHandler(void* raw_context, uint32_t key_ptr, uint32_t key_size, - uint32_t value_ptr_ptr, uint32_t value_size_ptr, uint32_t cas_ptr) { +void getSharedDataHandler(void* raw_context, Word key_ptr, Word key_size, Word value_ptr_ptr, + Word value_size_ptr, Word cas_ptr) { auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr, key_size); auto p = context->getSharedData(key); @@ -280,97 +320,94 @@ void getSharedDataHandler(void* raw_context, uint32_t key_ptr, 
uint32_t key_size context->wasmVm()->setMemory(cas_ptr, sizeof(uint32_t), &p.second); } -uint32_t setSharedDataHandler(void* raw_context, uint32_t key_ptr, uint32_t key_size, - uint32_t value_ptr, uint32_t value_size, uint32_t cas) { +Word setSharedDataHandler(void* raw_context, Word key_ptr, Word key_size, Word value_ptr, + Word value_size, Word cas) { auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr, key_size); auto value = context->wasmVm()->getMemory(value_ptr, value_size); auto ok = context->setSharedData(key, value, cas); - return static_cast(ok); + return Word(static_cast(ok)); } // Header/Trailer/Metadata Maps -void addHeaderMapValueHandler(void* raw_context, uint32_t type, uint32_t key_ptr, uint32_t key_size, - uint32_t value_ptr, uint32_t value_size) { - if (type > static_cast(HeaderMapType::MAX)) { +void addHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size, + Word value_ptr, Word value_size) { + if (type > static_cast(HeaderMapType::MAX)) { return; } auto context = WASM_CONTEXT(raw_context); - context->addHeaderMapValue(static_cast(type), + context->addHeaderMapValue(static_cast(type.u64), context->wasmVm()->getMemory(key_ptr, key_size), context->wasmVm()->getMemory(value_ptr, value_size)); } -void getHeaderMapValueHandler(void* raw_context, uint32_t type, uint32_t key_ptr, uint32_t key_size, - uint32_t value_ptr_ptr, uint32_t value_size_ptr) { - if (type > static_cast(HeaderMapType::MAX)) { +void getHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size, + Word value_ptr_ptr, Word value_size_ptr) { + if (type > static_cast(HeaderMapType::MAX)) { return; } auto context = WASM_CONTEXT(raw_context); - auto result = context->getHeaderMapValue(static_cast(type), + auto result = context->getHeaderMapValue(static_cast(type.u64), context->wasmVm()->getMemory(key_ptr, key_size)); context->wasm()->copyToPointerSize(result, value_ptr_ptr, value_size_ptr); } -void 
replaceHeaderMapValueHandler(void* raw_context, uint32_t type, uint32_t key_ptr, - uint32_t key_size, uint32_t value_ptr, uint32_t value_size) { - if (type > static_cast(HeaderMapType::MAX)) { +void replaceHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size, + Word value_ptr, Word value_size) { + if (type > static_cast(HeaderMapType::MAX)) { return; } auto context = WASM_CONTEXT(raw_context); - context->replaceHeaderMapValue(static_cast(type), + context->replaceHeaderMapValue(static_cast(type.u64), context->wasmVm()->getMemory(key_ptr, key_size), context->wasmVm()->getMemory(value_ptr, value_size)); } -void removeHeaderMapValueHandler(void* raw_context, uint32_t type, uint32_t key_ptr, - uint32_t key_size) { - if (type > static_cast(HeaderMapType::MAX)) { +void removeHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size) { + if (type > static_cast(HeaderMapType::MAX)) { return; } auto context = WASM_CONTEXT(raw_context); - context->removeHeaderMapValue(static_cast(type), + context->removeHeaderMapValue(static_cast(type.u64), context->wasmVm()->getMemory(key_ptr, key_size)); } -void getHeaderMapPairsHandler(void* raw_context, uint32_t type, uint32_t ptr_ptr, - uint32_t size_ptr) { - if (type > static_cast(HeaderMapType::MAX)) { +void getHeaderMapPairsHandler(void* raw_context, Word type, Word ptr_ptr, Word size_ptr) { + if (type > static_cast(HeaderMapType::MAX)) { return; } auto context = WASM_CONTEXT(raw_context); - auto result = context->getHeaderMapPairs(static_cast(type)); + auto result = context->getHeaderMapPairs(static_cast(type.u64)); getPairs(context, result, ptr_ptr, size_ptr); } -void setHeaderMapPairsHandler(void* raw_context, uint32_t type, uint32_t ptr, uint32_t size) { - if (type > static_cast(HeaderMapType::MAX)) { +void setHeaderMapPairsHandler(void* raw_context, Word type, Word ptr, Word size) { + if (type > static_cast(HeaderMapType::MAX)) { return; } auto context = WASM_CONTEXT(raw_context); 
- context->setHeaderMapPairs(static_cast(type), + context->setHeaderMapPairs(static_cast(type.u64), toPairs(context->wasmVm()->getMemory(ptr, size))); } // Body Buffer -void getRequestBodyBufferBytesHandler(void* raw_context, uint32_t start, uint32_t length, - uint32_t ptr_ptr, uint32_t size_ptr) { +void getRequestBodyBufferBytesHandler(void* raw_context, Word start, Word length, Word ptr_ptr, + Word size_ptr) { auto context = WASM_CONTEXT(raw_context); auto result = context->getRequestBodyBufferBytes(start, length); context->wasm()->copyToPointerSize(result, ptr_ptr, size_ptr); } -void getResponseBodyBufferBytesHandler(void* raw_context, uint32_t start, uint32_t length, - uint32_t ptr_ptr, uint32_t size_ptr) { +void getResponseBodyBufferBytesHandler(void* raw_context, Word start, Word length, Word ptr_ptr, + Word size_ptr) { auto context = WASM_CONTEXT(raw_context); auto result = context->getResponseBodyBufferBytes(start, length); context->wasm()->copyToPointerSize(result, ptr_ptr, size_ptr); } -uint32_t httpCallHandler(void* raw_context, uint32_t uri_ptr, uint32_t uri_size, - uint32_t header_pairs_ptr, uint32_t header_pairs_size, uint32_t body_ptr, - uint32_t body_size, uint32_t trailer_pairs_ptr, - uint32_t trailer_pairs_size, uint32_t timeout_milliseconds) { +Word httpCallHandler(void* raw_context, Word uri_ptr, Word uri_size, Word header_pairs_ptr, + Word header_pairs_size, Word body_ptr, Word body_size, Word trailer_pairs_ptr, + Word trailer_pairs_size, Word timeout_milliseconds) { auto context = WASM_CONTEXT(raw_context); auto uri = context->wasmVm()->getMemory(uri_ptr, uri_size); auto headers = toPairs(context->wasmVm()->getMemory(header_pairs_ptr, header_pairs_size)); @@ -379,34 +416,32 @@ uint32_t httpCallHandler(void* raw_context, uint32_t uri_ptr, uint32_t uri_size, return context->httpCall(uri, headers, body, trailers, timeout_milliseconds); } -uint32_t defineMetricHandler(void* raw_context, uint32_t metric_type, uint32_t name_ptr, - uint32_t 
name_size) { - if (metric_type > static_cast(Context::MetricType::Max)) +Word defineMetricHandler(void* raw_context, Word metric_type, Word name_ptr, Word name_size) { + if (metric_type > static_cast(Context::MetricType::Max)) return 0; auto context = WASM_CONTEXT(raw_context); auto name = context->wasmVm()->getMemory(name_ptr, name_size); - return context->defineMetric(static_cast(metric_type), name); + return context->defineMetric(static_cast(metric_type.u64), name); } -void incrementMetricHandler(void* raw_context, uint32_t metric_id, int64_t offset) { +void incrementMetricHandler(void* raw_context, Word metric_id, int64_t offset) { auto context = WASM_CONTEXT(raw_context); context->incrementMetric(metric_id, offset); } -void recordMetricHandler(void* raw_context, uint32_t metric_id, uint64_t value) { +void recordMetricHandler(void* raw_context, Word metric_id, uint64_t value) { auto context = WASM_CONTEXT(raw_context); context->recordMetric(metric_id, value); } -uint64_t getMetricHandler(void* raw_context, uint32_t metric_id) { +uint64_t getMetricHandler(void* raw_context, Word metric_id) { auto context = WASM_CONTEXT(raw_context); return context->getMetric(metric_id); } -uint32_t grpcCallHandler(void* raw_context, uint32_t service_ptr, uint32_t service_size, - uint32_t service_name_ptr, uint32_t service_name_size, - uint32_t method_name_ptr, uint32_t method_name_size, uint32_t request_ptr, - uint32_t request_size, uint32_t timeout_milliseconds) { +Word grpcCallHandler(void* raw_context, Word service_ptr, Word service_size, Word service_name_ptr, + Word service_name_size, Word method_name_ptr, Word method_name_size, + Word request_ptr, Word request_size, Word timeout_milliseconds) { auto context = WASM_CONTEXT(raw_context); auto service = context->wasmVm()->getMemory(service_ptr, service_size); auto service_name = context->wasmVm()->getMemory(service_name_ptr, service_name_size); @@ -420,9 +455,9 @@ uint32_t grpcCallHandler(void* raw_context, uint32_t 
service_ptr, uint32_t servi std::chrono::milliseconds(timeout_milliseconds)); } -uint32_t grpcStreamHandler(void* raw_context, uint32_t service_ptr, uint32_t service_size, - uint32_t service_name_ptr, uint32_t service_name_size, - uint32_t method_name_ptr, uint32_t method_name_size) { +Word grpcStreamHandler(void* raw_context, Word service_ptr, Word service_size, + Word service_name_ptr, Word service_name_size, Word method_name_ptr, + Word method_name_size) { auto context = WASM_CONTEXT(raw_context); auto service = context->wasmVm()->getMemory(service_ptr, service_size); auto service_name = context->wasmVm()->getMemory(service_name_ptr, service_name_size); @@ -434,120 +469,96 @@ uint32_t grpcStreamHandler(void* raw_context, uint32_t service_ptr, uint32_t ser return context->grpcStream(service_proto, service_name, method_name); } -void grpcCancelHandler(void* raw_context, uint32_t token) { +void grpcCancelHandler(void* raw_context, Word token) { auto context = WASM_CONTEXT(raw_context); context->grpcCancel(token); } -void grpcCloseHandler(void* raw_context, uint32_t token) { +void grpcCloseHandler(void* raw_context, Word token) { auto context = WASM_CONTEXT(raw_context); context->grpcClose(token); } -void grpcSendHandler(void* raw_context, uint32_t token, uint32_t message_ptr, uint32_t message_size, - uint32_t end_stream) { +void grpcSendHandler(void* raw_context, Word token, Word message_ptr, Word message_size, + Word end_stream) { auto context = WASM_CONTEXT(raw_context); auto message = context->wasmVm()->getMemory(message_ptr, message_size); context->grpcSend(token, message, end_stream); } -uint32_t _emscripten_memcpy_bigHandler(void*, uint32_t, uint32_t, uint32_t) { +Word _emscripten_get_heap_sizeHandler(void*) { return std::numeric_limits::max(); } + +Word _emscripten_memcpy_bigHandler(void*, Word, Word, Word) { throw WasmException("emscripten emscripten_memcpy_big"); } -uint32_t _emscripten_get_heap_sizeHandler(void*) { return 0x7FFFFFFF; } - -uint32_t 
_emscripten_resize_heapHandler(void*, uint32_t) { +Word _emscripten_resize_heapHandler(void*, Word) { throw WasmException("emscripten emscripten_resize_heap"); } -uint32_t abortOnCannotGrowMemoryHandler(void*) { +Word abortOnCannotGrowMemoryHandler(void*) { throw WasmException("emscripten abortOnCannotGrowMemory"); } -void abortHandler(void*, uint32_t) { throw WasmException("emscripten abort"); } +void abortHandler(void*, Word) { throw WasmException("emscripten abort"); } void _abortHandler(void*) { throw WasmException("emscripten abort"); } void _llvm_trapHandler(void*) { throw WasmException("emscripten llvm_trap"); } -void ___assert_failHandler(void*, uint32_t, uint32_t, uint32_t, uint32_t) { +void ___assert_failHandler(void*, Word, Word, Word, Word) { throw WasmException("emscripten assert_fail"); } -void ___cxa_throwHandler(void*, uint32_t, uint32_t, uint32_t) { - throw WasmException("emscripten cxa_throw"); -} +void ___cxa_throwHandler(void*, Word, Word, Word) { throw WasmException("emscripten cxa_throw"); } void ___cxa_pure_virtualHandler(void*) { throw WasmException("emscripten cxa_pure_virtual"); } -uint32_t ___cxa_allocate_exceptionHandler(void*, uint32_t) { - throw WasmException("emscripten cxa_allocate_exception"); -} +Word ___call_mainHandler(void*, Word, Word) { throw WasmException("emscripten call_main"); } -uint32_t ___call_mainHandler(void*, uint32_t, uint32_t) { - throw WasmException("emscripten call_main"); +Word ___cxa_allocate_exceptionHandler(void*, Word) { + throw WasmException("emscripten cxa_allocate_exception"); } -uint32_t ___clock_gettimeHandler(void*, uint32_t, uint32_t) { - throw WasmException("emscripten clock_gettime"); -} +Word ___clock_gettimeHandler(void*, Word, Word) { throw WasmException("emscripten clock_gettime"); } -void ___lockHandler(void*, uint32_t) { throw WasmException("emscripten lock"); } +void ___lockHandler(void*, Word) { throw WasmException("emscripten lock"); } -void ___unlockHandler(void*, uint32_t) { throw 
WasmException("emscripten unlock"); } +void ___unlockHandler(void*, Word) { throw WasmException("emscripten unlock"); } -uint32_t ___syscall6Handler(void*, uint32_t, uint32_t) { - throw WasmException("emscripten syscall6"); -} +Word ___syscall6Handler(void*, Word, Word) { throw WasmException("emscripten syscall6"); } -uint32_t ___syscall54Handler(void*, uint32_t, uint32_t) { - throw WasmException("emscripten syscall54"); -} +Word ___syscall54Handler(void*, Word, Word) { throw WasmException("emscripten syscall54"); } -uint32_t ___syscall140Handler(void*, uint32_t, uint32_t) { - throw WasmException("emscripten syscall140"); -} +Word ___syscall140Handler(void*, Word, Word) { throw WasmException("emscripten syscall140"); } -uint32_t ___syscall146Handler(void*, uint32_t, uint32_t) { - throw WasmException("emscripten syscall146"); -} +Word ___syscall146Handler(void*, Word, Word) { throw WasmException("emscripten syscall146"); } -void ___setErrNoHandler(void*, uint32_t) { throw WasmException("emscripten setErrNo"); } +void ___setErrNoHandler(void*, Word) { throw WasmException("emscripten setErrNo"); } -// pthread_equal is required to return 0 by the protobuf libarary. -uint32_t _pthread_equalHandler(void*, uint32_t, uint32_t) { - /* throw WasmException("emscripten pthread_equal"); */ +// NB: pthread_equal is required to return 0 by the protobuf libarary. +Word _pthread_equalHandler(void*, Word, + Word) { /* throw WasmException("emscripten pthread_equal"); */ return 0; } - -uint32_t _pthread_mutex_destroyHandler(void*, uint32_t) { - throw WasmException("emscripten pthread_mutex_destroy"); -} - -uint32_t _pthread_cond_waitHandler(void*, uint32_t, uint32_t) { +// NB: pthread_mutex_destroy is required to return 0 by the protobuf libarary. 
+Word _pthread_mutex_destroyHandler(void*, Word) { return 0; } +Word _pthread_cond_waitHandler(void*, Word, Word) { throw WasmException("emscripten pthread_cond_wait"); } - -uint32_t _pthread_getspecificHandler(void*, uint32_t) { +Word _pthread_getspecificHandler(void*, Word) { throw WasmException("emscripten pthread_getspecific"); } - -uint32_t _pthread_key_createHandler(void*, uint32_t, uint32_t) { +Word _pthread_key_createHandler(void*, Word, Word) { throw WasmException("emscripten pthread_key_create"); } - -uint32_t _pthread_onceHandler(void*, uint32_t, uint32_t) { - throw WasmException("emscripten pthread_once"); -} - -uint32_t _pthread_setspecificHandler(void*, uint32_t, uint32_t) { +Word _pthread_onceHandler(void*, Word, Word) { throw WasmException("emscripten pthread_once"); } +Word _pthread_setspecificHandler(void*, Word, Word) { throw WasmException("emscripten pthread_setspecific"); } +void setTempRet0Handler(void*, Word) { throw WasmException("emscripten setTempRet0"); } -void setTempRet0Handler(void*, uint32_t) { throw WasmException("emscripten setTempRet0"); } - -void setTickPeriodMillisecondsHandler(void* raw_context, uint32_t tick_period_milliseconds) { +void setTickPeriodMillisecondsHandler(void* raw_context, Word tick_period_milliseconds) { WASM_CONTEXT(raw_context)->setTickPeriod(std::chrono::milliseconds(tick_period_milliseconds)); } @@ -555,51 +566,12 @@ uint64_t getCurrentTimeNanosecondsHandler(void* raw_context) { return WASM_CONTEXT(raw_context)->getCurrentTimeNanoseconds(); } -void logHandler(void* raw_context, uint32_t level, uint32_t address, uint32_t size) { +void logHandler(void* raw_context, Word level, Word address, Word size) { auto context = WASM_CONTEXT(raw_context); - context->scriptLog(static_cast(level), + context->scriptLog(static_cast(level.u64), context->wasmVm()->getMemory(address, size)); } -const ProtobufWkt::Struct* -getStructProtoFromMetadata(const envoy::api::v2::core::Metadata& metadata, - absl::string_view name = "") { 
- if (name.empty()) { - name = HttpFilters::HttpFilterNames::get().Wasm; - } - const auto filter_it = metadata.filter_metadata().find(std::string(name)); - if (filter_it == metadata.filter_metadata().end()) { - return nullptr; - } - return &filter_it->second; -} - -const ProtobufWkt::Struct* getRouteMetadataStructProto(Http::StreamFilterCallbacks* callbacks) { - if (callbacks == nullptr || callbacks->route() == nullptr || - callbacks->route()->routeEntry() == nullptr) { - return nullptr; - } - return getStructProtoFromMetadata(callbacks->route()->routeEntry()->metadata()); -} - -const uint8_t* decodeVarint(const uint8_t* pos, const uint8_t* end, uint32_t* out) { - uint32_t ret = 0; - int shift = 0; - while (pos < end && (*pos & 0x80)) { - ret |= (*pos & 0x7f) << shift; - shift += 7; - pos++; - } - if (pos < end) { - ret |= *pos << shift; - pos++; - } - *out = ret; - return pos; -} - -} // namespace - void Context::setTickPeriod(std::chrono::milliseconds tick_period) { wasm_->setTickPeriod(tick_period); } @@ -1189,11 +1161,11 @@ void Context::onHttpCallResponse(uint32_t token, const Pairs& response_headers, absl::string_view response_body, const Pairs& response_trailers) { if (!wasm_->onHttpCallResponse_) return; - uint32_t headers_ptr, headers_size, trailers_ptr, trailers_size; + uint64_t headers_ptr, headers_size, trailers_ptr, trailers_size; exportPairs(this, response_headers, &headers_ptr, &headers_size); exportPairs(this, response_trailers, &trailers_ptr, &trailers_size); - uint32_t body_ptr = wasm_->copyString(response_body); - uint32_t body_size = response_body.size(); + auto body_ptr = wasm_->copyString(response_body); + auto body_size = response_body.size(); wasm_->onHttpCallResponse_(this, id_, token, headers_ptr, headers_size, body_ptr, body_size, trailers_ptr, trailers_size); } @@ -1305,14 +1277,18 @@ Wasm::Wasm(absl::string_view vm, absl::string_view id, absl::string_view initial Stats::Scope& scope, const LocalInfo::LocalInfo& local_info, 
Stats::ScopeSharedPtr owned_scope) : cluster_manager_(cluster_manager), dispatcher_(dispatcher), scope_(scope), - local_info_(local_info), owned_scope_(owned_scope), time_source_(dispatcher.timeSystem()), + local_info_(local_info), owned_scope_(owned_scope), time_source_(dispatcher.timeSource()), initial_configuration_(initial_configuration) { wasm_vm_ = Common::Wasm::createWasmVm(vm); id_ = std::string(id); } void Wasm::registerCallbacks() { -#define _REGISTER(_fn) wasm_vm_->registerCallback("envoy", #_fn, &_fn##Handler); +#define _REGISTER(_fn) \ + wasm_vm_->registerCallback( \ + "envoy", #_fn, &_fn##Handler, \ + &ConvertFunctionWordToUint32::convertFunctionWordToUint32); if (is_emscripten_) { _REGISTER(_emscripten_memcpy_big); _REGISTER(_emscripten_get_heap_size); @@ -1346,7 +1322,11 @@ void Wasm::registerCallbacks() { #undef _REGISTER // Calls with the "_proxy_" prefix. -#define _REGISTER_PROXY(_fn) wasm_vm_->registerCallback("envoy", "_proxy_" #_fn, &_fn##Handler); +#define _REGISTER_PROXY(_fn) \ + wasm_vm_->registerCallback( \ + "envoy", "_proxy_" #_fn, &_fn##Handler, \ + &ConvertFunctionWordToUint32::convertFunctionWordToUint32); _REGISTER_PROXY(log); _REGISTER_PROXY(getProtocol); @@ -1394,9 +1374,8 @@ void Wasm::registerCallbacks() { void Wasm::establishEnvironment() { if (is_emscripten_) { - emscripten_table_base_ = wasm_vm_->makeGlobal("env", "__table_base", static_cast(0)); - emscripten_dynamictop_ = - wasm_vm_->makeGlobal("env", "DYNAMICTOP_PTR", static_cast(128 * 64 * 1024)); + emscripten_table_base_ = wasm_vm_->makeGlobal("env", "__table_base", Word(0)); + emscripten_dynamictop_ = wasm_vm_->makeGlobal("env", "DYNAMICTOP_PTR", Word(128 * 64 * 1024)); wasm_vm_->makeModule("global"); emscripten_NaN_ = wasm_vm_->makeGlobal("global", "NaN", std::nan("0")); @@ -1437,14 +1416,14 @@ void Wasm::getFunctions() { #undef _GET_PROXY if (!malloc_ || !free_) { - throw WasmException("WAVM missing malloc/free"); + throw WasmException("WASM missing malloc/free"); } } 
Wasm::Wasm(const Wasm& wasm, Event::Dispatcher& dispatcher) : std::enable_shared_from_this(wasm), cluster_manager_(wasm.cluster_manager_), dispatcher_(dispatcher), scope_(wasm.scope_), local_info_(wasm.local_info_), - owned_scope_(wasm.owned_scope_), time_source_(dispatcher.timeSystem()) { + owned_scope_(wasm.owned_scope_), time_source_(dispatcher.timeSource()) { wasm_vm_ = wasm.wasmVm()->clone(); general_context_ = createContext(); getFunctions(); @@ -1676,8 +1655,8 @@ void GrpcStreamClientHandler::onReceiveTrailingMetadata(Http::HeaderMapPtr&& met void Context::onGrpcReceive(uint32_t token, Buffer::InstancePtr response) { if (wasm_->onGrpcReceive_) { - uint32_t response_size = response->length(); - uint32_t response_ptr = wasm_->copyBuffer(*response); + auto response_size = response->length(); + auto response_ptr = wasm_->copyBuffer(*response); wasm_->onGrpcReceive_(this, id_, token, response_ptr, response_size); } if (IsGrpcCallToken(token)) { @@ -1688,8 +1667,8 @@ void Context::onGrpcReceive(uint32_t token, Buffer::InstancePtr response) { void Context::onGrpcClose(uint32_t token, const Grpc::Status::GrpcStatus& status, const absl::string_view message) { if (wasm_->onGrpcClose_) { - uint32_t message_ptr = wasm_->copyString(message); - wasm_->onGrpcClose_(this, id_, token, static_cast(status), message_ptr, + auto message_ptr = wasm_->copyString(message); + wasm_->onGrpcClose_(this, id_, token, static_cast(status), message_ptr, message.size()); } if (IsGrpcCallToken(token)) { @@ -1762,8 +1741,12 @@ void GrpcStreamClientHandler::onRemoteClose(Grpc::Status::GrpcStatus status, } std::unique_ptr createWasmVm(absl::string_view wasm_vm) { - if (wasm_vm == WasmVmNames::get().Wavm) { - return Wavm::createWavm(); + if (wasm_vm == WasmVmNames::get().Null) { + return Null::createVm(); + } else if (wasm_vm == WasmVmNames::get().v8) { + return V8::createVm(); + } else if (wasm_vm == WasmVmNames::get().Wavm) { + return Wavm::createVm(); } else { return nullptr; } diff --git 
a/source/extensions/common/wasm/wasm.h b/source/extensions/common/wasm/wasm.h index f0def1ab0326f..4f1322e0bf59e 100644 --- a/source/extensions/common/wasm/wasm.h +++ b/source/extensions/common/wasm/wasm.h @@ -31,35 +31,69 @@ class Context; class Wasm; class WasmVm; +// Represents a WASM-native word-sized datum. On 32-bit VMs, the high bits are always zero. +// The WASM/VM API treats all bits as significant. +struct Word { + Word(uint64_t w) : u64(w) {} // Implicit conversion into Word. + operator uint64_t() const { return u64; } // Implicit conversion into uint64_t. + // Note: no implicit conversion to uint32_t as it is lossy. + uint64_t u64; +}; + +template struct ConvertWordTypeToUint32 { using type = T; }; +template <> struct ConvertWordTypeToUint32 { using type = uint32_t; }; + +inline uint32_t convertWordToUint32(Word w) { return static_cast(w.u64); } +inline uint32_t convertWordToUint32(uint32_t v) { return v; } +inline uint64_t convertWordToUint32(uint64_t v) { return v; } +inline int64_t convertWordToUint32(int64_t v) { return v; } +inline float convertWordToUint32(float v) { return v; } +inline double convertWordToUint32(double v) { return v; } + +// Convert a function of the form Word(Word...) to one of the form uint32_t(uint32_t...). +template struct ConvertFunctionWordToUint32 { + static void convertFunctionWordToUint32() {} +}; +template R> +struct ConvertFunctionWordToUint32 { + static auto convertFunctionWordToUint32(typename ConvertWordTypeToUint32::type... args) { + return convertWordToUint32(F(std::forward(args)...)); + } +}; +template void> +struct ConvertFunctionWordToUint32 { + static void convertFunctionWordToUint32(typename ConvertWordTypeToUint32::type... 
args) { + F(std::forward(args)...); + } +}; + +template struct ConvertFunctionTypeWordToUint32 {}; +template struct ConvertFunctionTypeWordToUint32 { + using type = typename ConvertWordTypeToUint32::type (*)( + typename ConvertWordTypeToUint32::type...); +}; + using Pairs = std::vector>; using PairsWithStringValues = std::vector>; // 1st arg is always a pointer to Context (Context*). using WasmCall0Void = std::function; -using WasmCall1Void = std::function; -using WasmCall2Void = std::function; -using WasmCall3Void = std::function; -using WasmCall4Void = std::function; -using WasmCall5Void = - std::function; -using WasmCall6Void = - std::function; -using WasmCall7Void = std::function; -using WasmCall8Void = std::function; -using WasmCall1Int = std::function; -using WasmCall2Int = std::function; -using WasmCall3Int = std::function; -using WasmCall4Int = std::function; -using WasmCall5Int = - std::function; -using WasmCall6Int = - std::function; -using WasmCall7Int = std::function; -using WasmCall8Int = std::function; +using WasmCall1Void = std::function; +using WasmCall2Void = std::function; +using WasmCall3Void = std::function; +using WasmCall4Void = std::function; +using WasmCall5Void = std::function; +using WasmCall6Void = std::function; +using WasmCall7Void = std::function; +using WasmCall8Void = std::function; +using WasmCall1Int = std::function; +using WasmCall2Int = std::function; +using WasmCall3Int = std::function; +using WasmCall4Int = std::function; +using WasmCall5Int = std::function; +using WasmCall6Int = std::function; +using WasmCall7Int = std::function; +using WasmCall8Int = std::function; // 1st arg is always a context_id (uint32_t). using WasmContextCall0Void = WasmCall1Void; @@ -81,36 +115,32 @@ using WasmContextCall7Int = WasmCall8Int; // 1st arg is always a pointer to raw_context (void*). 
using WasmCallback0Void = void (*)(void*); -using WasmCallback1Void = void (*)(void*, uint32_t); -using WasmCallback2Void = void (*)(void*, uint32_t, uint32_t); -using WasmCallback3Void = void (*)(void*, uint32_t, uint32_t, uint32_t); -using WasmCallback4Void = void (*)(void*, uint32_t, uint32_t, uint32_t, uint32_t); -using WasmCallback5Void = void (*)(void*, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t); -using WasmCallback6Void = void (*)(void*, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, - uint32_t); -using WasmCallback7Void = void (*)(void*, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, - uint32_t, uint32_t); -using WasmCallback0Int = uint32_t (*)(void*); -using WasmCallback1Int = uint32_t (*)(void*, uint32_t); -using WasmCallback2Int = uint32_t (*)(void*, uint32_t, uint32_t); -using WasmCallback3Int = uint32_t (*)(void*, uint32_t, uint32_t, uint32_t); -using WasmCallback4Int = uint32_t (*)(void*, uint32_t, uint32_t, uint32_t, uint32_t); -using WasmCallback5Int = uint32_t (*)(void*, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t); -using WasmCallback6Int = uint32_t (*)(void*, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, - uint32_t); -using WasmCallback7Int = uint32_t (*)(void*, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, - uint32_t, uint32_t, uint32_t); -using WasmCallback8Int = uint32_t (*)(void*, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, - uint32_t, uint32_t, uint32_t, uint32_t); -using WasmCallback9Int = uint32_t (*)(void*, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, - uint32_t, uint32_t, uint32_t, uint32_t, uint32_t); +using WasmCallback1Void = void (*)(void*, Word); +using WasmCallback2Void = void (*)(void*, Word, Word); +using WasmCallback3Void = void (*)(void*, Word, Word, Word); +using WasmCallback4Void = void (*)(void*, Word, Word, Word, Word); +using WasmCallback5Void = void (*)(void*, Word, Word, Word, Word, Word); +using WasmCallback6Void = void (*)(void*, Word, Word, Word, Word, Word, Word); +using 
WasmCallback7Void = void (*)(void*, Word, Word, Word, Word, Word, Word, Word); +using WasmCallback0Int = Word (*)(void*); +using WasmCallback1Int = Word (*)(void*, Word); +using WasmCallback2Int = Word (*)(void*, Word, Word); +using WasmCallback3Int = Word (*)(void*, Word, Word, Word); +using WasmCallback4Int = Word (*)(void*, Word, Word, Word, Word); +using WasmCallback5Int = Word (*)(void*, Word, Word, Word, Word, Word); +using WasmCallback6Int = Word (*)(void*, Word, Word, Word, Word, Word, Word); +using WasmCallback7Int = Word (*)(void*, Word, Word, Word, Word, Word, Word, Word, Word); +using WasmCallback8Int = Word (*)(void*, Word, Word, Word, Word, Word, Word, Word, Word, Word); +using WasmCallback9Int = Word (*)(void*, Word, Word, Word, Word, Word, Word, Word, Word, Word, + Word); // Using the standard g++/clang mangling algorithm: // https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling-builtin +// Extended with W = Word // Z = void, j = uint32_t, l = int64_t, m = uint64_t -using WasmCallback_Zjl = void (*)(void*, uint32_t, int64_t); -using WasmCallback_Zjm = void (*)(void*, uint32_t, uint64_t); -using WasmCallback_mjj = uint64_t (*)(void*, uint32_t); -using WasmCallback_mj = uint64_t (*)(void*); +using WasmCallback_ZWl = void (*)(void*, Word, int64_t); +using WasmCallback_ZWm = void (*)(void*, Word, uint64_t); +using WasmCallback_m = uint64_t (*)(void*); +using WasmCallback_mW = uint64_t (*)(void*, Word); // Sadly we don't have enum class inheritance in c++-14. enum class StreamType : uint32_t { Request = 0, Response = 1, MAX = 1 }; @@ -134,6 +164,57 @@ enum class HeaderMapType : uint32_t { MAX = 6, }; +// Handlers for functions exported from envoy to wasm. 
+void logHandler(void* raw_context, Word level, Word address, Word size); +void getProtocolHandler(void* raw_context, Word type, Word value_ptr_ptr, Word value_size_ptr); +void getMetadataHandler(void* raw_context, Word type, Word key_ptr, Word key_size, + Word value_ptr_ptr, Word value_size_ptr); +void setMetadataHandler(void* raw_context, Word type, Word key_ptr, Word key_size, Word value_ptr, + Word value_size); +void getMetadataPairsHandler(void* raw_context, Word type, Word ptr_ptr, Word size_ptr); +void getMetadataStructHandler(void* raw_context, Word type, Word name_ptr, Word name_size, + Word value_ptr_ptr, Word value_size_ptr); +void setMetadataStructHandler(void* raw_context, Word type, Word name_ptr, Word name_size, + Word value_ptr, Word value_size); +void continueRequestHandler(void* raw_context); +void continueResponseHandler(void* raw_context); +void getSharedDataHandler(void* raw_context, Word key_ptr, Word key_size, Word value_ptr_ptr, + Word value_size_ptr, Word cas_ptr); +Word setSharedDataHandler(void* raw_context, Word key_ptr, Word key_size, Word value_ptr, + Word value_size, Word cas); +void addHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size, + Word value_ptr, Word value_size); +void getHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size, + Word value_ptr_ptr, Word value_size_ptr); +void replaceHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size, + Word value_ptr, Word value_size); +void removeHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size); +void getHeaderMapPairsHandler(void* raw_context, Word type, Word ptr_ptr, Word size_ptr); +void setHeaderMapPairsHandler(void* raw_context, Word type, Word ptr, Word size); +void getRequestBodyBufferBytesHandler(void* raw_context, Word start, Word length, Word ptr_ptr, + Word size_ptr); +void getResponseBodyBufferBytesHandler(void* raw_context, Word start, Word length, Word ptr_ptr, + 
Word size_ptr); +Word httpCallHandler(void* raw_context, Word uri_ptr, Word uri_size, Word header_pairs_ptr, + Word header_pairs_size, Word body_ptr, Word body_size, Word trailer_pairs_ptr, + Word trailer_pairs_size, Word timeout_milliseconds); +Word defineMetricHandler(void* raw_context, Word metric_type, Word name_ptr, Word name_size); +void incrementMetricHandler(void* raw_context, Word metric_id, int64_t offset); +void recordMetricHandler(void* raw_context, Word metric_id, uint64_t value); +uint64_t getMetricHandler(void* raw_context, Word metric_id); +Word grpcCallHandler(void* raw_context, Word service_ptr, Word service_size, Word service_name_ptr, + Word service_name_size, Word method_name_ptr, Word method_name_size, + Word request_ptr, Word request_size, Word timeout_milliseconds); +Word grpcStreamHandler(void* raw_context, Word service_ptr, Word service_size, + Word service_name_ptr, Word service_name_size, Word method_name_ptr, + Word method_name_size); +void grpcCancelHandler(void* raw_context, Word token); +void grpcCloseHandler(void* raw_context, Word token); +void grpcSendHandler(void* raw_context, Word token, Word message_ptr, Word message_size, + Word end_stream); +void setTickPeriodMillisecondsHandler(void* raw_context, Word tick_period_milliseconds); +uint64_t getCurrentTimeNanosecondsHandler(void* raw_context); + inline MetadataType StreamType2MetadataType(StreamType type) { return static_cast(type); } @@ -465,16 +546,16 @@ class Wasm : public Envoy::Server::Wasm, const Http::HeaderMap* response_trailers, const StreamInfo::StreamInfo& stream_info); // Support functions. - void* allocMemory(uint32_t size, uint32_t* address); + void* allocMemory(uint64_t size, uint64_t* address); bool freeMemory(void* pointer); - void freeMemoryOffset(uint32_t address); + void freeMemoryOffset(uint64_t address); // Allocate a null-terminated string in the VM and return the pointer to use as a call arguments. 
- uint32_t copyString(absl::string_view s); - uint32_t copyBuffer(const Buffer::Instance& buffer); + uint64_t copyString(absl::string_view s); + uint64_t copyBuffer(const Buffer::Instance& buffer); // Copy the data in 's' into the VM along with the pointer-size pair. Returns true on success. - bool copyToPointerSize(absl::string_view s, uint32_t ptr_ptr, uint32_t size_ptr); - bool copyToPointerSize(const Buffer::Instance& buffer, uint32_t start, uint32_t length, - uint32_t ptr_ptr, uint32_t size_ptr); + bool copyToPointerSize(absl::string_view s, uint64_t ptr_ptr, uint64_t size_ptr); + bool copyToPointerSize(const Buffer::Instance& buffer, uint64_t start, uint64_t length, + uint64_t ptr_ptr, uint64_t size_ptr); // For testing. void setGeneralContext(std::shared_ptr context) { @@ -588,8 +669,8 @@ class Wasm : public Envoy::Server::Wasm, uint32_t emscripten_memory_size_ = 0; uint32_t emscripten_table_size_ = 0; - std::unique_ptr> emscripten_table_base_; - std::unique_ptr> emscripten_dynamictop_; + std::unique_ptr> emscripten_table_base_; + std::unique_ptr> emscripten_dynamictop_; std::unique_ptr> emscripten_NaN_; std::unique_ptr> emscripten_Infinity_; @@ -626,11 +707,13 @@ class WasmVm : public Logger::Loggable { virtual void start(Context*) PURE; // Convert a block of memory in the VM to a string_view. - virtual absl::string_view getMemory(uint32_t pointer, uint32_t size) PURE; + virtual absl::string_view getMemory(uint64_t pointer, uint64_t size) PURE; // Convert a host pointer to memory in the VM into a VM "pointer" (an offset into the Memory). - virtual bool getMemoryOffset(void* host_pointer, uint32_t* vm_pointer) PURE; + virtual bool getMemoryOffset(void* host_pointer, uint64_t* vm_pointer) PURE; // Set a block of memory in the VM, returns true on success, false if the pointer/size is invalid. 
- virtual bool setMemory(uint32_t pointer, uint32_t size, void* data) PURE; + virtual bool setMemory(uint64_t pointer, uint64_t size, void* data) PURE; + // Set a Word in the VM, returns true on success, false if the pointer is invalid. + virtual bool setWord(uint64_t pointer, uint64_t data) PURE; // Make a new intrinsic module (e.g. for Emscripten support). virtual void makeModule(absl::string_view name) PURE; @@ -649,53 +732,36 @@ class WasmVm : public Logger::Loggable { virtual void getFunction(absl::string_view functionName, WasmCall3Int* f) PURE; // Register typed callbacks exported by the host environment. - virtual void registerCallback(absl::string_view moduleName, absl::string_view functionName, - WasmCallback0Void f) PURE; - virtual void registerCallback(absl::string_view moduleName, absl::string_view functionName, - WasmCallback1Void f) PURE; - virtual void registerCallback(absl::string_view moduleName, absl::string_view functionName, - WasmCallback2Void f) PURE; - virtual void registerCallback(absl::string_view moduleName, absl::string_view functionName, - WasmCallback3Void f) PURE; - virtual void registerCallback(absl::string_view moduleName, absl::string_view functionName, - WasmCallback4Void f) PURE; - virtual void registerCallback(absl::string_view moduleName, absl::string_view functionName, - WasmCallback5Void f) PURE; - virtual void registerCallback(absl::string_view moduleName, absl::string_view functionName, - WasmCallback0Int f) PURE; - virtual void registerCallback(absl::string_view moduleName, absl::string_view functionName, - WasmCallback1Int f) PURE; - virtual void registerCallback(absl::string_view moduleName, absl::string_view functionName, - WasmCallback2Int f) PURE; - virtual void registerCallback(absl::string_view moduleName, absl::string_view functionName, - WasmCallback3Int f) PURE; - virtual void registerCallback(absl::string_view moduleName, absl::string_view functionName, - WasmCallback4Int f) PURE; - virtual void 
registerCallback(absl::string_view moduleName, absl::string_view functionName, - WasmCallback5Int f) PURE; - virtual void registerCallback(absl::string_view moduleName, absl::string_view functionName, - WasmCallback6Int f) PURE; - virtual void registerCallback(absl::string_view moduleName, absl::string_view functionName, - WasmCallback7Int f) PURE; - virtual void registerCallback(absl::string_view moduleName, absl::string_view functionName, - WasmCallback8Int f) PURE; - virtual void registerCallback(absl::string_view moduleName, absl::string_view functionName, - WasmCallback9Int f) PURE; - - virtual void registerCallback(absl::string_view moduleName, absl::string_view functionName, - WasmCallback_Zjl f) PURE; - virtual void registerCallback(absl::string_view moduleName, absl::string_view functionName, - WasmCallback_Zjm f) PURE; - virtual void registerCallback(absl::string_view moduleName, absl::string_view functionName, - WasmCallback_mjj f) PURE; - virtual void registerCallback(absl::string_view moduleName, absl::string_view functionName, - WasmCallback_mj f) PURE; +#define REGISTER_CALLBACK(_t) \ + virtual void registerCallback(absl::string_view moduleName, absl::string_view functionName, \ + _t f, typename ConvertFunctionTypeWordToUint32<_t>::type) PURE; + REGISTER_CALLBACK(WasmCallback0Void); + REGISTER_CALLBACK(WasmCallback1Void); + REGISTER_CALLBACK(WasmCallback2Void); + REGISTER_CALLBACK(WasmCallback3Void); + REGISTER_CALLBACK(WasmCallback4Void); + REGISTER_CALLBACK(WasmCallback5Void); + REGISTER_CALLBACK(WasmCallback0Int); + REGISTER_CALLBACK(WasmCallback1Int); + REGISTER_CALLBACK(WasmCallback2Int); + REGISTER_CALLBACK(WasmCallback3Int); + REGISTER_CALLBACK(WasmCallback4Int); + REGISTER_CALLBACK(WasmCallback5Int); + REGISTER_CALLBACK(WasmCallback6Int); + REGISTER_CALLBACK(WasmCallback7Int); + REGISTER_CALLBACK(WasmCallback8Int); + REGISTER_CALLBACK(WasmCallback9Int); + REGISTER_CALLBACK(WasmCallback_ZWl); + REGISTER_CALLBACK(WasmCallback_ZWm); + 
REGISTER_CALLBACK(WasmCallback_m); + REGISTER_CALLBACK(WasmCallback_mW); +#undef REGISTER_CALLBACK // Register typed value exported by the host environment. - virtual std::unique_ptr> - makeGlobal(absl::string_view moduleName, absl::string_view name, uint32_t initialValue) PURE; + virtual std::unique_ptr> makeGlobal(absl::string_view module_name, + absl::string_view name, Word initial_value) PURE; virtual std::unique_ptr> - makeGlobal(absl::string_view moduleName, absl::string_view name, double initialValue) PURE; + makeGlobal(absl::string_view module_name, absl::string_view name, double initial_value) PURE; }; // Create a new low-level WASM VM of the give type (e.g. "envoy.wasm.vm.wavm"). @@ -729,17 +795,17 @@ class WasmVmException : public EnvoyException { inline Context::Context(Wasm* wasm) : wasm_(wasm), id_(wasm->allocContextId()) {} -inline void* Wasm::allocMemory(uint32_t size, uint32_t* address) { - uint32_t a = malloc_(generalContext(), size); - *address = a; - // Note: this can thorw a WAVM exception. +inline void* Wasm::allocMemory(uint64_t size, uint64_t* address) { + Word a = malloc_(generalContext(), size); + *address = a.u64; + // Note: this can throw a WASM exception. 
return const_cast(reinterpret_cast(wasm_vm_->getMemory(a, size).data())); } -inline void Wasm::freeMemoryOffset(uint32_t address) { free_(generalContext(), address); } +inline void Wasm::freeMemoryOffset(uint64_t address) { free_(generalContext(), address); } inline bool Wasm::freeMemory(void* pointer) { - uint32_t offset; + uint64_t offset; if (!wasm_vm_->getMemoryOffset(pointer, &offset)) { return false; } @@ -747,8 +813,8 @@ inline bool Wasm::freeMemory(void* pointer) { return true; } -inline uint32_t Wasm::copyString(absl::string_view s) { - uint32_t pointer; +inline uint64_t Wasm::copyString(absl::string_view s) { + uint64_t pointer; uint8_t* m = static_cast(allocMemory((s.size() + 1), &pointer)); if (s.size() > 0) memcpy(m, s.data(), s.size()); @@ -756,8 +822,8 @@ inline uint32_t Wasm::copyString(absl::string_view s) { return pointer; } -inline uint32_t Wasm::copyBuffer(const Buffer::Instance& buffer) { - uint32_t pointer; +inline uint64_t Wasm::copyBuffer(const Buffer::Instance& buffer) { + uint64_t pointer; auto length = buffer.length(); if (length <= 0) { return 0; @@ -783,33 +849,36 @@ inline uint32_t Wasm::copyBuffer(const Buffer::Instance& buffer) { return pointer; } -inline bool Wasm::copyToPointerSize(absl::string_view s, uint32_t ptr_ptr, uint32_t size_ptr) { - uint32_t pointer = 0; - uint32_t size = s.size(); +inline bool Wasm::copyToPointerSize(absl::string_view s, uint64_t ptr_ptr, uint64_t size_ptr) { + uint64_t pointer = 0; + uint64_t size = s.size(); void* p = nullptr; if (size > 0) { p = allocMemory(size, &pointer); - if (!p) + if (!p) { return false; + } memcpy(p, s.data(), size); } - if (!wasm_vm_->setMemory(ptr_ptr, sizeof(uint32_t), &pointer)) + if (!wasm_vm_->setWord(ptr_ptr, pointer)) { return false; - if (!wasm_vm_->setMemory(size_ptr, sizeof(uint32_t), &size)) + } + if (!wasm_vm_->setWord(size_ptr, size)) { return false; + } return true; } -inline bool Wasm::copyToPointerSize(const Buffer::Instance& buffer, uint32_t start, uint32_t 
length, - uint32_t ptr_ptr, uint32_t size_ptr) { - uint32_t size = buffer.length(); +inline bool Wasm::copyToPointerSize(const Buffer::Instance& buffer, uint64_t start, uint64_t length, + uint64_t ptr_ptr, uint64_t size_ptr) { + uint64_t size = buffer.length(); if (size < start + length) { return false; } auto nslices = buffer.getRawSlices(nullptr, 0); auto slices = std::make_unique(nslices + 10 /* pad for evbuffer overrun */); auto actual_slices = buffer.getRawSlices(&slices[0], nslices); - uint32_t pointer = 0; + uint64_t pointer = 0; char* p = static_cast(allocMemory(length, &pointer)); auto s = start; auto l = length; @@ -832,10 +901,10 @@ inline bool Wasm::copyToPointerSize(const Buffer::Instance& buffer, uint32_t sta s = 0; p += ll; } - if (!wasm_vm_->setMemory(ptr_ptr, sizeof(int32_t), &pointer)) { + if (!wasm_vm_->setWord(ptr_ptr, pointer)) { return false; } - if (!wasm_vm_->setMemory(size_ptr, sizeof(int32_t), &length)) { + if (!wasm_vm_->setWord(size_ptr, length)) { return false; } return true; diff --git a/source/extensions/common/wasm/wavm/BUILD b/source/extensions/common/wasm/wavm/BUILD index d302142abe450..0606d9917c005 100644 --- a/source/extensions/common/wasm/wavm/BUILD +++ b/source/extensions/common/wasm/wavm/BUILD @@ -28,8 +28,6 @@ envoy_cc_library( ], deps = [ "//external:abseil_node_hash_map", - "//include/envoy/server:wasm_interface", - "//include/envoy/thread_local:thread_local_interface", "//source/common/common:assert_lib", "//source/common/common:c_smart_ptr_lib", "//source/common/protobuf", diff --git a/source/extensions/common/wasm/wavm/wavm.cc b/source/extensions/common/wasm/wavm/wavm.cc index 0fb1f903942c5..7bbcd0fd26fed 100644 --- a/source/extensions/common/wasm/wavm/wavm.cc +++ b/source/extensions/common/wasm/wavm/wavm.cc @@ -47,6 +47,14 @@ using namespace WAVM; using namespace WAVM::IR; +namespace WAVM { +namespace IR { +template <> constexpr ValueType inferValueType() { + return ValueType::i32; +} +} // namespace IR +} // namespace 
WAVM + namespace Envoy { namespace Extensions { namespace Common { @@ -61,6 +69,9 @@ void getFunctionWavm(WasmVm* vm, absl::string_view functionName, template void registerCallbackWavm(WasmVm* vm, absl::string_view moduleName, absl::string_view functionName, R (*)(Args...)); +template +void registerCallbackWavm(WasmVm* vm, absl::string_view moduleName, absl::string_view functionName, + F, R (*)(Args...)); template std::unique_ptr> makeGlobalWavm(WasmVm* vm, absl::string_view moduleName, absl::string_view name, T initialValue); @@ -71,6 +82,16 @@ struct Wavm; namespace { +struct WasmUntaggedValue : public WAVM::IR::UntaggedValue { + WasmUntaggedValue(I32 inI32) { i32 = inI32; } + WasmUntaggedValue(I64 inI64) { i64 = inI64; } + WasmUntaggedValue(U32 inU32) { u32 = inU32; } + WasmUntaggedValue(Word w) { u32 = static_cast(w.u64); } + WasmUntaggedValue(U64 inU64) { u64 = inU64; } + WasmUntaggedValue(F32 inF32) { f32 = inF32; } + WasmUntaggedValue(F64 inF64) { f64 = inF64; } +}; + using Context = Common::Wasm::Context; // Shadowing WAVM::Runtime::Context. 
const Logger::Id wasmId = Logger::Id::wasm; @@ -177,28 +198,21 @@ bool loadModule(const std::string& code, IR::Module& outModule) { } // namespace -struct EnvoyHandlerBase { - virtual ~EnvoyHandlerBase() {} -}; - -template struct EnvoyHandler : EnvoyHandlerBase { - ~EnvoyHandler() override {} - explicit EnvoyHandler(F ahandler) : handler(ahandler) {} - F handler; -}; - -template EnvoyHandlerBase* MakeEnvoyHandler(F handler) { - return new EnvoyHandler(handler); -} - struct WavmGlobalBase { WAVM::Runtime::Global* global_ = nullptr; }; -template struct WavmGlobal : Global, Intrinsics::GenericGlobal, WavmGlobalBase { +template struct NativeWord { using type = T; }; +template <> struct NativeWord { using type = uint32_t; }; + +template +struct WavmGlobal : Global, + Intrinsics::GenericGlobal::type>, + WavmGlobalBase { WavmGlobal(Common::Wasm::Wavm::Wavm* wavm, Intrinsics::Module& module, const std::string& name, T value) - : Intrinsics::GenericGlobal(module, name.c_str(), value), wavm_(wavm) {} + : Intrinsics::GenericGlobal::type>(module, name.c_str(), value), + wavm_(wavm) {} virtual ~WavmGlobal() {} T get() override; @@ -224,9 +238,10 @@ struct Wavm : public WasmVm { bool load(const std::string& code, bool allow_precompiled) override; void link(absl::string_view debug_name, bool needs_emscripten) override; void start(Context* context) override; - absl::string_view getMemory(uint32_t pointer, uint32_t size) override; - bool getMemoryOffset(void* host_pointer, uint32_t* vm_pointer) override; - bool setMemory(uint32_t pointer, uint32_t size, void* data) override; + absl::string_view getMemory(uint64_t pointer, uint64_t size) override; + bool getMemoryOffset(void* host_pointer, uint64_t* vm_pointer) override; + bool setMemory(uint64_t pointer, uint64_t size, void* data) override; + bool setWord(uint64_t pointer, uint64_t data) override; void makeModule(absl::string_view name) override; absl::string_view getUserSection(absl::string_view name) override; @@ -248,8 +263,8 @@ 
struct Wavm : public WasmVm { #undef _GET_FUNCTION #define _REGISTER_CALLBACK(_type) \ - void registerCallback(absl::string_view moduleName, absl::string_view functionName, _type f) \ - override { \ + void registerCallback(absl::string_view moduleName, absl::string_view functionName, _type, \ + typename ConvertFunctionTypeWordToUint32<_type>::type f) override { \ registerCallbackWavm(this, moduleName, functionName, f); \ }; _REGISTER_CALLBACK(WasmCallback0Void); @@ -268,10 +283,10 @@ struct Wavm : public WasmVm { _REGISTER_CALLBACK(WasmCallback7Int); _REGISTER_CALLBACK(WasmCallback8Int); _REGISTER_CALLBACK(WasmCallback9Int); - _REGISTER_CALLBACK(WasmCallback_Zjl); - _REGISTER_CALLBACK(WasmCallback_Zjm); - _REGISTER_CALLBACK(WasmCallback_mjj); - _REGISTER_CALLBACK(WasmCallback_mj); + _REGISTER_CALLBACK(WasmCallback_ZWl); + _REGISTER_CALLBACK(WasmCallback_ZWm); + _REGISTER_CALLBACK(WasmCallback_m); + _REGISTER_CALLBACK(WasmCallback_mW); #undef _REGISTER_CALLBACK #define _REGISTER_GLOBAL(_type) \ @@ -279,7 +294,7 @@ struct Wavm : public WasmVm { _type initialValue) override { \ return makeGlobalWavm(this, moduleName, name, initialValue); \ }; - _REGISTER_GLOBAL(uint32_t); + _REGISTER_GLOBAL(Word); _REGISTER_GLOBAL(double); #undef _REGISTER_GLOBAL @@ -307,7 +322,7 @@ Wavm::~Wavm() { if (emscriptenInstance_) { emscriptenInstance_->env = nullptr; emscriptenInstance_->global = nullptr; - emscriptenInstance_->emscriptenMemory = nullptr; + emscriptenInstance_->memory = nullptr; delete emscriptenInstance_; } context_ = nullptr; @@ -410,7 +425,7 @@ void Wavm::start(Context* context) { } if (emscriptenInstance_) { - Emscripten::initializeGlobals(context_, irModule_, moduleInstance_); + Emscripten::initializeGlobals(emscriptenInstance_, context_, irModule_, moduleInstance_); } f = asFunctionNullable(getInstanceExport(moduleInstance_, "__post_instantiate")); @@ -431,13 +446,13 @@ void Wavm::start(Context* context) { } } -absl::string_view Wavm::getMemory(uint32_t pointer, 
uint32_t size) { +absl::string_view Wavm::getMemory(uint64_t pointer, uint64_t size) { return {reinterpret_cast( WAVM::Runtime::memoryArrayPtr(memory_, pointer, static_cast(size))), static_cast(size)}; } -bool Wavm::getMemoryOffset(void* host_pointer, uint32_t* vm_pointer) { +bool Wavm::getMemoryOffset(void* host_pointer, uint64_t* vm_pointer) { intptr_t offset = (static_cast(host_pointer) - memory_base_); if (offset < 0) { return false; @@ -445,11 +460,11 @@ bool Wavm::getMemoryOffset(void* host_pointer, uint32_t* vm_pointer) { if (static_cast(offset) > memory_size_) { return false; } - *vm_pointer = static_cast(offset); + *vm_pointer = static_cast(offset); return true; } -bool Wavm::setMemory(uint32_t pointer, uint32_t size, void* data) { +bool Wavm::setMemory(uint64_t pointer, uint64_t size, void* data) { auto p = reinterpret_cast( WAVM::Runtime::memoryArrayPtr(memory_, pointer, static_cast(size))); if (p) { @@ -460,6 +475,18 @@ bool Wavm::setMemory(uint32_t pointer, uint32_t size, void* data) { } } +bool Wavm::setWord(uint64_t pointer, uint64_t data) { + auto p = reinterpret_cast( + WAVM::Runtime::memoryArrayPtr(memory_, pointer, static_cast(sizeof(uint32_t)))); + if (p) { + uint32_t data32 = static_cast(data); + memcpy(p, &data32, sizeof(uint32_t)); + return true; + } else { + return false; + } +} + absl::string_view Wavm::getUserSection(absl::string_view name) { for (auto& section : irModule_.userSections) { if (section.name == name) { @@ -469,7 +496,7 @@ absl::string_view Wavm::getUserSection(absl::string_view name) { return {}; } -std::unique_ptr createWavm() { return std::make_unique(); } +std::unique_ptr createVm() { return std::make_unique(); } } // namespace Wavm @@ -595,7 +622,7 @@ void getFunctionWavmReturn(WasmVm* vm, absl::string_view functionName, throw WasmVmException(fmt::format("Bad function signature for: {}", functionName)); } *function = [wavm, f](Context* context, Args... 
args) -> R { - UntaggedValue values[] = {args...}; + WasmUntaggedValue values[] = {args...}; try { CALL_WITH_CONTEXT_RETURN(invokeFunctionUnchecked(wavm->context_, f, &values[0]), context, uint32_t, i32); @@ -623,7 +650,7 @@ void getFunctionWavmReturn(WasmVm* vm, absl::string_view functionName, throw WasmVmException(fmt::format("Bad function signature for: {}", functionName)); } *function = [wavm, f](Context* context, Args... args) -> R { - UntaggedValue values[] = {args...}; + WasmUntaggedValue values[] = {args...}; try { CALL_WITH_CONTEXT(invokeFunctionUnchecked(wavm->context_, f, &values[0]), context); } catch (const std::exception& e) { @@ -713,6 +740,7 @@ template void getFunctionWavm*); template T getValue(IR::Value) {} +template <> Word getValue(IR::Value v) { return v.u32; } template <> int32_t getValue(IR::Value v) { return v.i32; } template <> uint32_t getValue(IR::Value v) { return v.u32; } template <> int64_t getValue(IR::Value v) { return v.i64; } diff --git a/source/extensions/common/wasm/wavm/wavm.h b/source/extensions/common/wasm/wavm/wavm.h index f818e3afd712f..0d23b59380a0a 100644 --- a/source/extensions/common/wasm/wavm/wavm.h +++ b/source/extensions/common/wasm/wavm/wavm.h @@ -1,17 +1,6 @@ #pragma once #include -#include -#include - -#include "envoy/common/exception.h" -#include "envoy/config/wasm/v2/wasm.pb.validate.h" -#include "envoy/server/wasm.h" -#include "envoy/thread_local/thread_local.h" - -#include "common/common/assert.h" -#include "common/common/c_smart_ptr.h" -#include "common/common/logger.h" #include "extensions/common/wasm/wasm.h" @@ -21,7 +10,7 @@ namespace Common { namespace Wasm { namespace Wavm { -std::unique_ptr createWavm(); +std::unique_ptr createVm(); } // namespace Wavm } // namespace Wasm diff --git a/source/extensions/common/wasm/well_known_names.h b/source/extensions/common/wasm/well_known_names.h index 592f2e51699b1..332beb2f019ef 100644 --- a/source/extensions/common/wasm/well_known_names.h +++ 
b/source/extensions/common/wasm/well_known_names.h @@ -13,8 +13,13 @@ namespace Wasm { */ class WasmVmValues { public: + // V8 (https://v8.dev) WASM VM. + const std::string v8 = "envoy.wasm.vm.v8"; // WAVM (https://github.com/WAVM/WAVM) Wasm VM. const std::string Wavm = "envoy.wasm.vm.wavm"; + // Null sandbox: modules must be compiled into envoy and registered name is given in the + // DataSource.inline_string. + const std::string Null = "envoy.wasm.vm.null"; }; typedef ConstSingleton WasmVmNames; diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index b1eee873af467..02628a4f797a3 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -8,11 +8,16 @@ EXTENSIONS = { "envoy.access_loggers.http_grpc": "//source/extensions/access_loggers/http_grpc:config", "envoy.access_loggers.wasm": "//source/extensions/access_loggers/wasm:config", + # + # Clusters + # + "envoy.clusters.redis": "//source/extensions/clusters/redis:redis_cluster", + # # gRPC Credentials Plugins # - "envoy.grpc_credentials.file_based_metadata": "//source/extensions/grpc_credentials/file_based_metadata:config", + "envoy.grpc_credentials.file_based_metadata": "//source/extensions/grpc_credentials/file_based_metadata:config", # # WASM @@ -31,6 +36,7 @@ EXTENSIONS = { "envoy.filters.http.buffer": "//source/extensions/filters/http/buffer:config", "envoy.filters.http.cors": "//source/extensions/filters/http/cors:config", + "envoy.filters.http.csrf": "//source/extensions/filters/http/csrf:config", "envoy.filters.http.dynamo": "//source/extensions/filters/http/dynamo:config", "envoy.filters.http.ext_authz": "//source/extensions/filters/http/ext_authz:config", "envoy.filters.http.fault": "//source/extensions/filters/http/fault:config", @@ -76,6 +82,9 @@ EXTENSIONS = { "envoy.filters.network.echo": "//source/extensions/filters/network/echo:config", "envoy.filters.network.ext_authz": 
"//source/extensions/filters/network/ext_authz:config", "envoy.filters.network.http_connection_manager": "//source/extensions/filters/network/http_connection_manager:config", + # NOTE: Kafka filter does not have a proper filter implemented right now. We are referencing to + # codec implementation that is going to be used by the filter. + "envoy.filters.network.kafka": "//source/extensions/filters/network/kafka:kafka_request_codec_lib", "envoy.filters.network.mongo_proxy": "//source/extensions/filters/network/mongo_proxy:config", "envoy.filters.network.mysql_proxy": "//source/extensions/filters/network/mysql_proxy:config", "envoy.filters.network.ratelimit": "//source/extensions/filters/network/ratelimit:config", @@ -84,6 +93,7 @@ EXTENSIONS = { "envoy.filters.network.tcp_proxy": "//source/extensions/filters/network/tcp_proxy:config", "envoy.filters.network.thrift_proxy": "//source/extensions/filters/network/thrift_proxy:config", "envoy.filters.network.sni_cluster": "//source/extensions/filters/network/sni_cluster:config", + "envoy.filters.network.zookeeper_proxy": "//source/extensions/filters/network/zookeeper_proxy:config", # # Resource monitors @@ -157,6 +167,7 @@ WINDOWS_EXTENSIONS = { #"envoy.filters.http.buffer": "//source/extensions/filters/http/buffer:config", #"envoy.filters.http.cors": "//source/extensions/filters/http/cors:config", + #"envoy.filters.http.csrf": "//source/extensions/filters/http/csrf:config", #"envoy.filters.http.dynamo": "//source/extensions/filters/http/dynamo:config", #"envoy.filters.http.ext_authz": "//source/extensions/filters/http/ext_authz:config", #"envoy.filters.http.fault": "//source/extensions/filters/http/fault:config", @@ -201,6 +212,7 @@ WINDOWS_EXTENSIONS = { "envoy.filters.network.tcp_proxy": "//source/extensions/filters/network/tcp_proxy:config", #"envoy.filters.network.thrift_proxy": "//source/extensions/filters/network/thrift_proxy:config", #"envoy.filters.network.sni_cluster": 
"//source/extensions/filters/network/sni_cluster:config", + #"envoy.filters.network.zookeeper_proxy": "//source/extensions/filters/network/zookeeper_proxy:config", # # Stat sinks diff --git a/source/extensions/filters/common/ext_authz/BUILD b/source/extensions/filters/common/ext_authz/BUILD index 624dc42b9403d..dc9687f1bd2dc 100644 --- a/source/extensions/filters/common/ext_authz/BUILD +++ b/source/extensions/filters/common/ext_authz/BUILD @@ -41,6 +41,12 @@ envoy_cc_library( "//source/common/network:utility_lib", "//source/common/protobuf", "//source/common/tracing:http_tracer_lib", + + # TODO(gsagula): Descriptor pool requires this dependence in runtime only. It should NOT be + # removed unless v2alpha is fully deprecated, otherwise linker will drop the object file + # associated with v2alpha/external_auth.pb.cc causing FindMessageTypeByName() to always return + # nullptr when looking up for envoy.service.auth.v2alpha.Authorization.Check. + "@envoy_api//envoy/service/auth/v2alpha:external_auth_cc", ], ) diff --git a/source/extensions/filters/common/ext_authz/check_request_utils.cc b/source/extensions/filters/common/ext_authz/check_request_utils.cc index 30474ad1b2962..d243e04c5978f 100644 --- a/source/extensions/filters/common/ext_authz/check_request_utils.cc +++ b/source/extensions/filters/common/ext_authz/check_request_utils.cc @@ -40,19 +40,21 @@ void CheckRequestUtils::setAttrContextPeer(envoy::service::auth::v2::AttributeCo // Set the principal // Preferably the SAN from the peer's cert or // Subject from the peer's cert. 
- Ssl::Connection* ssl = const_cast(connection.ssl()); + Ssl::ConnectionInfo* ssl = const_cast(connection.ssl()); if (ssl != nullptr) { if (local) { - peer.set_principal(ssl->uriSanLocalCertificate()); - - if (peer.principal().empty()) { + const auto uriSans = ssl->uriSanLocalCertificate(); + if (uriSans.empty()) { peer.set_principal(ssl->subjectLocalCertificate()); + } else { + peer.set_principal(uriSans[0]); } } else { - peer.set_principal(ssl->uriSanPeerCertificate()); - - if (peer.principal().empty()) { + const auto uriSans = ssl->uriSanPeerCertificate(); + if (uriSans.empty()) { peer.set_principal(ssl->subjectPeerCertificate()); + } else { + peer.set_principal(uriSans[0]); } } } @@ -74,7 +76,7 @@ std::string CheckRequestUtils::getHeaderStr(const Envoy::Http::HeaderEntry* entr void CheckRequestUtils::setHttpRequest( ::envoy::service::auth::v2::AttributeContext_HttpRequest& httpreq, const Envoy::Http::StreamDecoderFilterCallbacks* callbacks, - const Envoy::Http::HeaderMap& headers) { + const Envoy::Http::HeaderMap& headers, uint64_t max_request_bytes) { // Set id // The streamId is not qualified as a const. Although it is as it does not modify the object. @@ -105,29 +107,43 @@ void CheckRequestUtils::setHttpRequest( auto mutable_headers = httpreq.mutable_headers(); headers.iterate( [](const Envoy::Http::HeaderEntry& e, void* ctx) { - Envoy::Protobuf::Map* - mutable_headers = static_cast< - Envoy::Protobuf::Map*>( - ctx); - (*mutable_headers)[std::string(e.key().getStringView())] = - std::string(e.value().getStringView()); + // Skip any client EnvoyAuthPartialBody header, which could interfere with internal use. + if (e.key().getStringView() != Http::Headers::get().EnvoyAuthPartialBody.get()) { + Envoy::Protobuf::Map* mutable_headers = + static_cast*>(ctx); + (*mutable_headers)[std::string(e.key().getStringView())] = + std::string(e.value().getStringView()); + } return Envoy::Http::HeaderMap::Iterate::Continue; }, mutable_headers); + + // Set request body. 
+ const Buffer::Instance* buffer = sdfc->decodingBuffer(); + if (max_request_bytes > 0 && buffer != nullptr) { + const uint64_t length = std::min(buffer->length(), max_request_bytes); + std::string data(length, 0); + buffer->copyOut(0, length, &data[0]); + httpreq.set_body(std::move(data)); + + // Add in a header to detect when a partial body is used. + (*mutable_headers)[Http::Headers::get().EnvoyAuthPartialBody.get()] = + length != buffer->length() ? "true" : "false"; + } } void CheckRequestUtils::setAttrContextRequest( ::envoy::service::auth::v2::AttributeContext_Request& req, const Envoy::Http::StreamDecoderFilterCallbacks* callbacks, - const Envoy::Http::HeaderMap& headers) { - setHttpRequest(*req.mutable_http(), callbacks, headers); + const Envoy::Http::HeaderMap& headers, uint64_t max_request_bytes) { + setHttpRequest(*req.mutable_http(), callbacks, headers, max_request_bytes); } void CheckRequestUtils::createHttpCheck( const Envoy::Http::StreamDecoderFilterCallbacks* callbacks, const Envoy::Http::HeaderMap& headers, - Protobuf::Map&& context_extensions, - envoy::service::auth::v2::CheckRequest& request) { + Protobuf::Map&& context_extensions, + envoy::service::auth::v2::CheckRequest& request, uint64_t max_request_bytes) { auto attrs = request.mutable_attributes(); @@ -138,7 +154,7 @@ void CheckRequestUtils::createHttpCheck( setAttrContextPeer(*attrs->mutable_source(), *cb->connection(), service, false); setAttrContextPeer(*attrs->mutable_destination(), *cb->connection(), "", true); - setAttrContextRequest(*attrs->mutable_request(), callbacks, headers); + setAttrContextRequest(*attrs->mutable_request(), callbacks, headers, max_request_bytes); // Fill in the context extensions: (*attrs->mutable_context_extensions()) = std::move(context_extensions); diff --git a/source/extensions/filters/common/ext_authz/check_request_utils.h b/source/extensions/filters/common/ext_authz/check_request_utils.h index a3214d17b50d9..5fa997c80a522 100644 --- 
a/source/extensions/filters/common/ext_authz/check_request_utils.h +++ b/source/extensions/filters/common/ext_authz/check_request_utils.h @@ -43,20 +43,19 @@ class CheckRequestUtils { * @param headers supplies the header map with http headers that will be used to create the * check request. * @param request is the reference to the check request that will be filled up. - * + * @param max_request_bytes maximum number of request body bytes to include in the check request (0 disables body forwarding). */ - static void - createHttpCheck(const Envoy::Http::StreamDecoderFilterCallbacks* callbacks, - const Envoy::Http::HeaderMap& headers, - Protobuf::Map&& context_extensions, - envoy::service::auth::v2::CheckRequest& request); + static void createHttpCheck(const Envoy::Http::StreamDecoderFilterCallbacks* callbacks, + const Envoy::Http::HeaderMap& headers, + Protobuf::Map&& context_extensions, + envoy::service::auth::v2::CheckRequest& request, + uint64_t max_request_bytes); /** * createTcpCheck is used to extract the attributes from the network layer and fill them up * in the CheckRequest proto message. * @param callbacks supplies the network layer context from which data can be extracted. * @param request is the reference to the check request that will be filled up.
- * */ static void createTcpCheck(const Network::ReadFilterCallbacks* callbacks, envoy::service::auth::v2::CheckRequest& request); @@ -67,10 +66,11 @@ class CheckRequestUtils { const bool local); static void setHttpRequest(::envoy::service::auth::v2::AttributeContext_HttpRequest& httpreq, const Envoy::Http::StreamDecoderFilterCallbacks* callbacks, - const Envoy::Http::HeaderMap& headers); + const Envoy::Http::HeaderMap& headers, uint64_t max_request_bytes); static void setAttrContextRequest(::envoy::service::auth::v2::AttributeContext_Request& req, const Envoy::Http::StreamDecoderFilterCallbacks* callbacks, - const Envoy::Http::HeaderMap& headers); + const Envoy::Http::HeaderMap& headers, + uint64_t max_request_bytes); static std::string getHeaderStr(const Envoy::Http::HeaderEntry* entry); static Envoy::Http::HeaderMap::Iterate fillHttpHeaders(const Envoy::Http::HeaderEntry&, void*); }; diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc index 1ea13f6542ec9..bf1b242256d08 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc @@ -13,12 +13,16 @@ namespace Filters { namespace Common { namespace ExtAuthz { +// Values used for selecting service paths. +// TODO(gsagula): keep only V2 when V2Alpha gets deprecated. +constexpr char V2[] = "envoy.service.auth.v2.Authorization.Check"; +constexpr char V2alpha[] = "envoy.service.auth.v2alpha.Authorization.Check"; + GrpcClientImpl::GrpcClientImpl(Grpc::AsyncClientPtr&& async_client, - const absl::optional& timeout) - : service_method_(*Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - // TODO(dio): Define the following service method name as a constant value. 
- "envoy.service.auth.v2.Authorization.Check")), - async_client_(std::move(async_client)), timeout_(timeout) {} + const absl::optional& timeout, + bool use_alpha) + : service_method_(getMethodDescriptor(use_alpha)), async_client_(std::move(async_client)), + timeout_(timeout) {} GrpcClientImpl::~GrpcClientImpl() { ASSERT(!callbacks_); } @@ -39,9 +43,7 @@ void GrpcClientImpl::check(RequestCallbacks& callbacks, void GrpcClientImpl::onSuccess(std::unique_ptr&& response, Tracing::Span& span) { - ASSERT(response->status().code() != Grpc::Status::GrpcStatus::Unknown); ResponsePtr authz_response = std::make_unique(Response{}); - if (response->status().code() == Grpc::Status::GrpcStatus::Ok) { span.setTag(Constants::get().TraceStatus, Constants::get().TraceOk); authz_response->status = CheckStatus::OK; @@ -89,6 +91,14 @@ void GrpcClientImpl::toAuthzResponseHeader( } } +const Protobuf::MethodDescriptor& GrpcClientImpl::getMethodDescriptor(bool use_alpha) { + const auto* descriptor = + use_alpha ? Protobuf::DescriptorPool::generated_pool()->FindMethodByName(V2alpha) + : Protobuf::DescriptorPool::generated_pool()->FindMethodByName(V2); + ASSERT(descriptor != nullptr); + return *descriptor; +} + } // namespace ExtAuthz } // namespace Common } // namespace Filters diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h index 0e805233d9b6a..24769dd3ccbe7 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h @@ -47,8 +47,9 @@ typedef ConstSingleton Constants; */ class GrpcClientImpl : public Client, public ExtAuthzAsyncCallbacks { public: + // TODO(gsagula): remove `use_alpha` param when V2Alpha gets deprecated. 
GrpcClientImpl(Grpc::AsyncClientPtr&& async_client, - const absl::optional& timeout); + const absl::optional& timeout, bool use_alpha); ~GrpcClientImpl(); // ExtAuthz::Client @@ -64,6 +65,7 @@ class GrpcClientImpl : public Client, public ExtAuthzAsyncCallbacks { Tracing::Span& span) override; private: + static const Protobuf::MethodDescriptor& getMethodDescriptor(bool use_alpha); void toAuthzResponseHeader( ResponsePtr& response, const Protobuf::RepeatedPtrField& headers); diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc index de58a3fae12a2..aff72f7a533fa 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc @@ -38,7 +38,8 @@ struct SuccessResponse { // UpstreamHeaderMatcher if (context->matchers_->matches(header.key().getStringView())) { context->response_->headers_to_add.emplace_back( - Http::LowerCaseString{header.key().c_str()}, header.value().c_str()); + Http::LowerCaseString{std::string(header.key().getStringView())}, + std::string(header.value().getStringView())); } return Http::HeaderMap::Iterate::Continue; }, @@ -161,7 +162,17 @@ void RawHttpClientImpl::check(RequestCallbacks& callbacks, ASSERT(callbacks_ == nullptr); callbacks_ = &callbacks; - Http::HeaderMapPtr headers = std::make_unique(lengthZeroHeader()); + Http::HeaderMapPtr headers; + const uint64_t request_length = request.attributes().request().http().body().size(); + if (request_length > 0) { + headers = + std::make_unique>>( + {{Http::Headers::get().ContentLength, std::to_string(request_length)}}); + } else { + headers = std::make_unique(lengthZeroHeader()); + } + for (const auto& header : request.attributes().request().http().headers()) { const Http::LowerCaseString key{header.first}; if (config_->requestHeaderMatchers()->matches(key.get())) { @@ -179,8 +190,14 @@ void 
RawHttpClientImpl::check(RequestCallbacks& callbacks, headers->setReference(header_to_add.first, header_to_add.second); } + Http::MessagePtr message = std::make_unique(std::move(headers)); + if (request_length > 0) { + message->body() = + std::make_unique(request.attributes().request().http().body()); + } + request_ = cm_.httpAsyncClientForCluster(config_->cluster()) - .send(std::make_unique(std::move(headers)), *this, + .send(std::move(message), *this, Http::AsyncClient::RequestOptions().setTimeout(config_->timeout())); } @@ -199,7 +216,7 @@ ResponsePtr RawHttpClientImpl::toResponse(Http::MessagePtr message) { // Set an error status if parsing status code fails. A Forbidden response is sent to the client // if the filter has not been configured with failure_mode_allow. uint64_t status_code{}; - if (!StringUtil::atoul(message->headers().Status()->value().c_str(), status_code)) { + if (!absl::SimpleAtoi(message->headers().Status()->value().getStringView(), &status_code)) { ENVOY_LOG(warn, "ext_authz HTTP client failed to parse the HTTP status code."); return std::make_unique(errorResponse()); } diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h index f553144260452..39d1666a7f267 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h @@ -82,13 +82,13 @@ class ClientConfig { const MatcherSharedPtr& requestHeaderMatchers() const { return request_header_matchers_; } /** - * Returns a list of matchers used for selecting the the authorization response headers that + * Returns a list of matchers used for selecting the authorization response headers that * should be send back to the client. 
*/ const MatcherSharedPtr& clientHeaderMatchers() const { return client_header_matchers_; } /** - * Returns a list of matchers used for selecting the the authorization response headers that + * Returns a list of matchers used for selecting the authorization response headers that * should be send to an the upstream server. */ const MatcherSharedPtr& upstreamHeaderMatchers() const { return upstream_header_matchers_; } diff --git a/source/extensions/filters/common/fault/BUILD b/source/extensions/filters/common/fault/BUILD new file mode 100644 index 0000000000000..b8607b4f861bf --- /dev/null +++ b/source/extensions/filters/common/fault/BUILD @@ -0,0 +1,20 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "fault_config_lib", + srcs = ["fault_config.cc"], + hdrs = ["fault_config.h"], + deps = [ + "//include/envoy/http:header_map_interface", + "//source/common/protobuf:utility_lib", + "@envoy_api//envoy/config/filter/fault/v2:fault_cc", + ], +) diff --git a/source/extensions/filters/common/fault/fault_config.cc b/source/extensions/filters/common/fault/fault_config.cc new file mode 100644 index 0000000000000..c4de2b1ffa546 --- /dev/null +++ b/source/extensions/filters/common/fault/fault_config.cc @@ -0,0 +1,78 @@ +#include "extensions/filters/common/fault/fault_config.h" + +#include "common/protobuf/utility.h" + +namespace Envoy { +namespace Extensions { +namespace Filters { +namespace Common { +namespace Fault { + +FaultDelayConfig::FaultDelayConfig(const envoy::config::filter::fault::v2::FaultDelay& delay_config) + : percentage_(delay_config.percentage()) { + switch (delay_config.fault_delay_secifier_case()) { + case envoy::config::filter::fault::v2::FaultDelay::kFixedDelay: + provider_ = std::make_unique( + std::chrono::milliseconds(PROTOBUF_GET_MS_REQUIRED(delay_config, fixed_delay))); + break; + case 
envoy::config::filter::fault::v2::FaultDelay::kHeaderDelay: + provider_ = std::make_unique(); + break; + case envoy::config::filter::fault::v2::FaultDelay::FAULT_DELAY_SECIFIER_NOT_SET: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +absl::optional +FaultDelayConfig::HeaderDelayProvider::duration(const Http::HeaderEntry* header) const { + if (header == nullptr) { + return absl::nullopt; + } + + uint64_t value; + if (!absl::SimpleAtoi(header->value().getStringView(), &value)) { + return absl::nullopt; + } + + return std::chrono::milliseconds(value); +} + +FaultRateLimitConfig::FaultRateLimitConfig( + const envoy::config::filter::fault::v2::FaultRateLimit& rate_limit_config) + : percentage_(rate_limit_config.percentage()) { + switch (rate_limit_config.limit_type_case()) { + case envoy::config::filter::fault::v2::FaultRateLimit::kFixedLimit: + provider_ = + std::make_unique(rate_limit_config.fixed_limit().limit_kbps()); + break; + case envoy::config::filter::fault::v2::FaultRateLimit::kHeaderLimit: + provider_ = std::make_unique(); + break; + case envoy::config::filter::fault::v2::FaultRateLimit::LIMIT_TYPE_NOT_SET: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +absl::optional +FaultRateLimitConfig::HeaderRateLimitProvider::rateKbps(const Http::HeaderEntry* header) const { + if (header == nullptr) { + return absl::nullopt; + } + + uint64_t value; + if (!absl::SimpleAtoi(header->value().getStringView(), &value)) { + return absl::nullopt; + } + + if (value == 0) { + return absl::nullopt; + } + + return value; +} + +} // namespace Fault +} // namespace Common +} // namespace Filters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/common/fault/fault_config.h b/source/extensions/filters/common/fault/fault_config.h new file mode 100644 index 0000000000000..61b3ada9eda70 --- /dev/null +++ b/source/extensions/filters/common/fault/fault_config.h @@ -0,0 +1,130 @@ +#pragma once + +#include "envoy/config/filter/fault/v2/fault.pb.h" +#include 
"envoy/http/header_map.h" + +#include "common/singleton/const_singleton.h" + +namespace Envoy { +namespace Extensions { +namespace Filters { +namespace Common { +namespace Fault { + +class HeaderNameValues { +public: + const Http::LowerCaseString DelayRequest{"x-envoy-fault-delay-request"}; + const Http::LowerCaseString ThroughputResponse{"x-envoy-fault-throughput-response"}; +}; + +typedef ConstSingleton HeaderNames; + +/** + * Generic configuration for a delay fault. + */ +class FaultDelayConfig { +public: + FaultDelayConfig(const envoy::config::filter::fault::v2::FaultDelay& delay_config); + + const envoy::type::FractionalPercent& percentage() const { return percentage_; } + absl::optional duration(const Http::HeaderEntry* header) const { + return provider_->duration(header); + } + +private: + // Abstract delay provider. + class DelayProvider { + public: + virtual ~DelayProvider() = default; + + // Return the duration to use. Optionally passed an HTTP header that may contain the delay + // depending on the provider implementation. + virtual absl::optional + duration(const Http::HeaderEntry* header) const PURE; + }; + + // Delay provider that uses a fixed delay. + class FixedDelayProvider : public DelayProvider { + public: + FixedDelayProvider(std::chrono::milliseconds delay) : delay_(delay) {} + + // DelayProvider + absl::optional duration(const Http::HeaderEntry*) const override { + return delay_; + } + + private: + const std::chrono::milliseconds delay_; + }; + + // Delay provider the reads a delay from an HTTP header. 
+ class HeaderDelayProvider : public DelayProvider { + public: + // DelayProvider + absl::optional + duration(const Http::HeaderEntry* header) const override; + }; + + using DelayProviderPtr = std::unique_ptr; + + DelayProviderPtr provider_; + const envoy::type::FractionalPercent percentage_; +}; + +using FaultDelayConfigPtr = std::unique_ptr; +using FaultDelayConfigSharedPtr = std::shared_ptr; + +/** + * Generic configuration for a rate limit fault. + */ +class FaultRateLimitConfig { +public: + FaultRateLimitConfig(const envoy::config::filter::fault::v2::FaultRateLimit& rate_limit_config); + + const envoy::type::FractionalPercent& percentage() const { return percentage_; } + absl::optional rateKbps(const Http::HeaderEntry* header) const { + return provider_->rateKbps(header); + } + +private: + // Abstract rate limit provider. + class RateLimitProvider { + public: + virtual ~RateLimitProvider() = default; + + // Return the rate limit to use in KiB/s. Optionally passed an HTTP header that may contain the + // rate limit depending on the provider implementation. + virtual absl::optional rateKbps(const Http::HeaderEntry* header) const PURE; + }; + + // Rate limit provider that uses a fixed rate limit. + class FixedRateLimitProvider : public RateLimitProvider { + public: + FixedRateLimitProvider(uint64_t fixed_rate_kbps) : fixed_rate_kbps_(fixed_rate_kbps) {} + absl::optional rateKbps(const Http::HeaderEntry*) const override { + return fixed_rate_kbps_; + } + + private: + const uint64_t fixed_rate_kbps_; + }; + + // Rate limit provider that reads the rate limit from an HTTP header. 
+ class HeaderRateLimitProvider : public RateLimitProvider { + public: + absl::optional rateKbps(const Http::HeaderEntry* header) const override; + }; + + using RateLimitProviderPtr = std::unique_ptr; + + RateLimitProviderPtr provider_; + const envoy::type::FractionalPercent percentage_; +}; + +using FaultRateLimitConfigPtr = std::unique_ptr; + +} // namespace Fault +} // namespace Common +} // namespace Filters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/common/lua/lua.h b/source/extensions/filters/common/lua/lua.h index a824a0c4634a8..872e0dba2e14e 100644 --- a/source/extensions/filters/common/lua/lua.h +++ b/source/extensions/filters/common/lua/lua.h @@ -44,7 +44,8 @@ namespace Lua { */ #define DECLARE_LUA_FUNCTION_EX(Class, Name, Index) \ static int static_##Name(lua_State* state) { \ - Class* object = static_cast(luaL_checkudata(state, Index, typeid(Class).name())); \ + Class* object = ::Envoy::Extensions::Filters::Common::Lua::alignAndCast( \ + luaL_checkudata(state, Index, typeid(Class).name())); \ object->checkDead(state); \ return object->Name(state); \ } \ @@ -60,6 +61,32 @@ namespace Lua { */ #define DECLARE_LUA_CLOSURE(Class, Name) DECLARE_LUA_FUNCTION_EX(Class, Name, lua_upvalueindex(1)) +/** + * Calculate the maximum space needed to be aligned. + */ +template constexpr size_t maximumSpaceNeededToAlign() { + // The allocated memory can be misaligned up to `alignof(T) - 1` bytes. Adding it to the size to + // allocate. + return sizeof(T) + alignof(T) - 1; +} + +template inline T* alignAndCast(void* mem) { + size_t size = maximumSpaceNeededToAlign(); + return static_cast(std::align(alignof(T), sizeof(T), mem, size)); +} + +/** + * Create a new user data and assign its metatable. 
+ */ +template inline T* allocateLuaUserData(lua_State* state) { + void* mem = lua_newuserdata(state, maximumSpaceNeededToAlign()); + luaL_getmetatable(state, typeid(T).name()); + ASSERT(lua_istable(state, -1)); + lua_setmetatable(state, -2); + + return alignAndCast(mem); +} + /** * This is the base class for all C++ objects that we expose out to Lua. The goal is to hide as * much ugliness as possible. In general, to use this, do the following: @@ -90,14 +117,9 @@ template class BaseLuaObject : protected Logger::Loggable static std::pair create(lua_State* state, ConstructorArgs&&... args) { - // Create a new user data and assign its metatable. - void* mem = lua_newuserdata(state, sizeof(T)); - luaL_getmetatable(state, typeid(T).name()); - ASSERT(lua_istable(state, -1)); - lua_setmetatable(state, -2); - // Memory is allocated via Lua and it is raw. We use placement new to run the constructor. - ENVOY_LOG(trace, "creating {} at {}", typeid(T).name(), mem); + T* mem = allocateLuaUserData(state); + ENVOY_LOG(trace, "creating {} at {}", typeid(T).name(), static_cast(mem)); return {new (mem) T(std::forward(args)...), state}; } @@ -119,7 +141,7 @@ template class BaseLuaObject : protected Logger::Loggable(luaL_checkudata(state, 1, typeid(T).name())); + T* object = alignAndCast(luaL_checkudata(state, 1, typeid(T).name())); ENVOY_LOG(trace, "destroying {} at {}", typeid(T).name(), static_cast(object)); object->~T(); return 0; diff --git a/source/extensions/filters/common/lua/wrappers.cc b/source/extensions/filters/common/lua/wrappers.cc index efb522fe18a92..399155a253e36 100644 --- a/source/extensions/filters/common/lua/wrappers.cc +++ b/source/extensions/filters/common/lua/wrappers.cc @@ -76,9 +76,8 @@ void MetadataMapHelper::setValue(lua_State* state, const ProtobufWkt::Value& val } } -void MetadataMapHelper::createTable( - lua_State* state, - const Protobuf::Map& fields) { +void MetadataMapHelper::createTable(lua_State* state, + const Protobuf::Map& fields) { 
lua_createtable(state, 0, fields.size()); for (const auto& field : fields) { int top = lua_gettop(state); diff --git a/source/extensions/filters/common/lua/wrappers.h b/source/extensions/filters/common/lua/wrappers.h index 2019a8887961f..5ac8ff217c3fb 100644 --- a/source/extensions/filters/common/lua/wrappers.h +++ b/source/extensions/filters/common/lua/wrappers.h @@ -44,9 +44,8 @@ class MetadataMapWrapper; struct MetadataMapHelper { static void setValue(lua_State* state, const ProtobufWkt::Value& value); - static void - createTable(lua_State* state, - const Protobuf::Map& fields); + static void createTable(lua_State* state, + const Protobuf::Map& fields); }; /** @@ -62,7 +61,7 @@ class MetadataMapIterator : public BaseLuaObject { private: MetadataMapWrapper& parent_; - Protobuf::Map::const_iterator current_; + Protobuf::Map::const_iterator current_; }; /** @@ -103,11 +102,11 @@ class MetadataMapWrapper : public BaseLuaObject { }; /** - * Lua wrapper for Ssl::Connection. + * Lua wrapper for Ssl::ConnectionInfo. */ class SslConnectionWrapper : public BaseLuaObject { public: - SslConnectionWrapper(const Ssl::Connection*) {} + SslConnectionWrapper(const Ssl::ConnectionInfo*) {} static ExportedFunctions exportedFunctions() { return {}; } // TODO(dio): Add more Lua APIs around Ssl::Connection. 
diff --git a/source/extensions/filters/common/ratelimit/BUILD b/source/extensions/filters/common/ratelimit/BUILD index d7009e81d64c5..50a91c7a50ccb 100644 --- a/source/extensions/filters/common/ratelimit/BUILD +++ b/source/extensions/filters/common/ratelimit/BUILD @@ -30,21 +30,6 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "ratelimit_registration_lib", - srcs = ["ratelimit_registration.cc"], - hdrs = ["ratelimit_registration.h"], - deps = [ - ":ratelimit_client_interface", - ":ratelimit_lib", - "//include/envoy/registry", - "//include/envoy/server:filter_config_interface", - "//include/envoy/server:instance_interface", - "//source/common/common:assert_lib", - "@envoy_api//envoy/config/bootstrap/v2:bootstrap_cc", - ], -) - envoy_cc_library( name = "ratelimit_client_interface", hdrs = ["ratelimit.h"], diff --git a/source/extensions/filters/common/ratelimit/ratelimit.h b/source/extensions/filters/common/ratelimit/ratelimit.h index 24d5050136f42..0da01c1f250e5 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit.h +++ b/source/extensions/filters/common/ratelimit/ratelimit.h @@ -76,27 +76,6 @@ class Client { typedef std::unique_ptr ClientPtr; -/** - * An interface for creating a rate limit client. - */ -class ClientFactory : public Singleton::Instance { -public: - virtual ~ClientFactory() {} - - /** - * Returns rate limit client from singleton manager. - */ - virtual ClientPtr create(const absl::optional& timeout) PURE; - - /** - * Returns configuration with which the factory has been built. 
- */ - virtual const absl::optional& - rateLimitConfig() const PURE; -}; - -typedef std::shared_ptr ClientFactoryPtr; - } // namespace RateLimit } // namespace Common } // namespace Filters diff --git a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc index 43066c35f0492..2c1a5da468d80 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc +++ b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc @@ -89,17 +89,16 @@ void GrpcClientImpl::onFailure(Grpc::Status::GrpcStatus status, const std::strin callbacks_ = nullptr; } -GrpcFactoryImpl::GrpcFactoryImpl(const envoy::config::ratelimit::v2::RateLimitServiceConfig& config, - Grpc::AsyncClientManager& async_client_manager, - Stats::Scope& scope) - : config_(config) { - envoy::api::v2::core::GrpcService grpc_service; - grpc_service.MergeFrom(config_->grpc_service()); - async_client_factory_ = async_client_manager.factoryForGrpcService(grpc_service, scope, false); -} - -ClientPtr GrpcFactoryImpl::create(const absl::optional& timeout) { - return std::make_unique(async_client_factory_->create(), timeout); +ClientPtr rateLimitClient(Server::Configuration::FactoryContext& context, + const envoy::api::v2::core::GrpcService& grpc_service, + const std::chrono::milliseconds timeout) { + // TODO(ramaraochavali): register client to singleton when GrpcClientImpl supports concurrent + // requests. 
+ const auto async_client_factory = + context.clusterManager().grpcAsyncClientManager().factoryForGrpcService( + grpc_service, context.scope(), true); + return std::make_unique( + async_client_factory->create(), timeout); } } // namespace RateLimit diff --git a/source/extensions/filters/common/ratelimit/ratelimit_impl.h b/source/extensions/filters/common/ratelimit/ratelimit_impl.h index e4f0300c330a9..7e24b0b849ed3 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit_impl.h +++ b/source/extensions/filters/common/ratelimit/ratelimit_impl.h @@ -9,6 +9,7 @@ #include "envoy/grpc/async_client.h" #include "envoy/grpc/async_client_manager.h" #include "envoy/ratelimit/ratelimit.h" +#include "envoy/server/filter_config.h" #include "envoy/service/ratelimit/v2/rls.pb.h" #include "envoy/stats/scope.h" #include "envoy/tracing/http_tracer.h" @@ -72,51 +73,12 @@ class GrpcClientImpl : public Client, RequestCallbacks* callbacks_{}; }; -class GrpcFactoryImpl : public ClientFactory { -public: - GrpcFactoryImpl(const envoy::config::ratelimit::v2::RateLimitServiceConfig& config, - Grpc::AsyncClientManager& async_client_manager, Stats::Scope& scope); - - // Filters::Common::RateLimit::ClientFactory - ClientPtr create(const absl::optional& timeout) override; - - const absl::optional& - rateLimitConfig() const override { - return config_; - } - -private: - Grpc::AsyncClientFactoryPtr async_client_factory_; - const absl::optional config_; -}; - -// TODO(ramaraochavali): NullClientImpl and NullFactoryImpl should be removed when we remove rate -// limit config from bootstrap. 
-class NullClientImpl : public Client { -public: - // Filters::Common::RateLimit::Client - void cancel() override {} - void limit(RequestCallbacks& callbacks, const std::string&, - const std::vector&, Tracing::Span&) override { - callbacks.complete(LimitStatus::OK, nullptr); - } -}; - -class NullFactoryImpl : public ClientFactory { -public: - // Filters::Common::RateLimit::ClientFactory - ClientPtr create(const absl::optional&) override { - return ClientPtr{new NullClientImpl()}; - } - - const absl::optional& - rateLimitConfig() const override { - return config_; - } - -private: - const absl::optional config_; -}; +/** + * Builds the rate limit client. + */ +ClientPtr rateLimitClient(Server::Configuration::FactoryContext& context, + const envoy::api::v2::core::GrpcService& grpc_service, + const std::chrono::milliseconds timeout); } // namespace RateLimit } // namespace Common diff --git a/source/extensions/filters/common/ratelimit/ratelimit_registration.cc b/source/extensions/filters/common/ratelimit/ratelimit_registration.cc deleted file mode 100644 index 7c8c3d2e571cf..0000000000000 --- a/source/extensions/filters/common/ratelimit/ratelimit_registration.cc +++ /dev/null @@ -1,71 +0,0 @@ -#include "extensions/filters/common/ratelimit/ratelimit_registration.h" - -#include "envoy/registry/registry.h" - -#include "common/common/assert.h" -#include "common/protobuf/protobuf.h" - -namespace Envoy { -namespace Extensions { -namespace Filters { -namespace Common { -namespace RateLimit { - -// Singleton registration via macro defined in envoy/singleton/manager.h -SINGLETON_MANAGER_REGISTRATION(ratelimit_factory); - -ClientFactoryPtr rateLimitClientFactory(Server::Instance& server, - Grpc::AsyncClientManager& async_client_manager, - const envoy::config::bootstrap::v2::Bootstrap& bootstrap) { - return server.singletonManager().getTyped( - SINGLETON_MANAGER_REGISTERED_NAME(ratelimit_factory), - [&bootstrap, &server, &async_client_manager] { - ClientFactoryPtr client_factory; 
- if (bootstrap.rate_limit_service().has_grpc_service()) { - client_factory = - std::make_shared( - bootstrap.rate_limit_service(), async_client_manager, server.stats()); - } else { - client_factory = - std::make_shared(); - } - return client_factory; - }); -} - -ClientFactoryPtr rateLimitClientFactory(Server::Configuration::FactoryContext& context) { - return context.singletonManager().getTyped( - SINGLETON_MANAGER_REGISTERED_NAME(ratelimit_factory), [] { - // This should never happen. We expect factory to be registered to singleton, during - // configuration processing in the core at start up. - NOT_REACHED_GCOVR_EXCL_LINE; - return nullptr; - }); -} - -ClientPtr rateLimitClient(ClientFactoryPtr client_factory, - Server::Configuration::FactoryContext& context, - const envoy::api::v2::core::GrpcService& grpc_service, - const std::chrono::milliseconds timeout) { - Filters::Common::RateLimit::ClientPtr ratelimit_client; - // If ratelimit service is defined in bootstrap, just use the factory registered to singleton, - // otherwise create it based on the filter config. - if (client_factory->rateLimitConfig().has_value()) { - ratelimit_client = client_factory->create(timeout); - } else { - // TODO(ramaraochavali): register this factory/client to singleton when bootstrap config is - // completely deleted. 
- const auto async_client_factory = - context.clusterManager().grpcAsyncClientManager().factoryForGrpcService( - grpc_service, context.scope(), true); - ratelimit_client = std::make_unique( - async_client_factory->create(), timeout); - } - return ratelimit_client; -} - -} // namespace RateLimit -} // namespace Common -} // namespace Filters -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/filters/common/ratelimit/ratelimit_registration.h b/source/extensions/filters/common/ratelimit/ratelimit_registration.h deleted file mode 100644 index 044cc32741a75..0000000000000 --- a/source/extensions/filters/common/ratelimit/ratelimit_registration.h +++ /dev/null @@ -1,51 +0,0 @@ -#pragma once - -#include "envoy/config/bootstrap/v2/bootstrap.pb.h" -#include "envoy/server/instance.h" - -#include "extensions/filters/common/ratelimit/ratelimit.h" -#include "extensions/filters/common/ratelimit/ratelimit_impl.h" - -namespace Envoy { -namespace Extensions { -namespace Filters { -namespace Common { -namespace RateLimit { - -/** - * Builds the ClientFactory and registers with singleton manager. - * @return ClientFactoryPtr the registered client factory. - */ -ClientFactoryPtr rateLimitClientFactory(Server::Instance& server, - Grpc::AsyncClientManager& async_client_manager, - const envoy::config::bootstrap::v2::Bootstrap& bootstrap); - -/** - * Returns the registered ClientFactory from singleton manager. - */ -ClientFactoryPtr rateLimitClientFactory(Server::Configuration::FactoryContext& context); - -/** - * Builds the rate limit client. - */ -ClientPtr rateLimitClient(ClientFactoryPtr ratelimit_factory, - Server::Configuration::FactoryContext& context, - const envoy::api::v2::core::GrpcService& grpc_service, - const std::chrono::milliseconds timeout); - -/** - * Validates the supplied filter config against the bootstrap config. 
- */ -template -void validateRateLimitConfig(RateLimitProtoConfig proto_config, ClientFactoryPtr client_factory) { - if (proto_config.has_rate_limit_service() && client_factory->rateLimitConfig().has_value() && - !Envoy::Protobuf::util::MessageDifferencer::Equals(*client_factory->rateLimitConfig(), - proto_config.rate_limit_service())) { - throw EnvoyException("rate limit service config in filter does not match with bootstrap"); - } -} -} // namespace RateLimit -} // namespace Common -} // namespace Filters -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/filters/common/rbac/matchers.cc b/source/extensions/filters/common/rbac/matchers.cc index 97f859e716773..5d44f09cd7441 100644 --- a/source/extensions/filters/common/rbac/matchers.cc +++ b/source/extensions/filters/common/rbac/matchers.cc @@ -141,8 +141,13 @@ bool AuthenticatedMatcher::matches(const Network::Connection& connection, return true; } - std::string principal = ssl->uriSanPeerCertificate(); - principal = principal.empty() ? 
ssl->subjectPeerCertificate() : principal; + const auto uriSans = ssl->uriSanPeerCertificate(); + std::string principal; + if (uriSans.empty()) { + principal = ssl->subjectPeerCertificate(); + } else { + principal = uriSans[0]; + } return matcher_.value().match(principal); } diff --git a/source/extensions/filters/http/common/aws/utility.cc b/source/extensions/filters/http/common/aws/utility.cc index 55f4a5aab08b4..88836f7b7721b 100644 --- a/source/extensions/filters/http/common/aws/utility.cc +++ b/source/extensions/filters/http/common/aws/utility.cc @@ -21,18 +21,18 @@ std::map Utility::canonicalizeHeaders(const Http::Head return Http::HeaderMap::Iterate::Continue; } // Pseudo-headers should not be canonicalized - if (entry.key().c_str()[0] == ':') { + if (!entry.key().getStringView().empty() && entry.key().getStringView()[0] == ':') { return Http::HeaderMap::Iterate::Continue; } std::string value(entry.value().getStringView()); // Remove leading, trailing, and deduplicate repeated ascii spaces absl::RemoveExtraAsciiWhitespace(&value); - const auto iter = map->find(entry.key().c_str()); + const auto iter = map->find(std::string(entry.key().getStringView())); // If the entry already exists, append the new value to the end if (iter != map->end()) { iter->second += fmt::format(",{}", value); } else { - map->emplace(entry.key().c_str(), value); + map->emplace(std::string(entry.key().getStringView()), value); } return Http::HeaderMap::Iterate::Continue; }, @@ -91,4 +91,4 @@ Utility::joinCanonicalHeaderNames(const std::map& cano } // namespace Common } // namespace HttpFilters } // namespace Extensions -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/extensions/filters/http/cors/cors_filter.cc b/source/extensions/filters/http/cors/cors_filter.cc index 42443222d41b3..997beb5385fc2 100644 --- a/source/extensions/filters/http/cors/cors_filter.cc +++ b/source/extensions/filters/http/cors/cors_filter.cc @@ -54,7 +54,8 @@ 
Http::FilterHeadersStatus CorsFilter::decodeHeaders(Http::HeaderMap& headers, bo is_cors_request_ = true; const auto method = headers.Method(); - if (method == nullptr || method->value().c_str() != Http::Headers::get().MethodValues.Options) { + if (method == nullptr || + method->value().getStringView() != Http::Headers::get().MethodValues.Options) { return Http::FilterHeadersStatus::Continue; } @@ -134,7 +135,8 @@ bool CorsFilter::isOriginAllowedRegex(const Http::HeaderString& origin) { return false; } for (const auto& regex : *allowOriginRegexes()) { - if (std::regex_match(origin.c_str(), regex)) { + const absl::string_view origin_view = origin.getStringView(); + if (std::regex_match(origin_view.begin(), origin_view.end(), regex)) { return true; } } diff --git a/source/extensions/filters/http/csrf/BUILD b/source/extensions/filters/http/csrf/BUILD new file mode 100644 index 0000000000000..b9b6fd26b0071 --- /dev/null +++ b/source/extensions/filters/http/csrf/BUILD @@ -0,0 +1,39 @@ +licenses(["notice"]) # Apache 2 + +# L7 HTTP filter which implements CSRF processing (https://www.owasp.org/index.php/Cross-Site_Request_Forgery_(CSRF)) +# Public docs: docs/root/configuration/http_filters/csrf_filter.rst + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "csrf_filter_lib", + srcs = ["csrf_filter.cc"], + hdrs = ["csrf_filter.h"], + deps = [ + "//include/envoy/http:filter_interface", + "//source/common/buffer:buffer_lib", + "//source/common/http:header_map_lib", + "//source/common/http:headers_lib", + "//source/common/http:utility_lib", + "//source/extensions/filters/http:well_known_names", + "@envoy_api//envoy/config/filter/http/csrf/v2:csrf_cc", + ], +) + +envoy_cc_library( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + deps = [ + "//include/envoy/registry", + "//source/extensions/filters/http:well_known_names", + 
"//source/extensions/filters/http/common:factory_base_lib", + "//source/extensions/filters/http/csrf:csrf_filter_lib", + ], +) diff --git a/source/extensions/filters/http/csrf/config.cc b/source/extensions/filters/http/csrf/config.cc new file mode 100644 index 0000000000000..d9f76e23fbea5 --- /dev/null +++ b/source/extensions/filters/http/csrf/config.cc @@ -0,0 +1,38 @@ +#include "extensions/filters/http/csrf/config.h" + +#include "envoy/config/filter/http/csrf/v2/csrf.pb.validate.h" +#include "envoy/registry/registry.h" + +#include "extensions/filters/http/csrf/csrf_filter.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Csrf { + +Http::FilterFactoryCb CsrfFilterFactory::createFilterFactoryFromProtoTyped( + const envoy::config::filter::http::csrf::v2::CsrfPolicy& policy, + const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { + CsrfFilterConfigSharedPtr config = + std::make_shared(policy, stats_prefix, context.scope(), context.runtime()); + return [config](Http::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamDecoderFilter(std::make_shared(config)); + }; +} + +Router::RouteSpecificFilterConfigConstSharedPtr +CsrfFilterFactory::createRouteSpecificFilterConfigTyped( + const envoy::config::filter::http::csrf::v2::CsrfPolicy& policy, + Server::Configuration::FactoryContext& context) { + return std::make_shared(policy, context.runtime()); +} + +/** + * Static registration for the CSRF filter. @see RegisterFactory. 
+ */ +REGISTER_FACTORY(CsrfFilterFactory, Server::Configuration::NamedHttpFilterConfigFactory); + +} // namespace Csrf +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/csrf/config.h b/source/extensions/filters/http/csrf/config.h new file mode 100644 index 0000000000000..b7cd4bc77b0a4 --- /dev/null +++ b/source/extensions/filters/http/csrf/config.h @@ -0,0 +1,35 @@ +#pragma once + +#include "envoy/config/filter/http/csrf/v2/csrf.pb.h" +#include "envoy/config/filter/http/csrf/v2/csrf.pb.validate.h" + +#include "extensions/filters/http/common/factory_base.h" +#include "extensions/filters/http/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Csrf { + +/** + * Config registration for the CSRF filter. @see NamedHttpFilterConfigFactory. + */ +class CsrfFilterFactory + : public Common::FactoryBase { +public: + CsrfFilterFactory() : FactoryBase(HttpFilterNames::get().Csrf) {} + +private: + Http::FilterFactoryCb + createFilterFactoryFromProtoTyped(const envoy::config::filter::http::csrf::v2::CsrfPolicy& policy, + const std::string& stats_prefix, + Server::Configuration::FactoryContext& context) override; + Router::RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfigTyped( + const envoy::config::filter::http::csrf::v2::CsrfPolicy& policy, + Server::Configuration::FactoryContext& context) override; +}; + +} // namespace Csrf +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/csrf/csrf_filter.cc b/source/extensions/filters/http/csrf/csrf_filter.cc new file mode 100644 index 0000000000000..b536ef914a2ea --- /dev/null +++ b/source/extensions/filters/http/csrf/csrf_filter.cc @@ -0,0 +1,128 @@ +#include "extensions/filters/http/csrf/csrf_filter.h" + +#include "envoy/stats/scope.h" + +#include "common/common/empty_string.h" +#include "common/http/header_map_impl.h" 
+#include "common/http/headers.h" +#include "common/http/utility.h" + +#include "extensions/filters/http/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Csrf { + +struct RcDetailsValues { + const std::string OriginMismatch = "csrf_origin_mismatch"; +}; +typedef ConstSingleton RcDetails; + +namespace { +bool isModifyMethod(const Http::HeaderMap& headers) { + const Envoy::Http::HeaderEntry* method = headers.Method(); + if (method == nullptr) { + return false; + } + const absl::string_view method_type = method->value().getStringView(); + const auto& method_values = Http::Headers::get().MethodValues; + return (method_type == method_values.Post || method_type == method_values.Put || + method_type == method_values.Delete); +} + +absl::string_view hostAndPort(const Http::HeaderEntry* header) { + Http::Utility::Url absolute_url; + if (header != nullptr && !header->value().empty()) { + if (absolute_url.initialize(header->value().getStringView())) { + return absolute_url.host_and_port(); + } + return header->value().getStringView(); + } + return EMPTY_STRING; +} + +absl::string_view sourceOriginValue(const Http::HeaderMap& headers) { + const absl::string_view origin = hostAndPort(headers.Origin()); + if (origin != EMPTY_STRING) { + return origin; + } + return hostAndPort(headers.Referer()); +} + +absl::string_view targetOriginValue(const Http::HeaderMap& headers) { + return hostAndPort(headers.Host()); +} + +static CsrfStats generateStats(const std::string& prefix, Stats::Scope& scope) { + const std::string final_prefix = prefix + "csrf."; + return CsrfStats{ALL_CSRF_STATS(POOL_COUNTER_PREFIX(scope, final_prefix))}; +} + +static const CsrfPolicy +generatePolicy(const envoy::config::filter::http::csrf::v2::CsrfPolicy& policy, + Runtime::Loader& runtime) { + return CsrfPolicy(policy, runtime); +} +} // namespace + +CsrfFilterConfig::CsrfFilterConfig(const envoy::config::filter::http::csrf::v2::CsrfPolicy& policy, + const 
std::string& stats_prefix, Stats::Scope& scope, + Runtime::Loader& runtime) + : stats_(generateStats(stats_prefix, scope)), policy_(generatePolicy(policy, runtime)) {} + +CsrfFilter::CsrfFilter(const CsrfFilterConfigSharedPtr config) : config_(config) {} + +Http::FilterHeadersStatus CsrfFilter::decodeHeaders(Http::HeaderMap& headers, bool) { + determinePolicy(); + + if (!policy_->enabled() && !policy_->shadowEnabled()) { + return Http::FilterHeadersStatus::Continue; + } + + if (!isModifyMethod(headers)) { + return Http::FilterHeadersStatus::Continue; + } + + bool is_valid = true; + const absl::string_view source_origin = sourceOriginValue(headers); + if (source_origin == EMPTY_STRING) { + is_valid = false; + config_->stats().missing_source_origin_.inc(); + } + + const absl::string_view target_origin = targetOriginValue(headers); + if (source_origin != target_origin) { + is_valid = false; + config_->stats().request_invalid_.inc(); + } + + if (is_valid == true) { + config_->stats().request_valid_.inc(); + return Http::FilterHeadersStatus::Continue; + } + + if (policy_->shadowEnabled() && !policy_->enabled()) { + return Http::FilterHeadersStatus::Continue; + } + + callbacks_->sendLocalReply(Http::Code::Forbidden, "Invalid origin", nullptr, absl::nullopt, + RcDetails::get().OriginMismatch); + return Http::FilterHeadersStatus::StopIteration; +} + +void CsrfFilter::determinePolicy() { + const std::string& name = Extensions::HttpFilters::HttpFilterNames::get().Csrf; + const CsrfPolicy* policy = + Http::Utility::resolveMostSpecificPerFilterConfig(name, callbacks_->route()); + if (policy != nullptr) { + policy_ = policy; + } else { + policy_ = config_->policy(); + } +} + +} // namespace Csrf +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/csrf/csrf_filter.h b/source/extensions/filters/http/csrf/csrf_filter.h new file mode 100644 index 0000000000000..392e5e2a95a68 --- /dev/null +++ 
b/source/extensions/filters/http/csrf/csrf_filter.h @@ -0,0 +1,109 @@ +#pragma once + +#include "envoy/api/v2/route/route.pb.h" +#include "envoy/config/filter/http/csrf/v2/csrf.pb.h" +#include "envoy/http/filter.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" + +#include "common/buffer/buffer_impl.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Csrf { + +/** + * All CSRF filter stats. @see stats_macros.h + */ +// clang-format off +#define ALL_CSRF_STATS(COUNTER) \ + COUNTER(missing_source_origin)\ + COUNTER(request_invalid) \ + COUNTER(request_valid) \ +// clang-format on + +/** + * Struct definition for CSRF stats. @see stats_macros.h + */ +struct CsrfStats { + ALL_CSRF_STATS(GENERATE_COUNTER_STRUCT) +}; + +/** + * Configuration for CSRF policy. + */ +class CsrfPolicy : public Router::RouteSpecificFilterConfig { +public: + CsrfPolicy(const envoy::config::filter::http::csrf::v2::CsrfPolicy& policy, + Runtime::Loader& runtime) : policy_(policy), runtime_(runtime) {} + + bool enabled() const { + const envoy::api::v2::core::RuntimeFractionalPercent& filter_enabled = policy_.filter_enabled(); + return runtime_.snapshot().featureEnabled(filter_enabled.runtime_key(), + filter_enabled.default_value()); + } + + bool shadowEnabled() const { + if (!policy_.has_shadow_enabled()) { + return false; + } + const envoy::api::v2::core::RuntimeFractionalPercent& shadow_enabled = policy_.shadow_enabled(); + return runtime_.snapshot().featureEnabled(shadow_enabled.runtime_key(), + shadow_enabled.default_value()); + } + +private: + const envoy::config::filter::http::csrf::v2::CsrfPolicy policy_; + Runtime::Loader& runtime_; +}; + +/** + * Configuration for the CSRF filter. 
+ */ +class CsrfFilterConfig { +public: + CsrfFilterConfig(const envoy::config::filter::http::csrf::v2::CsrfPolicy& policy, + const std::string& stats_prefix, Stats::Scope& scope, + Runtime::Loader& runtime); + + CsrfStats& stats() { return stats_; } + const CsrfPolicy* policy() { return &policy_; } + +private: + CsrfStats stats_; + const CsrfPolicy policy_; +}; +typedef std::shared_ptr CsrfFilterConfigSharedPtr; + +class CsrfFilter : public Http::StreamDecoderFilter { +public: + CsrfFilter(CsrfFilterConfigSharedPtr config); + + // Http::StreamFilterBase + void onDestroy() override {} + + // Http::StreamDecoderFilter + Http::FilterHeadersStatus decodeHeaders(Http::HeaderMap& headers, bool end_stream) override; + Http::FilterDataStatus decodeData(Buffer::Instance&, bool) override { + return Http::FilterDataStatus::Continue; + }; + Http::FilterTrailersStatus decodeTrailers(Http::HeaderMap&) override { + return Http::FilterTrailersStatus::Continue; + }; + void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override { + callbacks_ = &callbacks; + }; + +private: + void determinePolicy(); + + Http::StreamDecoderFilterCallbacks* callbacks_{}; + CsrfFilterConfigSharedPtr config_; + const CsrfPolicy* policy_; +}; + +} // namespace Csrf +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/dynamo/config.cc b/source/extensions/filters/http/dynamo/config.cc index f95f46268dba8..7355e46760f63 100644 --- a/source/extensions/filters/http/dynamo/config.cc +++ b/source/extensions/filters/http/dynamo/config.cc @@ -16,7 +16,7 @@ DynamoFilterConfig::createFilter(const std::string& stat_prefix, Server::Configuration::FactoryContext& context) { return [&context, stat_prefix](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamFilter(Http::StreamFilterSharedPtr{new Dynamo::DynamoFilter( - context.runtime(), stat_prefix, context.scope(), context.dispatcher().timeSystem())}); 
+ context.runtime(), stat_prefix, context.scope(), context.dispatcher().timeSource())}); }; } diff --git a/source/extensions/filters/http/dynamo/dynamo_filter.cc b/source/extensions/filters/http/dynamo/dynamo_filter.cc index c16a443869de7..cb7ee4c357025 100644 --- a/source/extensions/filters/http/dynamo/dynamo_filter.cc +++ b/source/extensions/filters/http/dynamo/dynamo_filter.cc @@ -24,7 +24,7 @@ namespace Dynamo { Http::FilterHeadersStatus DynamoFilter::decodeHeaders(Http::HeaderMap& headers, bool) { if (enabled_) { - start_decode_ = time_system_.monotonicTime(); + start_decode_ = time_source_.monotonicTime(); operation_ = RequestParser::parseOperation(headers); return Http::FilterHeadersStatus::StopIteration; } else { @@ -173,7 +173,7 @@ void DynamoFilter::chargeBasicStats(uint64_t status) { void DynamoFilter::chargeStatsPerEntity(const std::string& entity, const std::string& entity_type, uint64_t status) { std::chrono::milliseconds latency = std::chrono::duration_cast( - time_system_.monotonicTime() - start_decode_); + time_source_.monotonicTime() - start_decode_); std::string group_string = Http::CodeUtility::groupStringForResponseCode(static_cast(status)); @@ -237,9 +237,8 @@ void DynamoFilter::chargeTablePartitionIdStats(const Json::Object& json_body) { std::vector partitions = RequestParser::parsePartitions(json_body); for (const RequestParser::PartitionDescriptor& partition : partitions) { - std::string scope_string = - Utility::buildPartitionStatString(stat_prefix_, table_descriptor_.table_name, operation_, - partition.partition_id_, scope_.statsOptions()); + std::string scope_string = Utility::buildPartitionStatString( + stat_prefix_, table_descriptor_.table_name, operation_, partition.partition_id_); scope_.counter(scope_string).add(partition.capacity_); } } diff --git a/source/extensions/filters/http/dynamo/dynamo_filter.h b/source/extensions/filters/http/dynamo/dynamo_filter.h index 3f3b765022bbf..3de6e378db1fc 100644 --- 
a/source/extensions/filters/http/dynamo/dynamo_filter.h +++ b/source/extensions/filters/http/dynamo/dynamo_filter.h @@ -25,9 +25,9 @@ namespace Dynamo { class DynamoFilter : public Http::StreamFilter { public: DynamoFilter(Runtime::Loader& runtime, const std::string& stat_prefix, Stats::Scope& scope, - Event::TimeSystem& time_system) + TimeSource& time_system) : runtime_(runtime), stat_prefix_(stat_prefix + "dynamodb."), scope_(scope), - time_system_(time_system) { + time_source_(time_system) { enabled_ = runtime_.snapshot().featureEnabled("dynamodb.filter_enabled", 100); } @@ -79,7 +79,7 @@ class DynamoFilter : public Http::StreamFilter { Http::HeaderMap* response_headers_; Http::StreamDecoderFilterCallbacks* decoder_callbacks_{}; Http::StreamEncoderFilterCallbacks* encoder_callbacks_{}; - Event::TimeSystem& time_system_; + TimeSource& time_source_; }; } // namespace Dynamo diff --git a/source/extensions/filters/http/dynamo/dynamo_request_parser.cc b/source/extensions/filters/http/dynamo/dynamo_request_parser.cc index ab427876a20c8..3ddf79653ad28 100644 --- a/source/extensions/filters/http/dynamo/dynamo_request_parser.cc +++ b/source/extensions/filters/http/dynamo/dynamo_request_parser.cc @@ -59,7 +59,7 @@ std::string RequestParser::parseOperation(const Http::HeaderMap& headerMap) { const Http::HeaderEntry* x_amz_target = headerMap.get(X_AMZ_TARGET); if (x_amz_target) { // Normally x-amz-target contains Version.Operation, e.g., DynamoDB_20160101.GetItem - auto version_and_operation = StringUtil::splitToken(x_amz_target->value().c_str(), "."); + auto version_and_operation = StringUtil::splitToken(x_amz_target->value().getStringView(), "."); if (version_and_operation.size() == 2) { operation = std::string{version_and_operation[1]}; } diff --git a/source/extensions/filters/http/dynamo/dynamo_utility.cc b/source/extensions/filters/http/dynamo/dynamo_utility.cc index 6f5c60627a85b..a71408439fc31 100644 --- a/source/extensions/filters/http/dynamo/dynamo_utility.cc +++ 
b/source/extensions/filters/http/dynamo/dynamo_utility.cc @@ -2,8 +2,6 @@ #include -#include "envoy/stats/stats_options.h" - #include "common/common/fmt.h" namespace Envoy { @@ -14,21 +12,12 @@ namespace Dynamo { std::string Utility::buildPartitionStatString(const std::string& stat_prefix, const std::string& table_name, const std::string& operation, - const std::string& partition_id, - const Stats::StatsOptions& stats_options) { + const std::string& partition_id) { // Use the last 7 characters of the partition id. std::string stats_partition_postfix = fmt::format(".capacity.{}.__partition_id={}", operation, partition_id.substr(partition_id.size() - 7, partition_id.size())); - - // Calculate how many characters are available for the table prefix. - size_t remaining_size = stats_options.maxNameLength() - stats_partition_postfix.size(); - std::string stats_table_prefix = fmt::format("{}table.{}", stat_prefix, table_name); - // Truncate the table prefix if the current string is too large. - if (stats_table_prefix.size() > remaining_size) { - stats_table_prefix = stats_table_prefix.substr(0, remaining_size); - } return fmt::format("{}{}", stats_table_prefix, stats_partition_postfix); } diff --git a/source/extensions/filters/http/dynamo/dynamo_utility.h b/source/extensions/filters/http/dynamo/dynamo_utility.h index 435d8f825c525..87b4bd0bc119f 100644 --- a/source/extensions/filters/http/dynamo/dynamo_utility.h +++ b/source/extensions/filters/http/dynamo/dynamo_utility.h @@ -2,8 +2,6 @@ #include -#include "envoy/stats/stats_options.h" - namespace Envoy { namespace Extensions { namespace HttpFilters { @@ -25,8 +23,7 @@ class Utility { static std::string buildPartitionStatString(const std::string& stat_prefix, const std::string& table_name, const std::string& operation, - const std::string& partition_id, - const Stats::StatsOptions& stats_options); + const std::string& partition_id); }; } // namespace Dynamo diff --git a/source/extensions/filters/http/ext_authz/BUILD 
b/source/extensions/filters/http/ext_authz/BUILD index 423a8b51b338d..f5b4e7eacea12 100644 --- a/source/extensions/filters/http/ext_authz/BUILD +++ b/source/extensions/filters/http/ext_authz/BUILD @@ -24,6 +24,7 @@ envoy_cc_library( "//source/common/common:matchers_lib", "//source/common/common:minimal_logger_lib", "//source/common/http:codes_lib", + "//source/common/http:utility_lib", "//source/common/router:config_lib", "//source/extensions/filters/common/ext_authz:ext_authz_grpc_lib", "//source/extensions/filters/common/ext_authz:ext_authz_http_lib", diff --git a/source/extensions/filters/http/ext_authz/config.cc b/source/extensions/filters/http/ext_authz/config.cc index ead40305a4f74..fb7ba3895e4e2 100644 --- a/source/extensions/filters/http/ext_authz/config.cc +++ b/source/extensions/filters/http/ext_authz/config.cc @@ -42,13 +42,14 @@ Http::FilterFactoryCb ExtAuthzFilterConfig::createFilterFactoryFromProtoTyped( // gRPC client. const uint32_t timeout_ms = PROTOBUF_GET_MS_OR_DEFAULT(proto_config.grpc_service(), timeout, DefaultTimeout); - callback = [grpc_service = proto_config.grpc_service(), &context, filter_config, - timeout_ms](Http::FilterChainFactoryCallbacks& callbacks) { + callback = [grpc_service = proto_config.grpc_service(), &context, filter_config, timeout_ms, + use_alpha = + proto_config.use_alpha()](Http::FilterChainFactoryCallbacks& callbacks) { const auto async_client_factory = context.clusterManager().grpcAsyncClientManager().factoryForGrpcService( grpc_service, context.scope(), true); auto client = std::make_unique( - async_client_factory->create(), std::chrono::milliseconds(timeout_ms)); + async_client_factory->create(), std::chrono::milliseconds(timeout_ms), use_alpha); callbacks.addStreamDecoderFilter(Http::StreamDecoderFilterSharedPtr{ std::make_shared(filter_config, std::move(client))}); }; diff --git a/source/extensions/filters/http/ext_authz/ext_authz.cc b/source/extensions/filters/http/ext_authz/ext_authz.cc index 
a4dac53b8f2ad..c7b4bb98779a7 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.cc +++ b/source/extensions/filters/http/ext_authz/ext_authz.cc @@ -3,6 +3,7 @@ #include "common/common/assert.h" #include "common/common/enum_to_int.h" #include "common/http/codes.h" +#include "common/http/utility.h" #include "common/router/config_impl.h" #include "extensions/filters/http/well_known_names.h" @@ -12,6 +13,14 @@ namespace Extensions { namespace HttpFilters { namespace ExtAuthz { +struct RcDetailsValues { + // The ext_authz filter denied the downstream request. + const std::string AuthzDenied = "ext_authz_denied"; + // The ext_authz filter encountered a failure, and was configured to fail-closed. + const std::string AuthzError = "ext_authz_error"; +}; +typedef ConstSingleton RcDetails; + void FilterConfigPerRoute::merge(const FilterConfigPerRoute& other) { disabled_ = other.disabled_; auto begin_it = other.context_extensions_.begin(); @@ -49,38 +58,69 @@ void Filter::initiateCall(const Http::HeaderMap& headers) { cfg_base.merge(cfg); }); - Protobuf::Map context_extensions; + Protobuf::Map context_extensions; if (maybe_merged_per_route_config) { context_extensions = maybe_merged_per_route_config.value().takeContextExtensions(); } Filters::Common::ExtAuthz::CheckRequestUtils::createHttpCheck( - callbacks_, headers, std::move(context_extensions), check_request_); + callbacks_, headers, std::move(context_extensions), check_request_, + config_->maxRequestBytes()); + ENVOY_STREAM_LOG(trace, "ext_authz filter calling authorization server", *callbacks_); state_ = State::Calling; - // Don't let the filter chain continue as we are going to invoke check call. - filter_return_ = FilterReturn::StopDecoding; + filter_return_ = FilterReturn::StopDecoding; // Don't let the filter chain continue as we are + // going to invoke check call. 
initiating_call_ = true; - ENVOY_STREAM_LOG(trace, "ext_authz filter calling authorization server", *callbacks_); client_->check(*this, check_request_, callbacks_->activeSpan()); initiating_call_ = false; } -Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool) { +Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool end_stream) { request_headers_ = &headers; + buffer_data_ = config_->withRequestBody() && + !(end_stream || Http::Utility::isWebSocketUpgradeRequest(headers) || + Http::Utility::isH2UpgradeRequest(headers)); + if (buffer_data_) { + ENVOY_STREAM_LOG(debug, "ext_authz filter is buffering the request", *callbacks_); + if (!config_->allowPartialMessage()) { + callbacks_->setDecoderBufferLimit(config_->maxRequestBytes()); + } + return Http::FilterHeadersStatus::StopIteration; + } + initiateCall(headers); - return filter_return_ == FilterReturn::StopDecoding ? Http::FilterHeadersStatus::StopIteration - : Http::FilterHeadersStatus::Continue; + return filter_return_ == FilterReturn::StopDecoding + ? Http::FilterHeadersStatus::StopAllIterationAndWatermark + : Http::FilterHeadersStatus::Continue; } -Http::FilterDataStatus Filter::decodeData(Buffer::Instance&, bool) { - return filter_return_ == FilterReturn::StopDecoding - ? Http::FilterDataStatus::StopIterationAndWatermark - : Http::FilterDataStatus::Continue; +Http::FilterDataStatus Filter::decodeData(Buffer::Instance&, bool end_stream) { + if (buffer_data_) { + if (end_stream || isBufferFull()) { + ENVOY_STREAM_LOG(debug, "ext_authz filter finished buffering the request", *callbacks_); + initiateCall(*request_headers_); + return filter_return_ == FilterReturn::StopDecoding + ? 
Http::FilterDataStatus::StopIterationAndWatermark + : Http::FilterDataStatus::Continue; + } else { + return Http::FilterDataStatus::StopIterationAndBuffer; + } + } + + return Http::FilterDataStatus::Continue; } Http::FilterTrailersStatus Filter::decodeTrailers(Http::HeaderMap&) { - return filter_return_ == FilterReturn::StopDecoding ? Http::FilterTrailersStatus::StopIteration - : Http::FilterTrailersStatus::Continue; + if (buffer_data_) { + if (filter_return_ != FilterReturn::StopDecoding) { + ENVOY_STREAM_LOG(debug, "ext_authz filter finished buffering the request", *callbacks_); + initiateCall(*request_headers_); + } + return filter_return_ == FilterReturn::StopDecoding ? Http::FilterTrailersStatus::StopIteration + : Http::FilterTrailersStatus::Continue; + } + + return Http::FilterTrailersStatus::Continue; } void Filter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) { @@ -122,29 +162,33 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { break; } - ENVOY_STREAM_LOG(trace, "ext_authz received status code {}", *callbacks_, + ENVOY_STREAM_LOG(trace, "ext_authz filter received status code {}", *callbacks_, enumToInt(response->status_code)); // We fail open/fail close based of filter config // if there is an error contacting the service. 
if (response->status == CheckStatus::Denied || (response->status == CheckStatus::Error && !config_->failureModeAllow())) { - ENVOY_STREAM_LOG(debug, "ext_authz rejected the request", *callbacks_); - ENVOY_STREAM_LOG(trace, "ext_authz downstream header(s):", *callbacks_); - callbacks_->sendLocalReply(response->status_code, response->body, - [& headers = response->headers_to_add, &callbacks = *callbacks_]( - Http::HeaderMap& response_headers) -> void { - for (const auto& header : headers) { - response_headers.remove(header.first); - response_headers.addCopy(header.first, header.second); - ENVOY_STREAM_LOG(trace, " '{}':'{}'", callbacks, - header.first.get(), header.second); - } - }, - absl::nullopt); + ENVOY_STREAM_LOG(debug, "ext_authz filter rejected the request", *callbacks_); + const std::string& details = response->status == CheckStatus::Denied + ? RcDetails::get().AuthzDenied + : RcDetails::get().AuthzError; + callbacks_->sendLocalReply( + response->status_code, response->body, + [& headers = response->headers_to_add, + &callbacks = *callbacks_](Http::HeaderMap& response_headers) -> void { + ENVOY_STREAM_LOG(trace, + "ext_authz filter added header(s) to the local response:", callbacks); + for (const auto& header : headers) { + response_headers.remove(header.first); + response_headers.addCopy(header.first, header.second); + ENVOY_STREAM_LOG(trace, " '{}':'{}'", callbacks, header.first.get(), header.second); + } + }, + absl::nullopt, details); callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UnauthorizedExternalService); } else { - ENVOY_STREAM_LOG(debug, "ext_authz accepted the request", *callbacks_); + ENVOY_STREAM_LOG(debug, "ext_authz filter accepted the request", *callbacks_); // Let the filter chain continue. 
filter_return_ = FilterReturn::ContinueDecoding; if (config_->failureModeAllow() && response->status == CheckStatus::Error) { @@ -153,7 +197,13 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { } // Only send headers if the response is ok. if (response->status == CheckStatus::OK) { - ENVOY_STREAM_LOG(trace, "ext_authz upstream header(s):", *callbacks_); + ENVOY_STREAM_LOG(trace, "ext_authz filter added header(s) to the request:", *callbacks_); + if (config_->clearRouteCache() && + (!response->headers_to_add.empty() || !response->headers_to_append.empty())) { + ENVOY_STREAM_LOG(debug, "ext_authz is clearing route cache", *callbacks_); + callbacks_->clearRouteCache(); + } + for (const auto& header : response->headers_to_add) { Http::HeaderEntry* header_to_modify = request_headers_->get(header.first); if (header_to_modify) { @@ -179,6 +229,14 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { } } +bool Filter::isBufferFull() { + const auto* buffer = callbacks_->decodingBuffer(); + if (config_->allowPartialMessage() && buffer != nullptr) { + return buffer->length() >= config_->maxRequestBytes(); + } + return false; +} + } // namespace ExtAuthz } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/ext_authz/ext_authz.h b/source/extensions/filters/http/ext_authz/ext_authz.h index 1993ec641c18e..e32bea814b3c6 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.h +++ b/source/extensions/filters/http/ext_authz/ext_authz.h @@ -39,18 +39,35 @@ class FilterConfig { FilterConfig(const envoy::config::filter::http::ext_authz::v2::ExtAuthz& config, const LocalInfo::LocalInfo& local_info, Stats::Scope& scope, Runtime::Loader& runtime, Http::Context& http_context) - : failure_mode_allow_(config.failure_mode_allow()), local_info_(local_info), scope_(scope), - runtime_(runtime), http_context_(http_context) {} + : 
allow_partial_message_(config.with_request_body().allow_partial_message()), + failure_mode_allow_(config.failure_mode_allow()), + clear_route_cache_(config.clear_route_cache()), + max_request_bytes_(config.with_request_body().max_request_bytes()), local_info_(local_info), + scope_(scope), runtime_(runtime), http_context_(http_context) {} + + bool allowPartialMessage() const { return allow_partial_message_; } + + bool withRequestBody() const { return max_request_bytes_ > 0; } bool failureModeAllow() const { return failure_mode_allow_; } + + bool clearRouteCache() const { return clear_route_cache_; } + + uint32_t maxRequestBytes() const { return max_request_bytes_; } + const LocalInfo::LocalInfo& localInfo() const { return local_info_; } + Runtime::Loader& runtime() { return runtime_; } + Stats::Scope& scope() { return scope_; } Http::Context& httpContext() { return http_context_; } private: - bool failure_mode_allow_{}; + const bool allow_partial_message_; + const bool failure_mode_allow_; + const bool clear_route_cache_; + const uint32_t max_request_bytes_; const LocalInfo::LocalInfo& local_info_; Stats::Scope& scope_; Runtime::Loader& runtime_; @@ -65,7 +82,7 @@ typedef std::shared_ptr FilterConfigSharedPtr; */ class FilterConfigPerRoute : public Router::RouteSpecificFilterConfig { public: - using ContextExtensionsMap = Protobuf::Map; + using ContextExtensionsMap = Protobuf::Map; FilterConfigPerRoute(const envoy::config::filter::http::ext_authz::v2::ExtAuthzPerRoute& config) : context_extensions_(config.has_check_settings() @@ -116,6 +133,8 @@ class Filter : public Logger::Loggable, private: void addResponseHeaders(Http::HeaderMap& header_map, const Http::HeaderVector& headers); + void initiateCall(const Http::HeaderMap& headers); + bool isBufferFull(); // State of this filter's communication with the external authorization service. 
// The filter has either not started calling the external service, in the middle of calling @@ -127,7 +146,6 @@ class Filter : public Logger::Loggable, // the filter chain should stop. Otherwise the filter chain can continue to the next filter. enum class FilterReturn { ContinueDecoding, StopDecoding }; - void initiateCall(const Http::HeaderMap& headers); Http::HeaderMapPtr getHeaderMap(const Filters::Common::ExtAuthz::ResponsePtr& response); FilterConfigSharedPtr config_; Filters::Common::ExtAuthz::ClientPtr client_; @@ -139,6 +157,7 @@ class Filter : public Logger::Loggable, // Used to identify if the callback to onComplete() is synchronous (on the stack) or asynchronous. bool initiating_call_{}; + bool buffer_data_{}; envoy::service::auth::v2::CheckRequest check_request_{}; }; diff --git a/source/extensions/filters/http/fault/BUILD b/source/extensions/filters/http/fault/BUILD index 847e32b50e922..a5b48b5af103c 100644 --- a/source/extensions/filters/http/fault/BUILD +++ b/source/extensions/filters/http/fault/BUILD @@ -23,13 +23,16 @@ envoy_cc_library( "//include/envoy/runtime:runtime_interface", "//include/envoy/stats:stats_interface", "//include/envoy/stats:stats_macros", + "//source/common/buffer:watermark_buffer_lib", "//source/common/common:assert_lib", "//source/common/common:empty_string", + "//source/common/common:token_bucket_impl_lib", "//source/common/http:codes_lib", "//source/common/http:header_map_lib", "//source/common/http:header_utility_lib", "//source/common/http:headers_lib", "//source/common/protobuf:utility_lib", + "//source/extensions/filters/common/fault:fault_config_lib", "@envoy_api//envoy/config/filter/http/fault/v2:fault_cc", ], ) diff --git a/source/extensions/filters/http/fault/config.cc b/source/extensions/filters/http/fault/config.cc index 726917d282f7c..f0a0c8bd26047 100644 --- a/source/extensions/filters/http/fault/config.cc +++ b/source/extensions/filters/http/fault/config.cc @@ -16,9 +16,9 @@ Http::FilterFactoryCb 
FaultFilterFactory::createFilterFactoryFromProtoTyped( const envoy::config::filter::http::fault::v2::HTTPFault& config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { FaultFilterConfigSharedPtr filter_config(new FaultFilterConfig( - config, context.runtime(), stats_prefix, context.scope(), context.random())); + config, context.runtime(), stats_prefix, context.scope(), context.timeSource())); return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { - callbacks.addStreamDecoderFilter(std::make_shared(filter_config)); + callbacks.addStreamFilter(std::make_shared(filter_config)); }; } diff --git a/source/extensions/filters/http/fault/fault_filter.cc b/source/extensions/filters/http/fault/fault_filter.cc index 6281c6bda5f9c..ee491a73f695c 100644 --- a/source/extensions/filters/http/fault/fault_filter.cc +++ b/source/extensions/filters/http/fault/fault_filter.cc @@ -26,13 +26,13 @@ namespace Extensions { namespace HttpFilters { namespace Fault { -const std::string FaultFilter::DELAY_PERCENT_KEY = "fault.http.delay.fixed_delay_percent"; -const std::string FaultFilter::ABORT_PERCENT_KEY = "fault.http.abort.abort_percent"; -const std::string FaultFilter::DELAY_DURATION_KEY = "fault.http.delay.fixed_duration_ms"; -const std::string FaultFilter::ABORT_HTTP_STATUS_KEY = "fault.http.abort.http_status"; +struct RcDetailsValues { + // The fault filter injected an abort for this request. 
+ const std::string FaultAbort = "fault_filter_abort"; +}; +typedef ConstSingleton RcDetails; FaultSettings::FaultSettings(const envoy::config::filter::http::fault::v2::HTTPFault& fault) { - if (fault.has_abort()) { const auto& abort = fault.abort(); abort_percentage_ = abort.percentage(); @@ -40,9 +40,8 @@ FaultSettings::FaultSettings(const envoy::config::filter::http::fault::v2::HTTPF } if (fault.has_delay()) { - const auto& delay = fault.delay(); - fixed_delay_percentage_ = delay.percentage(); - fixed_duration_ms_ = PROTOBUF_GET_MS_OR_DEFAULT(delay, fixed_delay, 0); + request_delay_config_ = + std::make_unique(fault.delay()); } for (const Http::HeaderUtility::HeaderData& header_map : fault.headers()) { @@ -54,13 +53,22 @@ FaultSettings::FaultSettings(const envoy::config::filter::http::fault::v2::HTTPF for (const auto& node : fault.downstream_nodes()) { downstream_nodes_.insert(node); } + + if (fault.has_max_active_faults()) { + max_active_faults_ = fault.max_active_faults().value(); + } + + if (fault.has_response_rate_limit()) { + response_rate_limit_ = + std::make_unique(fault.response_rate_limit()); + } } FaultFilterConfig::FaultFilterConfig(const envoy::config::filter::http::fault::v2::HTTPFault& fault, Runtime::Loader& runtime, const std::string& stats_prefix, - Stats::Scope& scope, Runtime::RandomGenerator& generator) + Stats::Scope& scope, TimeSource& time_source) : settings_(fault), runtime_(runtime), stats_(generateStats(stats_prefix, scope)), - stats_prefix_(stats_prefix), scope_(scope), generator_(generator) {} + stats_prefix_(stats_prefix), scope_(scope), time_source_(time_source) {} FaultFilter::FaultFilter(FaultFilterConfigSharedPtr config) : config_(config) {} @@ -76,9 +84,9 @@ Http::FilterHeadersStatus FaultFilter::decodeHeaders(Http::HeaderMap& headers, b // faults. In other words, runtime is supported only when faults are // configured at the filter level. 
fault_settings_ = config_->settings(); - if (callbacks_->route() && callbacks_->route()->routeEntry()) { + if (decoder_callbacks_->route() && decoder_callbacks_->route()->routeEntry()) { const std::string& name = Extensions::HttpFilters::HttpFilterNames::get().Fault; - const auto* route_entry = callbacks_->route()->routeEntry(); + const auto* route_entry = decoder_callbacks_->route()->routeEntry(); const FaultSettings* tmp = route_entry->perFilterConfigTyped(name); const FaultSettings* per_route_settings = @@ -86,6 +94,10 @@ Http::FilterHeadersStatus FaultFilter::decodeHeaders(Http::HeaderMap& headers, b fault_settings_ = per_route_settings ? per_route_settings : fault_settings_; } + if (faultOverflow()) { + return Http::FilterHeadersStatus::Continue; + } + if (!matchesTargetUpstreamCluster()) { return Http::FilterHeadersStatus::Continue; } @@ -100,7 +112,8 @@ Http::FilterHeadersStatus FaultFilter::decodeHeaders(Http::HeaderMap& headers, b } if (headers.EnvoyDownstreamServiceCluster()) { - downstream_cluster_ = headers.EnvoyDownstreamServiceCluster()->value().c_str(); + downstream_cluster_ = + std::string(headers.EnvoyDownstreamServiceCluster()->value().getStringView()); downstream_cluster_delay_percent_key_ = fmt::format("fault.http.{}.delay.fixed_delay_percent", downstream_cluster_); @@ -112,12 +125,16 @@ Http::FilterHeadersStatus FaultFilter::decodeHeaders(Http::HeaderMap& headers, b fmt::format("fault.http.{}.abort.http_status", downstream_cluster_); } - absl::optional duration_ms = delayDuration(); - if (duration_ms) { - delay_timer_ = callbacks_->dispatcher().createTimer([this]() -> void { postDelayInjection(); }); - delay_timer_->enableTimer(std::chrono::milliseconds(duration_ms.value())); + maybeSetupResponseRateLimit(headers); + + absl::optional duration = delayDuration(headers); + if (duration.has_value()) { + delay_timer_ = + decoder_callbacks_->dispatcher().createTimer([this]() -> void { postDelayInjection(); }); + ENVOY_LOG(debug, "fault: delaying 
request {}ms", duration.value().count()); + delay_timer_->enableTimer(duration.value()); recordDelaysInjectedStats(); - callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::DelayInjected); + decoder_callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::DelayInjected); return Http::FilterHeadersStatus::StopIteration; } @@ -129,54 +146,104 @@ Http::FilterHeadersStatus FaultFilter::decodeHeaders(Http::HeaderMap& headers, b return Http::FilterHeadersStatus::Continue; } +void FaultFilter::maybeSetupResponseRateLimit(const Http::HeaderMap& request_headers) { + if (fault_settings_->responseRateLimit() == nullptr) { + return; + } + + absl::optional rate_kbps = fault_settings_->responseRateLimit()->rateKbps( + request_headers.get(Filters::Common::Fault::HeaderNames::get().ThroughputResponse)); + if (!rate_kbps.has_value()) { + return; + } + + // TODO(mattklein123): Allow runtime override via downstream cluster similar to the other keys. + if (!config_->runtime().snapshot().featureEnabled( + RuntimeKeys::get().ResponseRateLimitPercentKey, + fault_settings_->responseRateLimit()->percentage())) { + return; + } + + // General stats. All injected faults are considered a single aggregate active fault. 
+ maybeIncActiveFaults(); + config_->stats().response_rl_injected_.inc(); + + response_limiter_ = std::make_unique( + rate_kbps.value(), encoder_callbacks_->encoderBufferLimit(), + [this] { encoder_callbacks_->onEncoderFilterAboveWriteBufferHighWatermark(); }, + [this] { encoder_callbacks_->onEncoderFilterBelowWriteBufferLowWatermark(); }, + [this](Buffer::Instance& data, bool end_stream) { + encoder_callbacks_->injectEncodedDataToFilterChain(data, end_stream); + }, + [this] { encoder_callbacks_->continueEncoding(); }, config_->timeSource(), + decoder_callbacks_->dispatcher()); +} + +bool FaultFilter::faultOverflow() { + const uint64_t max_faults = config_->runtime().snapshot().getInteger( + RuntimeKeys::get().MaxActiveFaultsKey, fault_settings_->maxActiveFaults().has_value() + ? fault_settings_->maxActiveFaults().value() + : std::numeric_limits::max()); + // Note: Since we don't compare/swap here this is a fuzzy limit which is similar to how the + // other circuit breakers work. + if (config_->stats().active_faults_.value() >= max_faults) { + config_->stats().faults_overflow_.inc(); + return true; + } + + return false; +} + bool FaultFilter::isDelayEnabled() { + if (fault_settings_->requestDelay() == nullptr) { + return false; + } + bool enabled = config_->runtime().snapshot().featureEnabled( - DELAY_PERCENT_KEY, fault_settings_->delayPercentage().numerator(), - config_->randomGenerator().random(), - ProtobufPercentHelper::fractionalPercentDenominatorToInt( - fault_settings_->delayPercentage().denominator())); + RuntimeKeys::get().DelayPercentKey, fault_settings_->requestDelay()->percentage()); if (!downstream_cluster_delay_percent_key_.empty()) { enabled |= config_->runtime().snapshot().featureEnabled( - downstream_cluster_delay_percent_key_, fault_settings_->delayPercentage().numerator(), - config_->randomGenerator().random(), - ProtobufPercentHelper::fractionalPercentDenominatorToInt( - fault_settings_->delayPercentage().denominator())); + 
downstream_cluster_delay_percent_key_, fault_settings_->requestDelay()->percentage()); } return enabled; } bool FaultFilter::isAbortEnabled() { - bool enabled = config_->runtime().snapshot().featureEnabled( - ABORT_PERCENT_KEY, fault_settings_->abortPercentage().numerator(), - config_->randomGenerator().random(), - ProtobufPercentHelper::fractionalPercentDenominatorToInt( - fault_settings_->abortPercentage().denominator())); + bool enabled = config_->runtime().snapshot().featureEnabled(RuntimeKeys::get().AbortPercentKey, + fault_settings_->abortPercentage()); if (!downstream_cluster_abort_percent_key_.empty()) { - enabled |= config_->runtime().snapshot().featureEnabled( - downstream_cluster_abort_percent_key_, fault_settings_->abortPercentage().numerator(), - config_->randomGenerator().random(), - ProtobufPercentHelper::fractionalPercentDenominatorToInt( - fault_settings_->abortPercentage().denominator())); + enabled |= config_->runtime().snapshot().featureEnabled(downstream_cluster_abort_percent_key_, + fault_settings_->abortPercentage()); } return enabled; } -absl::optional FaultFilter::delayDuration() { - absl::optional ret; +absl::optional +FaultFilter::delayDuration(const Http::HeaderMap& request_headers) { + absl::optional ret; if (!isDelayEnabled()) { return ret; } - uint64_t duration = config_->runtime().snapshot().getInteger(DELAY_DURATION_KEY, - fault_settings_->delayDuration()); + // See if the configured delay provider has a default delay, if not there is no delay (e.g., + // header configuration and no/invalid header). 
+ auto config_duration = fault_settings_->requestDelay()->duration( + request_headers.get(Filters::Common::Fault::HeaderNames::get().DelayRequest)); + if (!config_duration.has_value()) { + return ret; + } + + std::chrono::milliseconds duration = + std::chrono::milliseconds(config_->runtime().snapshot().getInteger( + RuntimeKeys::get().DelayDurationKey, config_duration.value().count())); if (!downstream_cluster_delay_duration_key_.empty()) { - duration = - config_->runtime().snapshot().getInteger(downstream_cluster_delay_duration_key_, duration); + duration = std::chrono::milliseconds(config_->runtime().snapshot().getInteger( + downstream_cluster_delay_duration_key_, duration.count())); } // Delay only if the duration is >0ms - if (duration > 0) { + if (duration.count() > 0) { ret = duration; } @@ -185,8 +252,8 @@ absl::optional FaultFilter::delayDuration() { uint64_t FaultFilter::abortHttpStatus() { // TODO(mattklein123): check http status codes obtained from runtime. - uint64_t http_status = - config_->runtime().snapshot().getInteger(ABORT_HTTP_STATUS_KEY, fault_settings_->abortCode()); + uint64_t http_status = config_->runtime().snapshot().getInteger( + RuntimeKeys::get().AbortHttpStatusKey, fault_settings_->abortCode()); if (!downstream_cluster_abort_http_status_key_.empty()) { http_status = config_->runtime().snapshot().getInteger( @@ -205,7 +272,8 @@ void FaultFilter::recordDelaysInjectedStats() { config_->scope().counter(stats_counter).inc(); } - // General stats. + // General stats. All injected faults are considered a single aggregate active fault. + maybeIncActiveFaults(); config_->stats().delays_injected_.inc(); } @@ -218,7 +286,8 @@ void FaultFilter::recordAbortsInjectedStats() { config_->scope().counter(stats_counter).inc(); } - // General stats. + // General stats. All injected faults are considered a single aggregate active fault. 
+ maybeIncActiveFaults(); config_->stats().aborts_injected_.inc(); } @@ -236,11 +305,28 @@ Http::FilterTrailersStatus FaultFilter::decodeTrailers(Http::HeaderMap&) { } FaultFilterStats FaultFilterConfig::generateStats(const std::string& prefix, Stats::Scope& scope) { - std::string final_prefix = prefix + "fault."; - return {ALL_FAULT_FILTER_STATS(POOL_COUNTER_PREFIX(scope, final_prefix))}; + const std::string final_prefix = prefix + "fault."; + return {ALL_FAULT_FILTER_STATS(POOL_COUNTER_PREFIX(scope, final_prefix), + POOL_GAUGE_PREFIX(scope, final_prefix))}; +} + +void FaultFilter::maybeIncActiveFaults() { + // Only charge 1 active fault per filter in case we are injecting multiple faults. + if (fault_active_) { + return; + } + + // TODO(mattklein123): Consider per-fault type active fault gauges. + config_->stats().active_faults_.inc(); + fault_active_ = true; } -void FaultFilter::onDestroy() { resetTimerState(); } +void FaultFilter::onDestroy() { + resetTimerState(); + if (fault_active_) { + config_->stats().active_faults_.dec(); + } +} void FaultFilter::postDelayInjection() { resetTimerState(); @@ -250,14 +336,15 @@ void FaultFilter::postDelayInjection() { abortWithHTTPStatus(); } else { // Continue request processing. 
- callbacks_->continueDecoding(); + decoder_callbacks_->continueDecoding(); } } void FaultFilter::abortWithHTTPStatus() { - callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::FaultInjected); - callbacks_->sendLocalReply(static_cast(abortHttpStatus()), "fault filter abort", - nullptr, absl::nullopt); + decoder_callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::FaultInjected); + decoder_callbacks_->sendLocalReply(static_cast(abortHttpStatus()), + "fault filter abort", nullptr, absl::nullopt, + RcDetails::get().FaultAbort); recordAbortsInjectedStats(); } @@ -265,7 +352,7 @@ bool FaultFilter::matchesTargetUpstreamCluster() { bool matches = true; if (!fault_settings_->upstreamCluster().empty()) { - Router::RouteConstSharedPtr route = callbacks_->route(); + Router::RouteConstSharedPtr route = decoder_callbacks_->route(); matches = route && route->routeEntry() && (route->routeEntry()->clusterName() == fault_settings_->upstreamCluster()); } @@ -282,7 +369,8 @@ bool FaultFilter::matchesDownstreamNodes(const Http::HeaderMap& headers) { return false; } - const std::string downstream_node = headers.EnvoyDownstreamServiceNode()->value().c_str(); + const absl::string_view downstream_node = + headers.EnvoyDownstreamServiceNode()->value().getStringView(); return fault_settings_->downstreamNodes().find(downstream_node) != fault_settings_->downstreamNodes().end(); } @@ -294,8 +382,112 @@ void FaultFilter::resetTimerState() { } } -void FaultFilter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) { - callbacks_ = &callbacks; +Http::FilterDataStatus FaultFilter::encodeData(Buffer::Instance& data, bool end_stream) { + if (response_limiter_ != nullptr) { + response_limiter_->writeData(data, end_stream); + return Http::FilterDataStatus::StopIterationNoBuffer; + } + + return Http::FilterDataStatus::Continue; +} + +Http::FilterTrailersStatus FaultFilter::encodeTrailers(Http::HeaderMap&) { + if (response_limiter_ != nullptr) { + return 
response_limiter_->onTrailers(); + } + + return Http::FilterTrailersStatus::Continue; +} + +StreamRateLimiter::StreamRateLimiter(uint64_t max_kbps, uint64_t max_buffered_data, + std::function pause_data_cb, + std::function resume_data_cb, + std::function write_data_cb, + std::function continue_cb, TimeSource& time_source, + Event::Dispatcher& dispatcher) + : // bytes_per_time_slice is KiB converted to bytes divided by the number of ticks per second. + bytes_per_time_slice_((max_kbps * 1024) / SecondDivisor), write_data_cb_(write_data_cb), + continue_cb_(continue_cb), + // The token bucket is configured with a max token count of the number of ticks per second, + // and refills at the same rate, so that we have a per second limit which refills gradually in + // ~63ms intervals. + token_bucket_(SecondDivisor, time_source, SecondDivisor), + token_timer_(dispatcher.createTimer([this] { onTokenTimer(); })), + buffer_(resume_data_cb, pause_data_cb) { + ASSERT(bytes_per_time_slice_ > 0); + ASSERT(max_buffered_data > 0); + buffer_.setWatermarks(max_buffered_data); +} + +void StreamRateLimiter::onTokenTimer() { + ENVOY_LOG(trace, "limiter: timer wakeup: buffered={}", buffer_.length()); + Buffer::OwnedImpl data_to_write; + + if (!saw_data_) { + // The first time we see any data on this stream (via writeData()), reset the number of tokens + // to 1. This will ensure that we start pacing the data at the desired rate (and don't send a + // full 1s of data right away which might not introduce enough delay for a stream that doesn't + // have enough data to span more than 1s of rate allowance). Once we reset, we will subsequently + // allow for bursting within the second to account for our data provider being bursty. + token_bucket_.reset(1); + saw_data_ = true; + } + + // Compute the number of tokens needed (rounded up), try to obtain that many tickets, and then + // figure out how many bytes to write given the number of tokens we actually got. 
+ const uint64_t tokens_needed = + (buffer_.length() + bytes_per_time_slice_ - 1) / bytes_per_time_slice_; + const uint64_t tokens_obtained = token_bucket_.consume(tokens_needed, true); + const uint64_t bytes_to_write = + std::min(tokens_obtained * bytes_per_time_slice_, buffer_.length()); + ENVOY_LOG(trace, "limiter: tokens_needed={} tokens_obtained={} to_write={}", tokens_needed, + tokens_obtained, bytes_to_write); + + // Move the data to write into the output buffer with as little copying as possible. + // NOTE: This might be moving zero bytes, but that should work fine. + data_to_write.move(buffer_, bytes_to_write); + + // If the buffer still contains data in it, we couldn't get enough tokens, so schedule the next + // token available time. + if (buffer_.length() > 0) { + const std::chrono::milliseconds ms = token_bucket_.nextTokenAvailable(); + if (ms.count() > 0) { + ENVOY_LOG(trace, "limiter: scheduling wakeup for {}ms", ms.count()); + token_timer_->enableTimer(ms); + } + } + + // Write the data out, indicating end stream if we saw end stream, there is no further data to + // send, and there are no trailers. + write_data_cb_(data_to_write, saw_end_stream_ && buffer_.length() == 0 && !saw_trailers_); + + // If there is no more data to send and we saw trailers, we need to continue iteration to release + // the trailers to further filters. + if (buffer_.length() == 0 && saw_trailers_) { + continue_cb_(); + } +} + +void StreamRateLimiter::writeData(Buffer::Instance& incoming_buffer, bool end_stream) { + ENVOY_LOG(trace, "limiter: incoming data length={} buffered={}", incoming_buffer.length(), + buffer_.length()); + buffer_.move(incoming_buffer); + saw_end_stream_ = end_stream; + if (!token_timer_->enabled()) { + // TODO(mattklein123): In an optimal world we would be able to continue iteration with the data + // we want in the buffer, but have a way to clear end_stream in case we can't send it all. 
+ // The filter API does not currently support that and it will not be a trivial change to add. + // Instead we cheat here by scheduling the token timer to run immediately after the stack is + // unwound, at which point we can directly called encode/decodeData. + token_timer_->enableTimer(std::chrono::milliseconds(0)); + } +} + +Http::FilterTrailersStatus StreamRateLimiter::onTrailers() { + saw_end_stream_ = true; + saw_trailers_ = true; + return buffer_.length() > 0 ? Http::FilterTrailersStatus::StopIteration + : Http::FilterTrailersStatus::Continue; } } // namespace Fault diff --git a/source/extensions/filters/http/fault/fault_filter.h b/source/extensions/filters/http/fault/fault_filter.h index 59f5be4423b8a..2b535edd666e0 100644 --- a/source/extensions/filters/http/fault/fault_filter.h +++ b/source/extensions/filters/http/fault/fault_filter.h @@ -13,8 +13,12 @@ #include "envoy/stats/scope.h" #include "envoy/stats/stats_macros.h" +#include "common/buffer/watermark_buffer.h" +#include "common/common/token_bucket_impl.h" #include "common/http/header_utility.h" +#include "extensions/filters/common/fault/fault_config.h" + namespace Envoy { namespace Extensions { namespace HttpFilters { @@ -24,16 +28,19 @@ namespace Fault { * All stats for the fault filter. @see stats_macros.h */ // clang-format off -#define ALL_FAULT_FILTER_STATS(COUNTER) \ +#define ALL_FAULT_FILTER_STATS(COUNTER, GAUGE) \ COUNTER(delays_injected) \ - COUNTER(aborts_injected) + COUNTER(aborts_injected) \ + COUNTER(response_rl_injected) \ + COUNTER(faults_overflow) \ + GAUGE (active_faults) // clang-format on /** * Wrapper struct for connection manager stats. 
@see stats_macros.h */ struct FaultFilterStats { - ALL_FAULT_FILTER_STATS(GENERATE_COUNTER_STRUCT) + ALL_FAULT_FILTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) }; /** @@ -47,20 +54,26 @@ class FaultSettings : public Router::RouteSpecificFilterConfig { return fault_filter_headers_; } envoy::type::FractionalPercent abortPercentage() const { return abort_percentage_; } - envoy::type::FractionalPercent delayPercentage() const { return fixed_delay_percentage_; } - uint64_t delayDuration() const { return fixed_duration_ms_; } uint64_t abortCode() const { return http_status_; } + const Filters::Common::Fault::FaultDelayConfig* requestDelay() const { + return request_delay_config_.get(); + } const std::string& upstreamCluster() const { return upstream_cluster_; } - const std::unordered_set& downstreamNodes() const { return downstream_nodes_; } + const absl::flat_hash_set& downstreamNodes() const { return downstream_nodes_; } + absl::optional maxActiveFaults() const { return max_active_faults_; } + const Filters::Common::Fault::FaultRateLimitConfig* responseRateLimit() const { + return response_rate_limit_.get(); + } private: envoy::type::FractionalPercent abort_percentage_; uint64_t http_status_{}; // HTTP or gRPC return codes - envoy::type::FractionalPercent fixed_delay_percentage_; - uint64_t fixed_duration_ms_{}; // in milliseconds + Filters::Common::Fault::FaultDelayConfigPtr request_delay_config_; std::string upstream_cluster_; // restrict faults to specific upstream cluster std::vector fault_filter_headers_; - std::unordered_set downstream_nodes_{}; // Inject failures for specific downstream + absl::flat_hash_set downstream_nodes_{}; // Inject failures for specific downstream + absl::optional max_active_faults_; + Filters::Common::Fault::FaultRateLimitConfigPtr response_rate_limit_; }; /** @@ -70,14 +83,14 @@ class FaultFilterConfig { public: FaultFilterConfig(const envoy::config::filter::http::fault::v2::HTTPFault& fault, Runtime::Loader& runtime, const 
std::string& stats_prefix, Stats::Scope& scope, - Runtime::RandomGenerator& generator); + TimeSource& time_source); Runtime::Loader& runtime() { return runtime_; } FaultFilterStats& stats() { return stats_; } const std::string& statsPrefix() { return stats_prefix_; } Stats::Scope& scope() { return scope_; } const FaultSettings* settings() { return &settings_; } - Runtime::RandomGenerator& randomGenerator() { return generator_; } + TimeSource& timeSource() { return time_source_; } private: static FaultFilterStats generateStats(const std::string& prefix, Stats::Scope& scope); @@ -87,15 +100,67 @@ class FaultFilterConfig { FaultFilterStats stats_; const std::string stats_prefix_; Stats::Scope& scope_; - Runtime::RandomGenerator& generator_; + TimeSource& time_source_; }; typedef std::shared_ptr FaultFilterConfigSharedPtr; +/** + * An HTTP stream rate limiter. Split out for ease of testing and potential code reuse elsewhere. + */ +class StreamRateLimiter : Logger::Loggable { +public: + /** + * @param max_kbps maximum rate in KiB/s. + * @param max_buffered_data maximum data to buffer before invoking the pause callback. + * @param pause_data_cb callback invoked when the limiter has buffered too much data. + * @param resume_data_cb callback invoked when the limiter has gone under the buffer limit. + * @param write_data_cb callback invoked to write data to the stream. + * @param continue_cb callback invoked to continue the stream. This is only used to continue + * trailers that have been paused during body flush. + * @param time_source the time source to run the token bucket with. + * @param dispatcher the stream's dispatcher to use for creating timers. + */ + StreamRateLimiter(uint64_t max_kbps, uint64_t max_buffered_data, + std::function pause_data_cb, std::function resume_data_cb, + std::function write_data_cb, + std::function continue_cb, TimeSource& time_source, + Event::Dispatcher& dispatcher); + + /** + * Called by the stream to write data. 
All data writes happen asynchronously, the stream should + * be stopped after this call (all data will be drained from incoming_buffer). + */ + void writeData(Buffer::Instance& incoming_buffer, bool end_stream); + + /** + * Called if the stream receives trailers. + */ + Http::FilterTrailersStatus onTrailers(); + +private: + void onTokenTimer(); + + // We currently divide each second into 16 segments for the token bucket. Thus, the rate limit is + // KiB per second, divided into 16 segments, ~63ms apart. 16 is used because it divides into 1024 + // evenly. + static constexpr uint64_t SecondDivisor = 16; + + const uint64_t bytes_per_time_slice_; + const std::function write_data_cb_; + const std::function continue_cb_; + TokenBucketImpl token_bucket_; + Event::TimerPtr token_timer_; + bool saw_data_{}; + bool saw_end_stream_{}; + bool saw_trailers_{}; + Buffer::WatermarkBuffer buffer_; +}; + /** * A filter that is capable of faulting an entire request before dispatching it upstream. */ -class FaultFilter : public Http::StreamDecoderFilter { +class FaultFilter : public Http::StreamFilter, Logger::Loggable { public: FaultFilter(FaultFilterConfigSharedPtr config); ~FaultFilter(); @@ -107,9 +172,40 @@ class FaultFilter : public Http::StreamDecoderFilter { Http::FilterHeadersStatus decodeHeaders(Http::HeaderMap& headers, bool end_stream) override; Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override; Http::FilterTrailersStatus decodeTrailers(Http::HeaderMap& trailers) override; - void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override; + void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override { + decoder_callbacks_ = &callbacks; + } + + // Http::StreamEncoderFilter + Http::FilterHeadersStatus encode100ContinueHeaders(Http::HeaderMap&) override { + return Http::FilterHeadersStatus::Continue; + } + Http::FilterHeadersStatus encodeHeaders(Http::HeaderMap&, bool) override { + return 
Http::FilterHeadersStatus::Continue; + } + Http::FilterDataStatus encodeData(Buffer::Instance& data, bool end_stream) override; + Http::FilterTrailersStatus encodeTrailers(Http::HeaderMap&) override; + Http::FilterMetadataStatus encodeMetadata(Http::MetadataMap&) override { + return Http::FilterMetadataStatus::Continue; + } + void setEncoderFilterCallbacks(Http::StreamEncoderFilterCallbacks& callbacks) override { + encoder_callbacks_ = &callbacks; + } private: + class RuntimeKeyValues { + public: + const std::string DelayPercentKey = "fault.http.delay.fixed_delay_percent"; + const std::string AbortPercentKey = "fault.http.abort.abort_percent"; + const std::string DelayDurationKey = "fault.http.delay.fixed_duration_ms"; + const std::string AbortHttpStatusKey = "fault.http.abort.http_status"; + const std::string MaxActiveFaultsKey = "fault.http.max_active_faults"; + const std::string ResponseRateLimitPercentKey = "fault.http.rate_limit.response_percent"; + }; + + using RuntimeKeys = ConstSingleton; + + bool faultOverflow(); void recordAbortsInjectedStats(); void recordDelaysInjectedStats(); void resetTimerState(); @@ -117,27 +213,25 @@ class FaultFilter : public Http::StreamDecoderFilter { void abortWithHTTPStatus(); bool matchesTargetUpstreamCluster(); bool matchesDownstreamNodes(const Http::HeaderMap& headers); - bool isAbortEnabled(); bool isDelayEnabled(); - absl::optional delayDuration(); + absl::optional delayDuration(const Http::HeaderMap& request_headers); uint64_t abortHttpStatus(); + void maybeIncActiveFaults(); + void maybeSetupResponseRateLimit(const Http::HeaderMap& request_headers); FaultFilterConfigSharedPtr config_; - Http::StreamDecoderFilterCallbacks* callbacks_{}; + Http::StreamDecoderFilterCallbacks* decoder_callbacks_{}; + Http::StreamEncoderFilterCallbacks* encoder_callbacks_{}; Event::TimerPtr delay_timer_; std::string downstream_cluster_{}; const FaultSettings* fault_settings_; - + bool fault_active_{}; + std::unique_ptr response_limiter_; 
std::string downstream_cluster_delay_percent_key_{}; std::string downstream_cluster_abort_percent_key_{}; std::string downstream_cluster_delay_duration_key_{}; std::string downstream_cluster_abort_http_status_key_{}; - - const static std::string DELAY_PERCENT_KEY; - const static std::string ABORT_PERCENT_KEY; - const static std::string DELAY_DURATION_KEY; - const static std::string ABORT_HTTP_STATUS_KEY; }; } // namespace Fault diff --git a/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc b/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc index e1b413ccc771d..052eb7538a290 100644 --- a/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc +++ b/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc @@ -71,7 +71,7 @@ Http::FilterTrailersStatus Http1BridgeFilter::encodeTrailers(Http::HeaderMap& tr const Http::HeaderEntry* grpc_status_header = trailers.GrpcStatus(); if (grpc_status_header) { uint64_t grpc_status_code; - if (!StringUtil::atoul(grpc_status_header->value().c_str(), grpc_status_code) || + if (!absl::SimpleAtoi(grpc_status_header->value().getStringView(), &grpc_status_code) || grpc_status_code != 0) { response_headers_->Status()->value(enumToInt(Http::Code::ServiceUnavailable)); } diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/config.cc b/source/extensions/filters/http/grpc_http1_reverse_bridge/config.cc index c7703ba29e1f0..0a012b9116015 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/config.cc +++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/config.cc @@ -10,8 +10,7 @@ namespace HttpFilters { namespace GrpcHttp1ReverseBridge { Http::FilterFactoryCb Config::createFilterFactoryFromProtoTyped( - const envoy::extensions::filter::http::grpc_http1_reverse_bridge::v2alpha1::FilterConfig& - config, + const envoy::config::filter::http::grpc_http1_reverse_bridge::v2alpha1::FilterConfig& config, const std::string&, 
Server::Configuration::FactoryContext&) { return [config](Envoy::Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamFilter( diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/config.h b/source/extensions/filters/http/grpc_http1_reverse_bridge/config.h index 803f5660e5051..073fbb5e958a0 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/config.h +++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/config.h @@ -11,15 +11,13 @@ namespace Extensions { namespace HttpFilters { namespace GrpcHttp1ReverseBridge { -class Config - : public Common::FactoryBase< - envoy::extensions::filter::http::grpc_http1_reverse_bridge::v2alpha1::FilterConfig> { +class Config : public Common::FactoryBase< + envoy::config::filter::http::grpc_http1_reverse_bridge::v2alpha1::FilterConfig> { public: Config() : FactoryBase(HttpFilterNames::get().GrpcHttp1ReverseBridge) {} Http::FilterFactoryCb createFilterFactoryFromProtoTyped( - const envoy::extensions::filter::http::grpc_http1_reverse_bridge::v2alpha1::FilterConfig& - config, + const envoy::config::filter::http::grpc_http1_reverse_bridge::v2alpha1::FilterConfig& config, const std::string& stat_prefix, Envoy::Server::Configuration::FactoryContext& context) override; }; diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc index 4bcd711a0cab9..54b1fca59efc6 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc +++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc @@ -14,6 +14,15 @@ namespace Extensions { namespace HttpFilters { namespace GrpcHttp1ReverseBridge { +struct RcDetailsValues { + // The gRPC HTTP/1 reverse bridge failed because the body payload was too + // small to be a gRPC frame. + const std::string GrpcBridgeFailedTooSmall = "grpc_bridge_data_too_small"; + // The gRPC HTTP/1 bridge encountered an unsupported content type. 
+ const std::string GrpcBridgeFailedContentType = "grpc_bridge_content_type_wrong"; +}; +typedef ConstSingleton RcDetails; + namespace { Grpc::Status::GrpcStatus grpcStatusFromHeaders(Http::HeaderMap& headers) { const auto http_response_status = Http::Utility::getResponseStatus(headers); @@ -33,7 +42,7 @@ void adjustContentLength(Http::HeaderMap& headers, auto length_header = headers.ContentLength(); if (length_header != nullptr) { uint64_t length; - if (StringUtil::atoul(length_header->value().c_str(), length)) { + if (absl::SimpleAtoi(length_header->value().getStringView(), &length)) { length_header->value(adjustment(length)); } } @@ -56,7 +65,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool e // We keep track of the original content-type to ensure that we handle // gRPC content type variations such as application/grpc+proto. - content_type_ = headers.ContentType()->value().c_str(); + content_type_ = std::string(headers.ContentType()->value().getStringView()); headers.ContentType()->value(upstream_content_type_); headers.insertAccept().value(upstream_content_type_); @@ -78,7 +87,8 @@ Http::FilterDataStatus Filter::decodeData(Buffer::Instance& buffer, bool) { // Fail the request if the body is too small to possibly contain a gRPC frame. 
if (buffer.length() < Grpc::GRPC_FRAME_HEADER_SIZE) { decoder_callbacks_->sendLocalReply(Http::Code::OK, "invalid request body", nullptr, - Grpc::Status::GrpcStatus::Unknown); + Grpc::Status::GrpcStatus::Unknown, + RcDetails::get().GrpcBridgeFailedTooSmall); return Http::FilterDataStatus::StopIterationNoBuffer; } @@ -107,6 +117,8 @@ Http::FilterHeadersStatus Filter::encodeHeaders(Http::HeaderMap& headers, bool) headers.insertStatus().value(enumToInt(Http::Code::OK)); content_type->value(content_type_); + decoder_callbacks_->streamInfo().setResponseCodeDetails( + RcDetails::get().GrpcBridgeFailedContentType); return Http::FilterHeadersStatus::ContinueAndEndStream; } diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc index 95efe9885e83a..85f227845cb6a 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc @@ -14,6 +14,7 @@ #include "common/http/headers.h" #include "common/http/utility.h" #include "common/protobuf/protobuf.h" +#include "common/protobuf/utility.h" #include "google/api/annotations.pb.h" #include "google/api/http.pb.h" @@ -28,6 +29,7 @@ using Envoy::Protobuf::FileDescriptorSet; using Envoy::Protobuf::io::ZeroCopyInputStream; using Envoy::ProtobufUtil::Status; using Envoy::ProtobufUtil::error::Code; +using google::api::HttpRule; using google::grpc::transcoding::JsonRequestTranslator; using google::grpc::transcoding::PathMatcherBuilder; using google::grpc::transcoding::PathMatcherUtility; @@ -41,8 +43,17 @@ namespace Extensions { namespace HttpFilters { namespace GrpcJsonTranscoder { -namespace { +struct RcDetailsValues { + // The gRPC json transcoder filter failed to transcode when processing request headers. + // This will generally be accompanied by details about the transcoder failure. 
+ const std::string GrpcTranscodeFailedEarly = "early_grpc_json_transcode_failure"; + // The gRPC json transcoder filter failed to transcode when processing the request body. + // This will generally be accompanied by details about the transcoder failure. + const std::string GrpcTranscodeFailed = "grpc_json_transcode_failure"; +}; +typedef ConstSingleton RcDetails; +namespace { // Transcoder: // https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/blob/master/src/include/grpc_transcoding/transcoder.h // implementation based on JsonRequestTranslator & ResponseToJsonTranslator @@ -106,7 +117,7 @@ JsonTranscoderConfig::JsonTranscoderConfig( } PathMatcherBuilder pmb; - std::unordered_set ignored_query_parameters; + std::unordered_set ignored_query_parameters; for (const auto& query_param : proto_config.ignored_query_parameters()) { ignored_query_parameters.insert(query_param); } @@ -119,9 +130,18 @@ JsonTranscoderConfig::JsonTranscoderConfig( } for (int i = 0; i < service->method_count(); ++i) { auto method = service->method(i); - if (!PathMatcherUtility::RegisterByHttpRule(pmb, - method->options().GetExtension(google::api::http), - ignored_query_parameters, method)) { + + HttpRule http_rule; + if (method->options().HasExtension(google::api::http)) { + http_rule = method->options().GetExtension(google::api::http); + } else if (proto_config.auto_mapping()) { + auto post = "/" + service->full_name() + "/" + method->name(); + http_rule.set_post(post); + http_rule.set_body("*"); + } + + if (!PathMatcherUtility::RegisterByHttpRule(pmb, http_rule, ignored_query_parameters, + method)) { throw EnvoyException("transcoding_filter: Cannot register '" + method->full_name() + "' to path matcher"); } @@ -155,9 +175,9 @@ ProtobufUtil::Status JsonTranscoderConfig::createTranscoder( return ProtobufUtil::Status(Code::INVALID_ARGUMENT, "Request headers has application/grpc content-type"); } - const ProtobufTypes::String method = headers.Method()->value().c_str(); - 
ProtobufTypes::String path = headers.Path()->value().c_str(); - ProtobufTypes::String args; + const std::string method(headers.Method()->value().getStringView()); + std::string path(headers.Path()->value().getStringView()); + std::string args; const size_t pos = path.find('?'); if (pos != std::string::npos) { @@ -252,10 +272,13 @@ Http::FilterHeadersStatus JsonTranscoderFilter::decodeHeaders(Http::HeaderMap& h if (!request_status.ok()) { ENVOY_LOG(debug, "Transcoding request error {}", request_status.ToString()); error_ = true; - decoder_callbacks_->sendLocalReply(Http::Code::BadRequest, - absl::string_view(request_status.error_message().data(), - request_status.error_message().size()), - nullptr, absl::nullopt); + decoder_callbacks_->sendLocalReply( + Http::Code::BadRequest, + absl::string_view(request_status.error_message().data(), + request_status.error_message().size()), + nullptr, absl::nullopt, + absl::StrCat(RcDetails::get().GrpcTranscodeFailedEarly, "{", + MessageUtil::CodeEnumToString(request_status.code()), "}")); return Http::FilterHeadersStatus::StopIteration; } @@ -290,10 +313,13 @@ Http::FilterDataStatus JsonTranscoderFilter::decodeData(Buffer::Instance& data, if (!request_status.ok()) { ENVOY_LOG(debug, "Transcoding request error {}", request_status.ToString()); error_ = true; - decoder_callbacks_->sendLocalReply(Http::Code::BadRequest, - absl::string_view(request_status.error_message().data(), - request_status.error_message().size()), - nullptr, absl::nullopt); + decoder_callbacks_->sendLocalReply( + Http::Code::BadRequest, + absl::string_view(request_status.error_message().data(), + request_status.error_message().size()), + nullptr, absl::nullopt, + absl::StrCat(RcDetails::get().GrpcTranscodeFailed, "{", + MessageUtil::CodeEnumToString(request_status.code()), "}")); return Http::FilterDataStatus::StopIterationNoBuffer; } @@ -457,9 +483,7 @@ void JsonTranscoderFilter::buildResponseFromHttpBodyOutput(Http::HeaderMap& resp 
http_body.ParseFromZeroCopyStream(&stream); const auto& body = http_body.data(); - // TODO(mrice32): This string conversion is currently required because body has a different - // type within Google. Remove when the string types merge. - data.add(ProtobufTypes::String(body)); + data.add(body); response_headers.insertContentType().value(http_body.content_type()); response_headers.insertContentLength().value(body.size()); diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h index c9eeb59791d04..48b91b922bda3 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h @@ -35,9 +35,9 @@ struct VariableBinding { // The location of the field in the protobuf message, where the value // needs to be inserted, e.g. "shelf.theme" would mean the "theme" field // of the nested "shelf" message of the request protobuf message. - std::vector field_path; + std::vector field_path; // The value to be inserted. - ProtobufTypes::String value; + std::string value; }; /** diff --git a/source/extensions/filters/http/grpc_web/grpc_web_filter.cc b/source/extensions/filters/http/grpc_web/grpc_web_filter.cc index d6dbccefb88a3..88cf127a936c3 100644 --- a/source/extensions/filters/http/grpc_web/grpc_web_filter.cc +++ b/source/extensions/filters/http/grpc_web/grpc_web_filter.cc @@ -15,12 +15,20 @@ namespace Extensions { namespace HttpFilters { namespace GrpcWeb { +struct RcDetailsValues { + // The grpc web filter couldn't decode the data as the size wasn't a multiple of 4. + const std::string GrpcDecodeFailedDueToSize = "grpc_base_64_decode_failed_bad_size"; + // The grpc web filter couldn't decode the data provided. 
+ const std::string GrpcDecodeFailedDueToData = "grpc_base_64_decode_failed"; +}; +typedef ConstSingleton RcDetails; + // Bit mask denotes a trailers frame of gRPC-Web. const uint8_t GrpcWebFilter::GRPC_WEB_TRAILER = 0b10000000; // Supported gRPC-Web content-types. -const std::unordered_set& GrpcWebFilter::gRpcWebContentTypes() const { - static const std::unordered_set* types = new std::unordered_set( +const absl::flat_hash_set& GrpcWebFilter::gRpcWebContentTypes() const { + static const absl::flat_hash_set* types = new absl::flat_hash_set( {Http::Headers::get().ContentTypeValues.GrpcWeb, Http::Headers::get().ContentTypeValues.GrpcWebProto, Http::Headers::get().ContentTypeValues.GrpcWebText, @@ -31,7 +39,7 @@ const std::unordered_set& GrpcWebFilter::gRpcWebContentTypes() cons bool GrpcWebFilter::isGrpcWebRequest(const Http::HeaderMap& headers) { const Http::HeaderEntry* content_type = headers.ContentType(); if (content_type != nullptr) { - return gRpcWebContentTypes().count(content_type->value().c_str()) > 0; + return gRpcWebContentTypes().count(content_type->value().getStringView()) > 0; } return false; } @@ -51,9 +59,10 @@ Http::FilterHeadersStatus GrpcWebFilter::decodeHeaders(Http::HeaderMap& headers, headers.removeContentLength(); setupStatTracking(headers); - if (content_type != nullptr && - (Http::Headers::get().ContentTypeValues.GrpcWebText == content_type->value().c_str() || - Http::Headers::get().ContentTypeValues.GrpcWebTextProto == content_type->value().c_str())) { + if (content_type != nullptr && (Http::Headers::get().ContentTypeValues.GrpcWebText == + content_type->value().getStringView() || + Http::Headers::get().ContentTypeValues.GrpcWebTextProto == + content_type->value().getStringView())) { // Checks whether gRPC-Web client is sending base64 encoded request. 
is_text_request_ = true; } @@ -61,8 +70,9 @@ Http::FilterHeadersStatus GrpcWebFilter::decodeHeaders(Http::HeaderMap& headers, const Http::HeaderEntry* accept = headers.Accept(); if (accept != nullptr && - (Http::Headers::get().ContentTypeValues.GrpcWebText == accept->value().c_str() || - Http::Headers::get().ContentTypeValues.GrpcWebTextProto == accept->value().c_str())) { + (Http::Headers::get().ContentTypeValues.GrpcWebText == accept->value().getStringView() || + Http::Headers::get().ContentTypeValues.GrpcWebTextProto == + accept->value().getStringView())) { // Checks whether gRPC-Web client is asking for base64 encoded response. is_text_response_ = true; } @@ -95,7 +105,7 @@ Http::FilterDataStatus GrpcWebFilter::decodeData(Buffer::Instance& data, bool en // Client end stream with invalid base64. Note, base64 padding is mandatory. decoder_callbacks_->sendLocalReply(Http::Code::BadRequest, "Bad gRPC-web request, invalid base64 data.", nullptr, - absl::nullopt); + absl::nullopt, RcDetails::get().GrpcDecodeFailedDueToSize); return Http::FilterDataStatus::StopIterationNoBuffer; } } else if (available < 4) { @@ -112,7 +122,7 @@ Http::FilterDataStatus GrpcWebFilter::decodeData(Buffer::Instance& data, bool en // Error happened when decoding base64. 
decoder_callbacks_->sendLocalReply(Http::Code::BadRequest, "Bad gRPC-web request, invalid base64 data.", nullptr, - absl::nullopt); + absl::nullopt, RcDetails::get().GrpcDecodeFailedDueToData); return Http::FilterDataStatus::StopIterationNoBuffer; } @@ -192,9 +202,9 @@ Http::FilterTrailersStatus GrpcWebFilter::encodeTrailers(Http::HeaderMap& traile trailers.iterate( [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { Buffer::Instance* temp = static_cast(context); - temp->add(header.key().c_str(), header.key().size()); + temp->add(header.key().getStringView().data(), header.key().size()); temp->add(":"); - temp->add(header.value().c_str(), header.value().size()); + temp->add(header.value().getStringView().data(), header.value().size()); temp->add("\r\n"); return Http::HeaderMap::Iterate::Continue; }, diff --git a/source/extensions/filters/http/grpc_web/grpc_web_filter.h b/source/extensions/filters/http/grpc_web/grpc_web_filter.h index 5ed34171966b2..07b5e3187a181 100644 --- a/source/extensions/filters/http/grpc_web/grpc_web_filter.h +++ b/source/extensions/filters/http/grpc_web/grpc_web_filter.h @@ -56,7 +56,7 @@ class GrpcWebFilter : public Http::StreamFilter, NonCopyable { bool isGrpcWebRequest(const Http::HeaderMap& headers); static const uint8_t GRPC_WEB_TRAILER; - const std::unordered_set& gRpcWebContentTypes() const; + const absl::flat_hash_set& gRpcWebContentTypes() const; Upstream::ClusterInfoConstSharedPtr cluster_; Http::StreamDecoderFilterCallbacks* decoder_callbacks_{}; diff --git a/source/extensions/filters/http/gzip/gzip_filter.cc b/source/extensions/filters/http/gzip/gzip_filter.cc index e674930a664a4..883cf928e6410 100644 --- a/source/extensions/filters/http/gzip/gzip_filter.cc +++ b/source/extensions/filters/http/gzip/gzip_filter.cc @@ -83,8 +83,8 @@ Compressor::ZlibCompressorImpl::CompressionStrategy GzipFilterConfig::compressio } } -StringUtil::CaseUnorderedSet GzipFilterConfig::contentTypeSet( - const 
Protobuf::RepeatedPtrField& types) { +StringUtil::CaseUnorderedSet +GzipFilterConfig::contentTypeSet(const Protobuf::RepeatedPtrField& types) { return types.empty() ? StringUtil::CaseUnorderedSet(defaultContentEncoding().begin(), defaultContentEncoding().end()) : StringUtil::CaseUnorderedSet(types.cbegin(), types.cend()); @@ -149,8 +149,8 @@ Http::FilterDataStatus GzipFilter::encodeData(Buffer::Instance& data, bool end_s bool GzipFilter::hasCacheControlNoTransform(Http::HeaderMap& headers) const { const Http::HeaderEntry* cache_control = headers.CacheControl(); if (cache_control) { - return StringUtil::caseFindToken(cache_control->value().c_str(), ",", - Http::Headers::get().CacheControlValues.NoTransform.c_str()); + return StringUtil::caseFindToken(cache_control->value().getStringView(), ",", + Http::Headers::get().CacheControlValues.NoTransform); } return false; @@ -166,8 +166,8 @@ bool GzipFilter::isAcceptEncodingAllowed(Http::HeaderMap& headers) const { if (accept_encoding) { bool is_wildcard = false; // true if found and not followed by `q=0`. - for (const auto token : StringUtil::splitToken(headers.AcceptEncoding()->value().c_str(), ",", - false /* keep_empty */)) { + for (const auto token : StringUtil::splitToken( + headers.AcceptEncoding()->value().getStringView(), ",", false /* keep_empty */)) { const auto value = StringUtil::trim(StringUtil::cropRight(token, ";")); const auto q_value = StringUtil::trim(StringUtil::cropLeft(token, ";")); // If value is the gzip coding, check the qvalue and return. 
@@ -211,7 +211,8 @@ bool GzipFilter::isAcceptEncodingAllowed(Http::HeaderMap& headers) const { bool GzipFilter::isContentTypeAllowed(Http::HeaderMap& headers) const { const Http::HeaderEntry* content_type = headers.ContentType(); if (content_type && !config_->contentTypeValues().empty()) { - std::string value{StringUtil::trim(StringUtil::cropRight(content_type->value().c_str(), ";"))}; + const absl::string_view value = + StringUtil::trim(StringUtil::cropRight(content_type->value().getStringView(), ";")); return config_->contentTypeValues().find(value) != config_->contentTypeValues().end(); } @@ -231,7 +232,7 @@ bool GzipFilter::isMinimumContentLength(Http::HeaderMap& headers) const { if (content_length) { uint64_t length; const bool is_minimum_content_length = - StringUtil::atoul(content_length->value().c_str(), length) && + absl::SimpleAtoi(content_length->value().getStringView(), &length) && length >= config_->minimumLength(); if (!is_minimum_content_length) { config_->stats().content_length_too_small_.inc(); @@ -241,8 +242,8 @@ bool GzipFilter::isMinimumContentLength(Http::HeaderMap& headers) const { const Http::HeaderEntry* transfer_encoding = headers.TransferEncoding(); return (transfer_encoding && - StringUtil::caseFindToken(transfer_encoding->value().c_str(), ",", - Http::Headers::get().TransferEncodingValues.Chunked.c_str())); + StringUtil::caseFindToken(transfer_encoding->value().getStringView(), ",", + Http::Headers::get().TransferEncodingValues.Chunked)); } bool GzipFilter::isTransferEncodingAllowed(Http::HeaderMap& headers) const { @@ -251,7 +252,7 @@ bool GzipFilter::isTransferEncodingAllowed(Http::HeaderMap& headers) const { for (auto header_value : // TODO(gsagula): add Http::HeaderMap::string_view() so string length doesn't need to be // computed twice. Find all other sites where this can be improved. 
- StringUtil::splitToken(transfer_encoding->value().c_str(), ",", true)) { + StringUtil::splitToken(transfer_encoding->value().getStringView(), ",", true)) { const auto trimmed_value = StringUtil::trim(header_value); if (StringUtil::caseCompare(trimmed_value, Http::Headers::get().TransferEncodingValues.Gzip) || @@ -268,10 +269,10 @@ bool GzipFilter::isTransferEncodingAllowed(Http::HeaderMap& headers) const { void GzipFilter::insertVaryHeader(Http::HeaderMap& headers) { const Http::HeaderEntry* vary = headers.Vary(); if (vary) { - if (!StringUtil::findToken(vary->value().c_str(), ",", + if (!StringUtil::findToken(vary->value().getStringView(), ",", Http::Headers::get().VaryValues.AcceptEncoding, true)) { std::string new_header; - absl::StrAppend(&new_header, vary->value().c_str(), ", ", + absl::StrAppend(&new_header, vary->value().getStringView(), ", ", Http::Headers::get().VaryValues.AcceptEncoding); headers.insertVary().value(new_header); } @@ -288,7 +289,7 @@ void GzipFilter::insertVaryHeader(Http::HeaderMap& headers) { void GzipFilter::sanitizeEtagHeader(Http::HeaderMap& headers) { const Http::HeaderEntry* etag = headers.Etag(); if (etag) { - absl::string_view value(etag->value().c_str()); + absl::string_view value(etag->value().getStringView()); if (value.length() > 2 && !((value[0] == 'w' || value[0] == 'W') && value[1] == '/')) { headers.removeEtag(); } diff --git a/source/extensions/filters/http/gzip/gzip_filter.h b/source/extensions/filters/http/gzip/gzip_filter.h index 9fdcab98a9d33..d60f0b16d77be 100644 --- a/source/extensions/filters/http/gzip/gzip_filter.h +++ b/source/extensions/filters/http/gzip/gzip_filter.h @@ -84,7 +84,7 @@ class GzipFilterConfig { static Compressor::ZlibCompressorImpl::CompressionStrategy compressionStrategyEnum( envoy::config::filter::http::gzip::v2::Gzip_CompressionStrategy compression_strategy); static StringUtil::CaseUnorderedSet - contentTypeSet(const Protobuf::RepeatedPtrField& types); + contentTypeSet(const 
Protobuf::RepeatedPtrField& types); static uint64_t contentLengthUint(Protobuf::uint32 length); static uint64_t memoryLevelUint(Protobuf::uint32 level); diff --git a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc index 8075c0b0ff148..c7ccf19b54c14 100644 --- a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc +++ b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc @@ -30,7 +30,7 @@ Config::Config(const envoy::config::filter::http::header_to_metadata::v2::Config bool Config::configToVector(const ProtobufRepeatedRule& proto_rules, HeaderToMetadataRules& vector) { - if (proto_rules.size() == 0) { + if (proto_rules.empty()) { ENVOY_LOG(debug, "no rules provided"); return false; } @@ -101,7 +101,7 @@ bool HeaderToMetadataFilter::addMetadata(StructMap& map, const std::string& meta // Sane enough, add the key/value. switch (type) { case envoy::config::filter::http::header_to_metadata::v2::Config_ValueType_STRING: - val.set_string_value(ProtobufTypes::String(value)); + val.set_string_value(std::string(value)); break; case envoy::config::filter::http::header_to_metadata::v2::Config_ValueType_NUMBER: { double dval; @@ -174,7 +174,7 @@ void HeaderToMetadataFilter::writeHeaderToMetadata(Http::HeaderMap& headers, } // Any matching rules? 
- if (structs_by_namespace.size() > 0) { + if (!structs_by_namespace.empty()) { for (auto const& entry : structs_by_namespace) { callbacks.streamInfo().setDynamicMetadata(entry.first, entry.second); } diff --git a/source/extensions/filters/http/health_check/health_check.cc b/source/extensions/filters/http/health_check/health_check.cc index f48036863048d..aa704773706ff 100644 --- a/source/extensions/filters/http/health_check/health_check.cc +++ b/source/extensions/filters/http/health_check/health_check.cc @@ -20,6 +20,24 @@ namespace Extensions { namespace HttpFilters { namespace HealthCheck { +struct RcDetailsValues { + // The health check filter returned healthy to a health check. + const std::string HealthCheckOk = "health_check_ok"; + // The health check filter responded with a failed health check. + const std::string HealthCheckFailed = "health_check_failed"; + // The health check filter returned a cached health value. + const std::string HealthCheckCached = "health_check_cached"; + // The health check filter failed due to health checking a nonexistent cluster. + const std::string HealthCheckNoCluster = "health_check_failed_no_cluster_found"; + // The health check filter failed due to checking min_degraded against an empty cluster. + const std::string HealthCheckClusterEmpty = "health_check_failed_cluster_empty"; + // The health check filter succeeded given the cluster health was sufficient. + const std::string HealthCheckClusterHealthy = "health_check_ok_cluster_healthy"; + // The health check filter failed given the cluster health was not sufficient. 
+ const std::string HealthCheckClusterUnhealthy = "health_check_failed_cluster_unhealthy"; +}; +typedef ConstSingleton RcDetails; + HealthCheckCacheManager::HealthCheckCacheManager(Event::Dispatcher& dispatcher, std::chrono::milliseconds timeout) : clear_cache_timer_(dispatcher.createTimer([this]() -> void { onTimer(); })), @@ -97,26 +115,32 @@ Http::FilterHeadersStatus HealthCheckFilter::encodeHeaders(Http::HeaderMap& head void HealthCheckFilter::onComplete() { ASSERT(handling_); Http::Code final_status = Http::Code::OK; + const std::string* details = &RcDetails::get().HealthCheckOk; bool degraded = false; if (context_.healthCheckFailed()) { callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::FailedLocalHealthCheck); final_status = Http::Code::ServiceUnavailable; + details = &RcDetails::get().HealthCheckFailed; } else { if (cache_manager_) { const auto status_and_degraded = cache_manager_->getCachedResponse(); final_status = status_and_degraded.first; + details = &RcDetails::get().HealthCheckCached; degraded = status_and_degraded.second; } else if (cluster_min_healthy_percentages_ != nullptr && !cluster_min_healthy_percentages_->empty()) { // Check the status of the specified upstream cluster(s) to determine the right response. auto& clusterManager = context_.clusterManager(); for (const auto& item : *cluster_min_healthy_percentages_) { + details = &RcDetails::get().HealthCheckClusterHealthy; const std::string& cluster_name = item.first; const double min_healthy_percentage = item.second; auto* cluster = clusterManager.get(cluster_name); if (cluster == nullptr) { // If the cluster does not exist at all, consider the service unhealthy. 
final_status = Http::Code::ServiceUnavailable; + details = &RcDetails::get().HealthCheckNoCluster; + break; } const auto& stats = cluster->info()->stats(); @@ -128,15 +152,18 @@ void HealthCheckFilter::onComplete() { continue; } else { final_status = Http::Code::ServiceUnavailable; + details = &RcDetails::get().HealthCheckClusterEmpty; break; } } // In the general case, consider the service unhealthy if fewer than the - // specified percentage of the servers in the cluster are healthy. + // specified percentage of the servers in the cluster are available (healthy + degraded). // TODO(brian-pane) switch to purely integer-based math here, because the // int-to-float conversions and floating point division are slow. - if (stats.membership_healthy_.value() < membership_total * min_healthy_percentage / 100.0) { + if ((stats.membership_healthy_.value() + stats.membership_degraded_.value()) < + membership_total * min_healthy_percentage / 100.0) { final_status = Http::Code::ServiceUnavailable; + details = &RcDetails::get().HealthCheckClusterUnhealthy; break; } } @@ -147,13 +174,14 @@ void HealthCheckFilter::onComplete() { } } - callbacks_->sendLocalReply(final_status, "", - [degraded](auto& headers) { - if (degraded) { - headers.insertEnvoyDegraded(); - } - }, - absl::nullopt); + callbacks_->sendLocalReply( + final_status, "", + [degraded](auto& headers) { + if (degraded) { + headers.insertEnvoyDegraded(); + } + }, + absl::nullopt, *details); } } // namespace HealthCheck diff --git a/source/extensions/filters/http/jwt_authn/BUILD b/source/extensions/filters/http/jwt_authn/BUILD index c2db41e23144c..b566159039b3c 100644 --- a/source/extensions/filters/http/jwt_authn/BUILD +++ b/source/extensions/filters/http/jwt_authn/BUILD @@ -100,6 +100,7 @@ envoy_cc_library( deps = [ ":jwks_cache_lib", ":matchers_lib", + "//include/envoy/router:string_accessor_interface", "//include/envoy/server:filter_config_interface", "//include/envoy/stats:stats_macros", 
"//include/envoy/thread_local:thread_local_interface", diff --git a/source/extensions/filters/http/jwt_authn/extractor.cc b/source/extensions/filters/http/jwt_authn/extractor.cc index 979275981d735..b589c2c8bb716 100644 --- a/source/extensions/filters/http/jwt_authn/extractor.cc +++ b/source/extensions/filters/http/jwt_authn/extractor.cc @@ -102,6 +102,10 @@ class ExtractorImpl : public Extractor { // ctor helper for a jwt provider config void addProvider(const JwtProvider& provider); + // @return what should be the 3-part base64url-encoded substring; see RFC-7519 + absl::string_view extractJWT(absl::string_view value_str, + absl::string_view::size_type after) const; + // HeaderMap value type to store prefix and issuers that specified this // header. struct HeaderLocationSpec { @@ -181,11 +185,12 @@ std::vector ExtractorImpl::extract(const Http::HeaderMap& h if (entry) { auto value_str = entry->value().getStringView(); if (!location_spec->value_prefix_.empty()) { - if (!absl::StartsWith(value_str, location_spec->value_prefix_)) { - // prefix doesn't match, skip it. + const auto pos = value_str.find(location_spec->value_prefix_); + if (pos == absl::string_view::npos) { + // value_prefix not found anywhere in value_str, so skip continue; } - value_str = value_str.substr(location_spec->value_prefix_.size()); + value_str = extractJWT(value_str, pos + location_spec->value_prefix_.length()); } tokens.push_back(std::make_unique( std::string(value_str), location_spec->specified_issuers_, location_spec->header_)); @@ -198,7 +203,7 @@ std::vector ExtractorImpl::extract(const Http::HeaderMap& h } // Check query parameter locations. 
- const auto& params = Http::Utility::parseQueryString(headers.Path()->value().c_str()); + const auto& params = Http::Utility::parseQueryString(headers.Path()->value().getStringView()); for (const auto& location_it : param_locations_) { const auto& param_key = location_it.first; const auto& location_spec = location_it.second; @@ -211,6 +216,36 @@ std::vector ExtractorImpl::extract(const Http::HeaderMap& h return tokens; } +// as specified in RFC-4648 § 5, plus dot (period, 0x2e), of which two are required in the JWT +constexpr absl::string_view ConstantBase64UrlEncodingCharsPlusDot = + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_."; + +// Returns a token, not a URL: skips non-Base64Url-legal (or dot) characters, collects following +// Base64Url+dot string until first non-Base64Url char. +// +// The input parameters: +// "value_str" - the header value string, perhaps "Bearer string....", and +// "after" - the offset into that string after which to begin looking for JWT-legal characters +// +// For backwards compatibility, if it finds no suitable string, it returns value_str as-is. +// +// It is forgiving w.r.t. dots/periods, as the exact syntax will be verified after extraction. +// +// See RFC-7519 § 2, RFC-7515 § 2, and RFC-4648 "Base-N Encodings" § 5. 
+absl::string_view ExtractorImpl::extractJWT(absl::string_view value_str, + absl::string_view::size_type after) const { + const auto starting = value_str.find_first_of(ConstantBase64UrlEncodingCharsPlusDot, after); + if (starting == value_str.npos) { + return value_str; + } + // There should be two dots (periods; 0x2e) inside the string, but we don't verify that here + auto ending = value_str.find_first_not_of(ConstantBase64UrlEncodingCharsPlusDot, starting); + if (ending == value_str.npos) { // Base64Url-encoded string occupies the rest of the line + return value_str.substr(starting); + } + return value_str.substr(starting, ending - starting); +} + void ExtractorImpl::sanitizePayloadHeaders(Http::HeaderMap& headers) const { for (const auto& header : forward_payload_headers_) { headers.remove(header); diff --git a/source/extensions/filters/http/jwt_authn/filter.cc b/source/extensions/filters/http/jwt_authn/filter.cc index 57e4e4347b788..bae13d7825ec0 100644 --- a/source/extensions/filters/http/jwt_authn/filter.cc +++ b/source/extensions/filters/http/jwt_authn/filter.cc @@ -11,6 +11,12 @@ namespace Extensions { namespace HttpFilters { namespace JwtAuthn { +struct RcDetailsValues { + // The jwt_authn filter rejected the request + const std::string JwtAuthnAccessDenied = "jwt_authn_access_denied"; +}; +typedef ConstSingleton RcDetails; + Filter::Filter(FilterConfigSharedPtr config) : stats_(config->stats()), config_(config) {} void Filter::onDestroy() { @@ -26,7 +32,8 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool) state_ = Calling; stopped_ = false; // Verify the JWT token, onComplete() will be called when completed. 
- const auto* verifier = config_->findVerifier(headers); + const auto* verifier = + config_->findVerifier(headers, decoder_callbacks_->streamInfo().filterState()); if (!verifier) { onComplete(Status::Ok); } else { @@ -59,7 +66,7 @@ void Filter::onComplete(const Status& status) { Http::Code code = Http::Code::Unauthorized; // return failure reason as message body decoder_callbacks_->sendLocalReply(code, ::google::jwt_verify::getStatusString(status), nullptr, - absl::nullopt); + absl::nullopt, RcDetails::get().JwtAuthnAccessDenied); return; } stats_.allowed_.inc(); diff --git a/source/extensions/filters/http/jwt_authn/filter_config.h b/source/extensions/filters/http/jwt_authn/filter_config.h index 0b31217fd009a..622a1e50f5c4d 100644 --- a/source/extensions/filters/http/jwt_authn/filter_config.h +++ b/source/extensions/filters/http/jwt_authn/filter_config.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/api/api.h" +#include "envoy/router/string_accessor.h" #include "envoy/server/filter_config.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats_macros.h" @@ -9,6 +10,8 @@ #include "extensions/filters/http/jwt_authn/matcher.h" #include "extensions/filters/http/jwt_authn/verifier.h" +#include "absl/container/flat_hash_map.h" + namespace Envoy { namespace Extensions { namespace HttpFilters { @@ -65,7 +68,7 @@ class FilterConfig : public Logger::Loggable, public AuthFac const std::string& stats_prefix, Server::Configuration::FactoryContext& context) : proto_config_(proto_config), stats_(generateStats(stats_prefix, context.scope())), tls_(context.threadLocal().allocateSlot()), cm_(context.clusterManager()), - time_source_(context.dispatcher().timeSystem()), api_(context.api()) { + time_source_(context.dispatcher().timeSource()), api_(context.api()) { ENVOY_LOG(info, "Loaded JwtAuthConfig: {}", proto_config_.DebugString()); tls_->set([this](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { return std::make_shared(proto_config_, time_source_, api_); @@ 
-77,6 +80,15 @@ class FilterConfig : public Logger::Loggable, public AuthFac Matcher::create(rule), Verifier::create(rule.requires(), proto_config_.providers(), *this, getExtractor())); } + + if (proto_config_.has_filter_state_rules()) { + filter_state_name_ = proto_config_.filter_state_rules().name(); + for (const auto& it : proto_config_.filter_state_rules().requires()) { + filter_state_verifiers_.emplace( + it.first, + Verifier::create(it.second, proto_config_.providers(), *this, getExtractor())); + } + } } JwtAuthnFilterStats& stats() { return stats_; } @@ -97,12 +109,22 @@ class FilterConfig : public Logger::Loggable, public AuthFac const Extractor& getExtractor() const { return *extractor_; } // Finds the matcher that matched the header - virtual const Verifier* findVerifier(const Http::HeaderMap& headers) const { + virtual const Verifier* findVerifier(const Http::HeaderMap& headers, + const StreamInfo::FilterState& filter_state) const { for (const auto& pair : rule_pairs_) { if (pair.matcher_->matches(headers)) { return pair.verifier_.get(); } } + if (!filter_state_name_.empty() && !filter_state_verifiers_.empty() && + filter_state.hasData(filter_state_name_)) { + const auto& state = filter_state.getDataReadOnly(filter_state_name_); + ENVOY_LOG(debug, "use filter state value {} to find verifier.", state.asString()); + const auto& it = filter_state_verifiers_.find(state.asString()); + if (it != filter_state_verifiers_.end()) { + return it->second.get(); + } + } return nullptr; } @@ -139,6 +161,10 @@ class FilterConfig : public Logger::Loggable, public AuthFac ExtractorConstPtr extractor_; // The list of rule matchers. std::vector rule_pairs_; + // The filter state name to lookup filter_state_rules. + std::string filter_state_name_; + // The filter state verifier map from filter_state_rules. 
+ absl::flat_hash_map filter_state_verifiers_; TimeSource& time_source_; Api::Api& api_; }; diff --git a/source/extensions/filters/http/jwt_authn/matcher.cc b/source/extensions/filters/http/jwt_authn/matcher.cc index 252bc330d739a..123e590a7e727 100644 --- a/source/extensions/filters/http/jwt_authn/matcher.cc +++ b/source/extensions/filters/http/jwt_authn/matcher.cc @@ -90,8 +90,8 @@ class PathMatcherImpl : public BaseMatcherImpl { bool matches(const Http::HeaderMap& headers) const override { if (BaseMatcherImpl::matchRoute(headers)) { const Http::HeaderString& path = headers.Path()->value(); - size_t compare_length = Http::Utility::findQueryStringStart(path) - path.c_str(); - + const size_t compare_length = + path.getStringView().length() - Http::Utility::findQueryStringStart(path).length(); auto real_path = path.getStringView().substr(0, compare_length); bool match = case_sensitive_ ? real_path == path_ : StringUtil::caseCompare(real_path, path_); if (match) { @@ -119,8 +119,10 @@ class RegexMatcherImpl : public BaseMatcherImpl { bool matches(const Http::HeaderMap& headers) const override { if (BaseMatcherImpl::matchRoute(headers)) { const Http::HeaderString& path = headers.Path()->value(); - const char* query_string_start = Http::Utility::findQueryStringStart(path); - if (std::regex_match(path.c_str(), query_string_start, regex_)) { + const absl::string_view query_string = Http::Utility::findQueryStringStart(path); + absl::string_view path_view = path.getStringView(); + path_view.remove_suffix(query_string.length()); + if (std::regex_match(path_view.begin(), path_view.end(), regex_)) { ENVOY_LOG(debug, "Regex requirement '{}' matched.", regex_str_); return true; } diff --git a/source/extensions/filters/http/jwt_authn/verifier.cc b/source/extensions/filters/http/jwt_authn/verifier.cc index 9b73a81453ef9..6d3ec8aa971cd 100644 --- a/source/extensions/filters/http/jwt_authn/verifier.cc +++ b/source/extensions/filters/http/jwt_authn/verifier.cc @@ -54,7 +54,7 @@ 
class ContextImpl : public Verifier::Context { } void setPayload() { - if (payload_.fields().size() > 0) { + if (!payload_.fields().empty()) { callback_->setPayload(payload_); } } @@ -111,13 +111,14 @@ class ProviderVerifierImpl : public BaseVerifierImpl { auto& ctximpl = static_cast(*context); auto auth = auth_factory_.create(getAudienceChecker(), provider_name_, false); extractor_->sanitizePayloadHeaders(ctximpl.headers()); - auth->verify(ctximpl.headers(), extractor_->extract(ctximpl.headers()), - [&ctximpl](const std::string& name, const ProtobufWkt::Struct& payload) { - ctximpl.addPayload(name, payload); - }, - [this, context](const Status& status) { - onComplete(status, static_cast(*context)); - }); + auth->verify( + ctximpl.headers(), extractor_->extract(ctximpl.headers()), + [&ctximpl](const std::string& name, const ProtobufWkt::Struct& payload) { + ctximpl.addPayload(name, payload); + }, + [this, context](const Status& status) { + onComplete(status, static_cast(*context)); + }); if (!ctximpl.getCompletionState(this).is_completed_) { ctximpl.storeAuth(std::move(auth)); } else { @@ -160,13 +161,14 @@ class AllowFailedVerifierImpl : public BaseVerifierImpl { auto& ctximpl = static_cast(*context); auto auth = auth_factory_.create(nullptr, absl::nullopt, true); extractor_.sanitizePayloadHeaders(ctximpl.headers()); - auth->verify(ctximpl.headers(), extractor_.extract(ctximpl.headers()), - [&ctximpl](const std::string& name, const ProtobufWkt::Struct& payload) { - ctximpl.addPayload(name, payload); - }, - [this, context](const Status& status) { - onComplete(status, static_cast(*context)); - }); + auth->verify( + ctximpl.headers(), extractor_.extract(ctximpl.headers()), + [&ctximpl](const std::string& name, const ProtobufWkt::Struct& payload) { + ctximpl.addPayload(name, payload); + }, + [this, context](const Status& status) { + onComplete(status, static_cast(*context)); + }); if (!ctximpl.getCompletionState(this).is_completed_) { 
ctximpl.storeAuth(std::move(auth)); } else { @@ -180,7 +182,7 @@ class AllowFailedVerifierImpl : public BaseVerifierImpl { }; VerifierConstPtr innerCreate(const JwtRequirement& requirement, - const Protobuf::Map& providers, + const Protobuf::Map& providers, const AuthFactory& factory, const Extractor& extractor, const BaseVerifierImpl* parent); @@ -208,7 +210,7 @@ class BaseGroupVerifierImpl : public BaseVerifierImpl { class AnyVerifierImpl : public BaseGroupVerifierImpl { public: AnyVerifierImpl(const JwtRequirementOrList& or_list, const AuthFactory& factory, - const Protobuf::Map& providers, + const Protobuf::Map& providers, const Extractor& extractor_for_allow_fail, const BaseVerifierImpl* parent) : BaseGroupVerifierImpl(parent) { for (const auto& it : or_list.requirements()) { @@ -233,7 +235,7 @@ class AnyVerifierImpl : public BaseGroupVerifierImpl { class AllVerifierImpl : public BaseGroupVerifierImpl { public: AllVerifierImpl(const JwtRequirementAndList& and_list, const AuthFactory& factory, - const Protobuf::Map& providers, + const Protobuf::Map& providers, const Extractor& extractor_for_allow_fail, const BaseVerifierImpl* parent) : BaseGroupVerifierImpl(parent) { for (const auto& it : and_list.requirements()) { @@ -265,7 +267,7 @@ class AllowAllVerifierImpl : public BaseVerifierImpl { }; VerifierConstPtr innerCreate(const JwtRequirement& requirement, - const Protobuf::Map& providers, + const Protobuf::Map& providers, const AuthFactory& factory, const Extractor& extractor_for_allow_fail, const BaseVerifierImpl* parent) { std::string provider_name; @@ -311,10 +313,10 @@ ContextSharedPtr Verifier::createContext(Http::HeaderMap& headers, Callbacks* ca return std::make_shared(headers, callback); } -VerifierConstPtr -Verifier::create(const JwtRequirement& requirement, - const Protobuf::Map& providers, - const AuthFactory& factory, const Extractor& extractor_for_allow_fail) { +VerifierConstPtr Verifier::create(const JwtRequirement& requirement, + const 
Protobuf::Map& providers, + const AuthFactory& factory, + const Extractor& extractor_for_allow_fail) { return innerCreate(requirement, providers, factory, extractor_for_allow_fail, nullptr); } diff --git a/source/extensions/filters/http/jwt_authn/verifier.h b/source/extensions/filters/http/jwt_authn/verifier.h index 5ccca60c7c24d..d3a9575ad40e6 100644 --- a/source/extensions/filters/http/jwt_authn/verifier.h +++ b/source/extensions/filters/http/jwt_authn/verifier.h @@ -71,12 +71,11 @@ class Verifier { virtual void verify(ContextSharedPtr context) const PURE; // Factory method for creating verifiers. - static VerifierConstPtr - create(const ::envoy::config::filter::http::jwt_authn::v2alpha::JwtRequirement& requirement, - const Protobuf::Map& - providers, - const AuthFactory& factory, const Extractor& extractor_for_allow_fail); + static VerifierConstPtr create( + const ::envoy::config::filter::http::jwt_authn::v2alpha::JwtRequirement& requirement, + const Protobuf::Map< + std::string, ::envoy::config::filter::http::jwt_authn::v2alpha::JwtProvider>& providers, + const AuthFactory& factory, const Extractor& extractor_for_allow_fail); // Factory method for creating verifier contexts. 
static ContextSharedPtr createContext(Http::HeaderMap& headers, Callbacks* callback); diff --git a/source/extensions/filters/http/lua/lua_filter.cc b/source/extensions/filters/http/lua/lua_filter.cc index ed67a47113a9e..38b997dd27e7e 100644 --- a/source/extensions/filters/http/lua/lua_filter.cc +++ b/source/extensions/filters/http/lua/lua_filter.cc @@ -118,7 +118,7 @@ int StreamHandleWrapper::luaRespond(lua_State* state) { uint64_t status; if (headers->Status() == nullptr || - !StringUtil::atoul(headers->Status()->value().c_str(), status) || status < 200 || + !absl::SimpleAtoi(headers->Status()->value().getStringView(), &status) || status < 200 || status >= 600) { luaL_error(state, ":status must be between 200-599"); } @@ -212,8 +212,10 @@ void StreamHandleWrapper::onSuccess(Http::MessagePtr&& response) { response->headers().iterate( [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { lua_State* state = static_cast(context); - lua_pushstring(state, header.key().c_str()); - lua_pushstring(state, header.value().c_str()); + lua_pushlstring(state, header.key().getStringView().data(), + header.key().getStringView().length()); + lua_pushlstring(state, header.value().getStringView().data(), + header.value().getStringView().length()); lua_settable(state, -3); return Http::HeaderMap::Iterate::Continue; }, diff --git a/source/extensions/filters/http/lua/wrappers.cc b/source/extensions/filters/http/lua/wrappers.cc index cec3f527dc3b9..f675d34cfa598 100644 --- a/source/extensions/filters/http/lua/wrappers.cc +++ b/source/extensions/filters/http/lua/wrappers.cc @@ -25,8 +25,10 @@ int HeaderMapIterator::luaPairsIterator(lua_State* state) { parent_.iterator_.reset(); return 0; } else { - lua_pushstring(state, entries_[current_]->key().c_str()); - lua_pushstring(state, entries_[current_]->value().c_str()); + const absl::string_view key_view(entries_[current_]->key().getStringView()); + lua_pushlstring(state, key_view.data(), key_view.length()); + const 
absl::string_view value_view(entries_[current_]->value().getStringView()); + lua_pushlstring(state, value_view.data(), value_view.length()); current_++; return 2; } @@ -45,7 +47,8 @@ int HeaderMapWrapper::luaGet(lua_State* state) { const char* key = luaL_checkstring(state, 2); const Http::HeaderEntry* entry = headers_.get(Http::LowerCaseString(key)); if (entry != nullptr) { - lua_pushstring(state, entry->value().c_str()); + lua_pushlstring(state, entry->value().getStringView().data(), + entry->value().getStringView().length()); return 1; } else { return 0; diff --git a/source/extensions/filters/http/lua/wrappers.h b/source/extensions/filters/http/lua/wrappers.h index b8644afc3dda3..91f1a400f5b81 100644 --- a/source/extensions/filters/http/lua/wrappers.h +++ b/source/extensions/filters/http/lua/wrappers.h @@ -116,7 +116,7 @@ class DynamicMetadataMapIterator private: DynamicMetadataMapWrapper& parent_; - Protobuf::Map::const_iterator current_; + Protobuf::Map::const_iterator current_; }; /** diff --git a/source/extensions/filters/http/ratelimit/BUILD b/source/extensions/filters/http/ratelimit/BUILD index daec6fcda658a..45e8abef7c789 100644 --- a/source/extensions/filters/http/ratelimit/BUILD +++ b/source/extensions/filters/http/ratelimit/BUILD @@ -24,7 +24,6 @@ envoy_cc_library( "//source/common/http:codes_lib", "//source/common/router:config_lib", "//source/extensions/filters/common/ratelimit:ratelimit_client_interface", - "//source/extensions/filters/common/ratelimit:ratelimit_registration_lib", "@envoy_api//envoy/config/filter/http/rate_limit/v2:rate_limit_cc", ], ) diff --git a/source/extensions/filters/http/ratelimit/config.cc b/source/extensions/filters/http/ratelimit/config.cc index eacacf5006b9b..950ad9ce03d5b 100644 --- a/source/extensions/filters/http/ratelimit/config.cc +++ b/source/extensions/filters/http/ratelimit/config.cc @@ -10,7 +10,6 @@ #include "common/protobuf/utility.h" #include "extensions/filters/common/ratelimit/ratelimit_impl.h" -#include 
"extensions/filters/common/ratelimit/ratelimit_registration.h" #include "extensions/filters/http/ratelimit/ratelimit.h" namespace Envoy { @@ -27,19 +26,12 @@ Http::FilterFactoryCb RateLimitFilterConfig::createFilterFactoryFromProtoTyped( context.httpContext())); const std::chrono::milliseconds timeout = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(proto_config, timeout, 20)); - Filters::Common::RateLimit::ClientFactoryPtr client_factory = - Filters::Common::RateLimit::rateLimitClientFactory(context); - // If ratelimit service config is provided in both bootstrap and filter, we should validate that - // they are same. - Filters::Common::RateLimit::validateRateLimitConfig< - const envoy::config::filter::http::rate_limit::v2::RateLimit&>(proto_config, client_factory); - - return [client_factory, proto_config, &context, timeout, + + return [proto_config, &context, timeout, filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamFilter(std::make_shared( - filter_config, - Filters::Common::RateLimit::rateLimitClient( - client_factory, context, proto_config.rate_limit_service().grpc_service(), timeout))); + filter_config, Filters::Common::RateLimit::rateLimitClient( + context, proto_config.rate_limit_service().grpc_service(), timeout))); }; } diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index 244014f0fbe44..a23d3037676d0 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -17,6 +17,14 @@ namespace Extensions { namespace HttpFilters { namespace RateLimitFilter { +struct RcDetailsValues { + // This request went above the configured limits for the rate limit filter. + const std::string RateLimited = "request_rate_limited"; + // The rate limiter encountered a failure, and was configured to fail-closed. 
+ const std::string RateLimitError = "rate_limiter_error"; +}; +typedef ConstSingleton RcDetails; + void Filter::initiateCall(const Http::HeaderMap& headers) { bool is_internal_request = headers.EnvoyInternalRequest() && (headers.EnvoyInternalRequest()->value() == "true"); @@ -150,9 +158,9 @@ void Filter::complete(Filters::Common::RateLimit::LimitStatus status, if (status == Filters::Common::RateLimit::LimitStatus::OverLimit && config_->runtime().snapshot().featureEnabled("ratelimit.http_filter_enforcing", 100)) { state_ = State::Responded; - callbacks_->sendLocalReply(Http::Code::TooManyRequests, "", - [this](Http::HeaderMap& headers) { addHeaders(headers); }, - config_->rateLimitedGrpcStatus()); + callbacks_->sendLocalReply( + Http::Code::TooManyRequests, "", [this](Http::HeaderMap& headers) { addHeaders(headers); }, + config_->rateLimitedGrpcStatus(), RcDetails::get().RateLimited); callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::RateLimited); } else if (status == Filters::Common::RateLimit::LimitStatus::Error) { if (config_->failureModeAllow()) { @@ -162,7 +170,8 @@ void Filter::complete(Filters::Common::RateLimit::LimitStatus status, } } else { state_ = State::Responded; - callbacks_->sendLocalReply(Http::Code::InternalServerError, "", nullptr, absl::nullopt); + callbacks_->sendLocalReply(Http::Code::InternalServerError, "", nullptr, absl::nullopt, + RcDetails::get().RateLimitError); callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::RateLimitServiceError); } } else if (!initiating_call_) { diff --git a/source/extensions/filters/http/rbac/rbac_filter.cc b/source/extensions/filters/http/rbac/rbac_filter.cc index 5316d15a77225..ce82d3415b6d4 100644 --- a/source/extensions/filters/http/rbac/rbac_filter.cc +++ b/source/extensions/filters/http/rbac/rbac_filter.cc @@ -6,11 +6,19 @@ #include "extensions/filters/http/well_known_names.h" +#include "absl/strings/str_join.h" + namespace Envoy { namespace Extensions { namespace 
HttpFilters { namespace RBACFilter { +struct RcDetailsValues { + // The rbac filter rejected the request + const std::string RbacAccessDenied = "rbac_access_denied"; +}; +typedef ConstSingleton RcDetails; + RoleBasedAccessControlFilterConfig::RoleBasedAccessControlFilterConfig( const envoy::config::filter::http::rbac::v2::RBAC& proto_config, const std::string& stats_prefix, Stats::Scope& scope) @@ -48,18 +56,18 @@ RoleBasedAccessControlRouteSpecificFilterConfig::RoleBasedAccessControlRouteSpec Http::FilterHeadersStatus RoleBasedAccessControlFilter::decodeHeaders(Http::HeaderMap& headers, bool) { - ENVOY_LOG( - debug, - "checking request: remoteAddress: {}, localAddress: {}, ssl: {}, headers: {}, " - "dynamicMetadata: {}", - callbacks_->connection()->remoteAddress()->asString(), - callbacks_->connection()->localAddress()->asString(), - callbacks_->connection()->ssl() - ? "uriSanPeerCertificate: " + callbacks_->connection()->ssl()->uriSanPeerCertificate() + - ", subjectPeerCertificate: " + - callbacks_->connection()->ssl()->subjectPeerCertificate() - : "none", - headers, callbacks_->streamInfo().dynamicMetadata().DebugString()); + ENVOY_LOG(debug, + "checking request: remoteAddress: {}, localAddress: {}, ssl: {}, headers: {}, " + "dynamicMetadata: {}", + callbacks_->connection()->remoteAddress()->asString(), + callbacks_->connection()->localAddress()->asString(), + callbacks_->connection()->ssl() + ? 
"uriSanPeerCertificate: " + + absl::StrJoin(callbacks_->connection()->ssl()->uriSanPeerCertificate(), ",") + + ", subjectPeerCertificate: " + + callbacks_->connection()->ssl()->subjectPeerCertificate() + : "none", + headers, callbacks_->streamInfo().dynamicMetadata().DebugString()); std::string effective_policy_id; const auto& shadow_engine = @@ -105,7 +113,7 @@ Http::FilterHeadersStatus RoleBasedAccessControlFilter::decodeHeaders(Http::Head } else { ENVOY_LOG(debug, "enforced denied"); callbacks_->sendLocalReply(Http::Code::Forbidden, "RBAC: access denied", nullptr, - absl::nullopt); + absl::nullopt, RcDetails::get().RbacAccessDenied); config_->stats().denied_.inc(); return Http::FilterHeadersStatus::StopIteration; } diff --git a/source/extensions/filters/http/squash/squash_filter.cc b/source/extensions/filters/http/squash/squash_filter.cc index d55f82f9410fb..9e12c6c4d43fe 100644 --- a/source/extensions/filters/http/squash/squash_filter.cc +++ b/source/extensions/filters/http/squash/squash_filter.cc @@ -197,7 +197,7 @@ void SquashFilter::onCreateAttachmentSuccess(Http::MessagePtr&& m) { // Get the config object that was created if (Http::Utility::getResponseStatus(m->headers()) != enumToInt(Http::Code::Created)) { ENVOY_LOG(debug, "Squash: can't create attachment object. status {} - not squashing", - m->headers().Status()->value().c_str()); + m->headers().Status()->value().getStringView()); doneSquashing(); } else { std::string debug_attachment_id; diff --git a/source/extensions/filters/http/squash/squash_filter.h b/source/extensions/filters/http/squash/squash_filter.h index 08c81053a5740..b09e230925474 100644 --- a/source/extensions/filters/http/squash/squash_filter.h +++ b/source/extensions/filters/http/squash/squash_filter.h @@ -103,7 +103,7 @@ class SquashFilter : public Http::StreamDecoderFilter, const SquashFilterConfigSharedPtr config_; // Current state of the squash filter. 
If is_squashing_ is true, Hold the request while we - // communicate with the squash server to attach a debugger. If it is false, let the the request + // communicate with the squash server to attach a debugger. If it is false, let the request // pass-through. bool is_squashing_; // The API path of the created debug attachment (used for polling its state). diff --git a/source/extensions/filters/http/tap/BUILD b/source/extensions/filters/http/tap/BUILD index 2bccd9aee43c3..40e7a1f8f6ac0 100644 --- a/source/extensions/filters/http/tap/BUILD +++ b/source/extensions/filters/http/tap/BUILD @@ -15,6 +15,7 @@ envoy_cc_library( name = "tap_config_interface", hdrs = ["tap_config.h"], deps = [ + "//include/envoy/buffer:buffer_interface", "//include/envoy/http:header_map_interface", "//source/extensions/common/tap:tap_interface", "@envoy_api//envoy/service/tap/v2alpha:common_cc", diff --git a/source/extensions/filters/http/tap/tap_config.h b/source/extensions/filters/http/tap/tap_config.h index 3444f213b37d9..041544f363baa 100644 --- a/source/extensions/filters/http/tap/tap_config.h +++ b/source/extensions/filters/http/tap/tap_config.h @@ -1,5 +1,6 @@ #pragma once +#include "envoy/buffer/buffer.h" #include "envoy/common/pure.h" #include "envoy/http/header_map.h" #include "envoy/service/tap/v2alpha/common.pb.h" @@ -24,6 +25,11 @@ class HttpPerRequestTapper { */ virtual void onRequestHeaders(const Http::HeaderMap& headers) PURE; + /** + * Called when request body is received. + */ + virtual void onRequestBody(const Buffer::Instance& data) PURE; + /** * Called when request trailers are received. */ @@ -34,6 +40,11 @@ class HttpPerRequestTapper { */ virtual void onResponseHeaders(const Http::HeaderMap& headers) PURE; + /** + * Called when response body is received. + */ + virtual void onResponseBody(const Buffer::Instance& data) PURE; + /** * Called when response trailers are received. 
*/ @@ -43,10 +54,7 @@ class HttpPerRequestTapper { * Called when the request is being destroyed and is being logged. * @return whether the request was tapped or not. */ - virtual bool onDestroyLog(const Http::HeaderMap* request_headers, - const Http::HeaderMap* request_trailers, - const Http::HeaderMap* response_headers, - const Http::HeaderMap* response_trailers) PURE; + virtual bool onDestroyLog() PURE; }; using HttpPerRequestTapperPtr = std::unique_ptr; @@ -54,7 +62,7 @@ using HttpPerRequestTapperPtr = std::unique_ptr; /** * Abstract HTTP tap configuration. */ -class HttpTapConfig : public Extensions::Common::Tap::TapConfig { +class HttpTapConfig : public virtual Extensions::Common::Tap::TapConfig { public: /** * @return a new per-request HTTP tapper which is used to handle tapping of a discrete request. diff --git a/source/extensions/filters/http/tap/tap_config_impl.cc b/source/extensions/filters/http/tap/tap_config_impl.cc index 3ed61680a1dd7..295c76d319aec 100644 --- a/source/extensions/filters/http/tap/tap_config_impl.cc +++ b/source/extensions/filters/http/tap/tap_config_impl.cc @@ -10,67 +10,207 @@ namespace Extensions { namespace HttpFilters { namespace TapFilter { +namespace TapCommon = Extensions::Common::Tap; + +namespace { +Http::HeaderMap::Iterate fillHeaderList(const Http::HeaderEntry& header, void* context) { + Protobuf::RepeatedPtrField& header_list = + *reinterpret_cast*>(context); + auto& new_header = *header_list.Add(); + new_header.set_key(std::string(header.key().getStringView())); + new_header.set_value(std::string(header.value().getStringView())); + return Http::HeaderMap::Iterate::Continue; +} +} // namespace + HttpTapConfigImpl::HttpTapConfigImpl(envoy::service::tap::v2alpha::TapConfig&& proto_config, Common::Tap::Sink* admin_streamer) - : Extensions::Common::Tap::TapConfigBaseImpl(std::move(proto_config), admin_streamer) {} + : TapCommon::TapConfigBaseImpl(std::move(proto_config), admin_streamer) {} HttpPerRequestTapperPtr 
HttpTapConfigImpl::createPerRequestTapper(uint64_t stream_id) { return std::make_unique(shared_from_this(), stream_id); } +void HttpPerRequestTapperImpl::streamRequestHeaders() { + TapCommon::TraceWrapperPtr trace = makeTraceSegment(); + request_headers_->iterate( + fillHeaderList, + trace->mutable_http_streamed_trace_segment()->mutable_request_headers()->mutable_headers()); + sink_handle_->submitTrace(std::move(trace)); +} + void HttpPerRequestTapperImpl::onRequestHeaders(const Http::HeaderMap& headers) { + request_headers_ = &headers; config_->rootMatcher().onHttpRequestHeaders(headers, statuses_); + if (config_->streaming() && config_->rootMatcher().matchStatus(statuses_).matches_) { + ASSERT(!started_streaming_trace_); + started_streaming_trace_ = true; + streamRequestHeaders(); + } +} + +void HttpPerRequestTapperImpl::streamBufferedRequestBody() { + if (buffered_streamed_request_body_ != nullptr) { + sink_handle_->submitTrace(std::move(buffered_streamed_request_body_)); + buffered_streamed_request_body_.reset(); + } +} + +void HttpPerRequestTapperImpl::onRequestBody(const Buffer::Instance& data) { + onBody(data, buffered_streamed_request_body_, config_->maxBufferedRxBytes(), + &envoy::data::tap::v2alpha::HttpStreamedTraceSegment::mutable_request_body_chunk, + &envoy::data::tap::v2alpha::HttpBufferedTrace::mutable_request); +} + +void HttpPerRequestTapperImpl::streamRequestTrailers() { + if (request_trailers_ != nullptr) { + TapCommon::TraceWrapperPtr trace = makeTraceSegment(); + request_trailers_->iterate(fillHeaderList, trace->mutable_http_streamed_trace_segment() + ->mutable_request_trailers() + ->mutable_headers()); + sink_handle_->submitTrace(std::move(trace)); + } } void HttpPerRequestTapperImpl::onRequestTrailers(const Http::HeaderMap& trailers) { + request_trailers_ = &trailers; config_->rootMatcher().onHttpRequestTrailers(trailers, statuses_); + if (config_->streaming() && config_->rootMatcher().matchStatus(statuses_).matches_) { + if 
(!started_streaming_trace_) { + started_streaming_trace_ = true; + // Flush anything that we already buffered. + streamRequestHeaders(); + streamBufferedRequestBody(); + } + + streamRequestTrailers(); + } +} + +void HttpPerRequestTapperImpl::streamResponseHeaders() { + TapCommon::TraceWrapperPtr trace = makeTraceSegment(); + response_headers_->iterate( + fillHeaderList, + trace->mutable_http_streamed_trace_segment()->mutable_response_headers()->mutable_headers()); + sink_handle_->submitTrace(std::move(trace)); } void HttpPerRequestTapperImpl::onResponseHeaders(const Http::HeaderMap& headers) { + response_headers_ = &headers; config_->rootMatcher().onHttpResponseHeaders(headers, statuses_); + if (config_->streaming() && config_->rootMatcher().matchStatus(statuses_).matches_) { + if (!started_streaming_trace_) { + started_streaming_trace_ = true; + // Flush anything that we already buffered. + streamRequestHeaders(); + streamBufferedRequestBody(); + streamRequestTrailers(); + } + + streamResponseHeaders(); + } +} + +void HttpPerRequestTapperImpl::streamBufferedResponseBody() { + if (buffered_streamed_response_body_ != nullptr) { + sink_handle_->submitTrace(std::move(buffered_streamed_response_body_)); + buffered_streamed_response_body_.reset(); + } +} + +void HttpPerRequestTapperImpl::onResponseBody(const Buffer::Instance& data) { + onBody(data, buffered_streamed_response_body_, config_->maxBufferedTxBytes(), + &envoy::data::tap::v2alpha::HttpStreamedTraceSegment::mutable_response_body_chunk, + &envoy::data::tap::v2alpha::HttpBufferedTrace::mutable_response); } void HttpPerRequestTapperImpl::onResponseTrailers(const Http::HeaderMap& trailers) { + response_trailers_ = &trailers; config_->rootMatcher().onHttpResponseTrailers(trailers, statuses_); -} + if (config_->streaming() && config_->rootMatcher().matchStatus(statuses_).matches_) { + if (!started_streaming_trace_) { + started_streaming_trace_ = true; + // Flush anything that we already buffered. 
+ streamRequestHeaders(); + streamBufferedRequestBody(); + streamRequestTrailers(); + streamResponseHeaders(); + streamBufferedResponseBody(); + } -namespace { -Http::HeaderMap::Iterate fillHeaderList(const Http::HeaderEntry& header, void* context) { - Protobuf::RepeatedPtrField& header_list = - *reinterpret_cast*>(context); - auto& new_header = *header_list.Add(); - new_header.set_key(header.key().c_str()); - new_header.set_value(header.value().c_str()); - return Http::HeaderMap::Iterate::Continue; + TapCommon::TraceWrapperPtr trace = makeTraceSegment(); + trailers.iterate(fillHeaderList, trace->mutable_http_streamed_trace_segment() + ->mutable_response_trailers() + ->mutable_headers()); + sink_handle_->submitTrace(std::move(trace)); + } } -} // namespace -bool HttpPerRequestTapperImpl::onDestroyLog(const Http::HeaderMap* request_headers, - const Http::HeaderMap* request_trailers, - const Http::HeaderMap* response_headers, - const Http::HeaderMap* response_trailers) { - if (!config_->rootMatcher().matches(statuses_)) { - return false; +bool HttpPerRequestTapperImpl::onDestroyLog() { + if (config_->streaming() || !config_->rootMatcher().matchStatus(statuses_).matches_) { + return config_->rootMatcher().matchStatus(statuses_).matches_; } - auto trace = std::make_shared(); - auto& http_trace = *trace->mutable_http_buffered_trace(); - request_headers->iterate(fillHeaderList, http_trace.mutable_request()->mutable_headers()); - if (request_trailers != nullptr) { - request_trailers->iterate(fillHeaderList, http_trace.mutable_request()->mutable_trailers()); + makeBufferedFullTraceIfNeeded(); + auto& http_trace = *buffered_full_trace_->mutable_http_buffered_trace(); + if (request_headers_ != nullptr) { + request_headers_->iterate(fillHeaderList, http_trace.mutable_request()->mutable_headers()); } - if (response_headers != nullptr) { - response_headers->iterate(fillHeaderList, http_trace.mutable_response()->mutable_headers()); + if (request_trailers_ != nullptr) { + 
request_trailers_->iterate(fillHeaderList, http_trace.mutable_request()->mutable_trailers()); } - if (response_trailers != nullptr) { - response_trailers->iterate(fillHeaderList, http_trace.mutable_response()->mutable_trailers()); + if (response_headers_ != nullptr) { + response_headers_->iterate(fillHeaderList, http_trace.mutable_response()->mutable_headers()); + } + if (response_trailers_ != nullptr) { + response_trailers_->iterate(fillHeaderList, http_trace.mutable_response()->mutable_trailers()); } ENVOY_LOG(debug, "submitting buffered trace sink"); - config_->sink().submitBufferedTrace(trace, stream_id_); + // move is safe as onDestroyLog is the last method called. + sink_handle_->submitTrace(std::move(buffered_full_trace_)); return true; } +void HttpPerRequestTapperImpl::onBody( + const Buffer::Instance& data, Extensions::Common::Tap::TraceWrapperPtr& buffered_streamed_body, + uint32_t maxBufferedBytes, MutableBodyChunk mutable_body_chunk, + MutableMessage mutable_message) { + // TODO(mattklein123): Body matching. + if (config_->streaming()) { + const auto match_status = config_->rootMatcher().matchStatus(statuses_); + // Without body matching, we must have already started tracing or have not yet matched. + ASSERT(started_streaming_trace_ || !match_status.matches_); + + if (started_streaming_trace_) { + // If we have already started streaming, flush a body segment now. + TapCommon::TraceWrapperPtr trace = makeTraceSegment(); + TapCommon::Utility::addBufferToProtoBytes( + *(trace->mutable_http_streamed_trace_segment()->*mutable_body_chunk)(), maxBufferedBytes, + data, 0, data.length()); + sink_handle_->submitTrace(std::move(trace)); + } else if (match_status.might_change_status_) { + // If we might still match, start buffering the body up to our limit. 
+ if (buffered_streamed_body == nullptr) { + buffered_streamed_body = makeTraceSegment(); + } + auto& body = + *(buffered_streamed_body->mutable_http_streamed_trace_segment()->*mutable_body_chunk)(); + ASSERT(body.as_bytes().size() <= maxBufferedBytes); + TapCommon::Utility::addBufferToProtoBytes(body, maxBufferedBytes - body.as_bytes().size(), + data, 0, data.length()); + } + } else { + // If we are not streaming, buffer the body up to our limit. + makeBufferedFullTraceIfNeeded(); + auto& body = + *(buffered_full_trace_->mutable_http_buffered_trace()->*mutable_message)()->mutable_body(); + ASSERT(body.as_bytes().size() <= maxBufferedBytes); + TapCommon::Utility::addBufferToProtoBytes(body, maxBufferedBytes - body.as_bytes().size(), data, + 0, data.length()); + } +} + } // namespace TapFilter } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/tap/tap_config_impl.h b/source/extensions/filters/http/tap/tap_config_impl.h index 8c0fa527188db..9846b6e529510 100644 --- a/source/extensions/filters/http/tap/tap_config_impl.h +++ b/source/extensions/filters/http/tap/tap_config_impl.h @@ -23,28 +23,66 @@ class HttpTapConfigImpl : public Extensions::Common::Tap::TapConfigBaseImpl, HttpPerRequestTapperPtr createPerRequestTapper(uint64_t stream_id) override; }; -using HttpTapConfigImplSharedPtr = std::shared_ptr; - class HttpPerRequestTapperImpl : public HttpPerRequestTapper, Logger::Loggable { public: - HttpPerRequestTapperImpl(HttpTapConfigImplSharedPtr config, uint64_t stream_id) - : config_(std::move(config)), stream_id_(stream_id), statuses_(config_->numMatchers()) { + HttpPerRequestTapperImpl(HttpTapConfigSharedPtr config, uint64_t stream_id) + : config_(std::move(config)), stream_id_(stream_id), + sink_handle_(config_->createPerTapSinkHandleManager(stream_id)), + statuses_(config_->createMatchStatusVector()) { config_->rootMatcher().onNewStream(statuses_); } // TapFilter::HttpPerRequestTapper void onRequestHeaders(const 
Http::HeaderMap& headers) override; + void onRequestBody(const Buffer::Instance& data) override; void onRequestTrailers(const Http::HeaderMap& headers) override; void onResponseHeaders(const Http::HeaderMap& headers) override; + void onResponseBody(const Buffer::Instance& data) override; void onResponseTrailers(const Http::HeaderMap& headers) override; - bool onDestroyLog(const Http::HeaderMap* request_headers, const Http::HeaderMap* request_trailers, - const Http::HeaderMap* response_headers, - const Http::HeaderMap* response_trailers) override; + bool onDestroyLog() override; private: - HttpTapConfigImplSharedPtr config_; + typedef envoy::data::tap::v2alpha::Body* ( + envoy::data::tap::v2alpha::HttpStreamedTraceSegment::*MutableBodyChunk)(); + typedef envoy::data::tap::v2alpha::HttpBufferedTrace::Message* ( + envoy::data::tap::v2alpha::HttpBufferedTrace::*MutableMessage)(); + + void onBody(const Buffer::Instance& data, + Extensions::Common::Tap::TraceWrapperPtr& buffered_streamed_body, + uint32_t maxBufferedBytes, MutableBodyChunk mutable_body_chunk, + MutableMessage mutable_message); + + void makeBufferedFullTraceIfNeeded() { + if (buffered_full_trace_ == nullptr) { + buffered_full_trace_ = Extensions::Common::Tap::makeTraceWrapper(); + } + } + + Extensions::Common::Tap::TraceWrapperPtr makeTraceSegment() { + Extensions::Common::Tap::TraceWrapperPtr segment = Extensions::Common::Tap::makeTraceWrapper(); + segment->mutable_http_streamed_trace_segment()->set_trace_id(stream_id_); + return segment; + } + + void streamRequestHeaders(); + void streamBufferedRequestBody(); + void streamRequestTrailers(); + void streamResponseHeaders(); + void streamBufferedResponseBody(); + + HttpTapConfigSharedPtr config_; const uint64_t stream_id_; - std::vector statuses_; + Extensions::Common::Tap::PerTapSinkHandleManagerPtr sink_handle_; + Extensions::Common::Tap::Matcher::MatchStatusVector statuses_; + bool started_streaming_trace_{}; + const Http::HeaderMap* request_headers_{}; 
+ const Http::HeaderMap* request_trailers_{}; + const Http::HeaderMap* response_headers_{}; + const Http::HeaderMap* response_trailers_{}; + // Must be a shared_ptr because of submitTrace(). + Extensions::Common::Tap::TraceWrapperPtr buffered_streamed_request_body_; + Extensions::Common::Tap::TraceWrapperPtr buffered_streamed_response_body_; + Extensions::Common::Tap::TraceWrapperPtr buffered_full_trace_; }; } // namespace TapFilter diff --git a/source/extensions/filters/http/tap/tap_filter.cc b/source/extensions/filters/http/tap/tap_filter.cc index 865d93fe7a07b..aba00bb9734e3 100644 --- a/source/extensions/filters/http/tap/tap_filter.cc +++ b/source/extensions/filters/http/tap/tap_filter.cc @@ -32,9 +32,14 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool) return Http::FilterHeadersStatus::Continue; } +Http::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool) { + if (tapper_ != nullptr) { + tapper_->onRequestBody(data); + } + return Http::FilterDataStatus::Continue; +} + Http::FilterTrailersStatus Filter::decodeTrailers(Http::HeaderMap& trailers) { - // TODO(mattklein123): Why is this not provided in the log callback? Do a follow-up to make it so. 
- request_trailers_ = &trailers; if (tapper_ != nullptr) { tapper_->onRequestTrailers(trailers); } @@ -48,6 +53,13 @@ Http::FilterHeadersStatus Filter::encodeHeaders(Http::HeaderMap& headers, bool) return Http::FilterHeadersStatus::Continue; } +Http::FilterDataStatus Filter::encodeData(Buffer::Instance& data, bool) { + if (tapper_ != nullptr) { + tapper_->onResponseBody(data); + } + return Http::FilterDataStatus::Continue; +} + Http::FilterTrailersStatus Filter::encodeTrailers(Http::HeaderMap& trailers) { if (tapper_ != nullptr) { tapper_->onResponseTrailers(trailers); @@ -55,10 +67,9 @@ Http::FilterTrailersStatus Filter::encodeTrailers(Http::HeaderMap& trailers) { return Http::FilterTrailersStatus::Continue; } -void Filter::log(const Http::HeaderMap* request_headers, const Http::HeaderMap* response_headers, - const Http::HeaderMap* response_trailers, const StreamInfo::StreamInfo&) { - if (tapper_ != nullptr && tapper_->onDestroyLog(request_headers, request_trailers_, - response_headers, response_trailers)) { +void Filter::log(const Http::HeaderMap*, const Http::HeaderMap*, const Http::HeaderMap*, + const StreamInfo::StreamInfo&) { + if (tapper_ != nullptr && tapper_->onDestroyLog()) { config_->stats().rq_tapped_.inc(); } } diff --git a/source/extensions/filters/http/tap/tap_filter.h b/source/extensions/filters/http/tap/tap_filter.h index 326dcd846c0c0..a8c5d0d7f5d79 100644 --- a/source/extensions/filters/http/tap/tap_filter.h +++ b/source/extensions/filters/http/tap/tap_filter.h @@ -83,9 +83,7 @@ class Filter : public Http::StreamFilter, public AccessLog::Instance { // Http::StreamDecoderFilter Http::FilterHeadersStatus decodeHeaders(Http::HeaderMap& headers, bool end_stream) override; - Http::FilterDataStatus decodeData(Buffer::Instance&, bool) override { - return Http::FilterDataStatus::Continue; - } + Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override; Http::FilterTrailersStatus decodeTrailers(Http::HeaderMap& trailers) 
override; void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override { HttpTapConfigSharedPtr config = config_->currentConfig(); @@ -97,9 +95,7 @@ class Filter : public Http::StreamFilter, public AccessLog::Instance { return Http::FilterHeadersStatus::Continue; } Http::FilterHeadersStatus encodeHeaders(Http::HeaderMap& headers, bool end_stream) override; - Http::FilterDataStatus encodeData(Buffer::Instance&, bool) override { - return Http::FilterDataStatus::Continue; - } + Http::FilterDataStatus encodeData(Buffer::Instance& data, bool end_stream) override; Http::FilterTrailersStatus encodeTrailers(Http::HeaderMap& trailers) override; Http::FilterMetadataStatus encodeMetadata(Http::MetadataMap&) override { return Http::FilterMetadataStatus::Continue; @@ -114,7 +110,6 @@ class Filter : public Http::StreamFilter, public AccessLog::Instance { private: FilterConfigSharedPtr config_; HttpPerRequestTapperPtr tapper_; - const Http::HeaderMap* request_trailers_{}; }; } // namespace TapFilter diff --git a/source/extensions/filters/http/well_known_names.h b/source/extensions/filters/http/well_known_names.h index 145441da059b1..22f9a174fab67 100644 --- a/source/extensions/filters/http/well_known_names.h +++ b/source/extensions/filters/http/well_known_names.h @@ -16,6 +16,8 @@ class HttpFilterNameValues { const std::string Buffer = "envoy.buffer"; // CORS filter const std::string Cors = "envoy.cors"; + // CSRF filter + const std::string Csrf = "envoy.csrf"; // Dynamo filter const std::string Dynamo = "envoy.http_dynamo_filter"; // Fault filter diff --git a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc index 26189a504bea8..9c6fb7b93997c 100644 --- a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc +++ b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc @@ -33,13 +33,13 @@ Network::FilterStatus 
Filter::onAccept(Network::ListenerFilterCallbacks& cb) { ENVOY_LOG(debug, "proxy_protocol: New connection accepted"); Network::ConnectionSocket& socket = cb.socket(); ASSERT(file_event_.get() == nullptr); - file_event_ = - cb.dispatcher().createFileEvent(socket.ioHandle().fd(), - [this](uint32_t events) { - ASSERT(events == Event::FileReadyType::Read); - onRead(); - }, - Event::FileTriggerType::Edge, Event::FileReadyType::Read); + file_event_ = cb.dispatcher().createFileEvent( + socket.ioHandle().fd(), + [this](uint32_t events) { + ASSERT(events == Event::FileReadyType::Read); + onRead(); + }, + Event::FileTriggerType::Edge, Event::FileReadyType::Read); cb_ = &cb; return Network::FilterStatus::StopIteration; } diff --git a/source/extensions/filters/listener/tls_inspector/tls_inspector.cc b/source/extensions/filters/listener/tls_inspector/tls_inspector.cc index 0e895104258ef..f5d2a8e4cef10 100644 --- a/source/extensions/filters/listener/tls_inspector/tls_inspector.cc +++ b/source/extensions/filters/listener/tls_inspector/tls_inspector.cc @@ -16,7 +16,6 @@ #include "extensions/transport_sockets/well_known_names.h" -#include "openssl/bytestring.h" #include "openssl/ssl.h" namespace Envoy { diff --git a/source/extensions/filters/listener/tls_inspector/tls_inspector.h b/source/extensions/filters/listener/tls_inspector/tls_inspector.h index f82444f1eb1cf..52d45dcd108a3 100644 --- a/source/extensions/filters/listener/tls_inspector/tls_inspector.h +++ b/source/extensions/filters/listener/tls_inspector/tls_inspector.h @@ -8,7 +8,6 @@ #include "common/common/logger.h" -#include "openssl/bytestring.h" #include "openssl/ssl.h" namespace Envoy { diff --git a/source/extensions/filters/network/common/redis/BUILD b/source/extensions/filters/network/common/redis/BUILD new file mode 100644 index 0000000000000..c8b07c4ceb5bf --- /dev/null +++ b/source/extensions/filters/network/common/redis/BUILD @@ -0,0 +1,64 @@ +licenses(["notice"]) # Apache 2 + +load( + 
"//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "codec_interface", + hdrs = ["codec.h"], + deps = ["//include/envoy/buffer:buffer_interface"], +) + +envoy_cc_library( + name = "codec_lib", + srcs = ["codec_impl.cc"], + hdrs = ["codec_impl.h"], + deps = [ + ":codec_interface", + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + "//source/common/common:stack_array", + "//source/common/common:utility_lib", + ], +) + +envoy_cc_library( + name = "supported_commands_lib", + hdrs = ["supported_commands.h"], + deps = [ + "//source/common/common:macros", + ], +) + +envoy_cc_library( + name = "client_interface", + hdrs = ["client.h"], + deps = [ + ":codec_lib", + "//include/envoy/upstream:cluster_manager_interface", + ], +) + +envoy_cc_library( + name = "client_lib", + srcs = ["client_impl.cc"], + hdrs = ["client_impl.h"], + deps = [ + ":client_interface", + ":codec_lib", + "//include/envoy/router:router_interface", + "//include/envoy/thread_local:thread_local_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//source/common/buffer:buffer_lib", + "//source/common/common:assert_lib", + "//source/common/network:filter_lib", + "//source/common/protobuf:utility_lib", + "//source/common/upstream:load_balancer_lib", + "@envoy_api//envoy/config/filter/network/redis_proxy/v2:redis_proxy_cc", + ], +) diff --git a/source/extensions/filters/network/common/redis/client.h b/source/extensions/filters/network/common/redis/client.h new file mode 100644 index 0000000000000..4a7c53912afc3 --- /dev/null +++ b/source/extensions/filters/network/common/redis/client.h @@ -0,0 +1,150 @@ +#pragma once + +#include + +#include "envoy/upstream/cluster_manager.h" + +#include "extensions/filters/network/common/redis/codec_impl.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Common { +namespace Redis { +namespace Client { + +/** + * 
A handle to an outbound request. + */ +class PoolRequest { +public: + virtual ~PoolRequest() {} + + /** + * Cancel the request. No further request callbacks will be called. + */ + virtual void cancel() PURE; +}; + +/** + * Outbound request callbacks. + */ +class PoolCallbacks { +public: + virtual ~PoolCallbacks() {} + + /** + * Called when a pipelined response is received. + * @param value supplies the response which is now owned by the callee. + */ + virtual void onResponse(RespValuePtr&& value) PURE; + + /** + * Called when a network/protocol error occurs and there is no response. + */ + virtual void onFailure() PURE; + + /** + * Called when a MOVED or ASK redirection error is received, and the request must be retried. + * @param value supplies the MOVED error response + * @return bool true if the request is successfully redirected, false otherwise + */ + virtual bool onRedirection(const Common::Redis::RespValue& value) PURE; +}; + +/** + * A single redis client connection. + */ +class Client : public Event::DeferredDeletable { +public: + virtual ~Client() {} + + /** + * Adds network connection callbacks to the underlying network connection. + */ + virtual void addConnectionCallbacks(Network::ConnectionCallbacks& callbacks) PURE; + + /** + * Closes the underlying network connection. + */ + virtual void close() PURE; + + /** + * Make a pipelined request to the remote redis server. + * @param request supplies the RESP request to make. + * @param callbacks supplies the request callbacks. + * @return PoolRequest* a handle to the active request or nullptr if the request could not be made + * for some reason. + */ + virtual PoolRequest* makeRequest(const RespValue& request, PoolCallbacks& callbacks) PURE; +}; + +typedef std::unique_ptr<Client> ClientPtr; + +/** + * Configuration for a redis connection pool. + */ +class Config { +public: + virtual ~Config() {} + + /** + * @return std::chrono::milliseconds the timeout for an individual redis operation.
Currently, + * all operations use the same timeout. + */ + virtual std::chrono::milliseconds opTimeout() const PURE; + + /** + * @return bool disable outlier events even if the cluster has it enabled. This is used by the + * healthchecker's connection pool to avoid double counting active healthcheck operations as + * passive healthcheck operations. + */ + virtual bool disableOutlierEvents() const PURE; + + /** + * @return when enabled, a hash tagging function will be used to guarantee that keys with the + * same hash tag will be forwarded to the same upstream. + */ + virtual bool enableHashtagging() const PURE; + + /** + * @return when enabled, moved/ask redirection errors from upstream redis servers will be + * processed. + */ + virtual bool enableRedirection() const PURE; + + /** + * @return buffer size for batching commands for a single upstream host. + */ + virtual uint32_t maxBufferSizeBeforeFlush() const PURE; + + /** + * @return timeout for batching commands for a single upstream host. + */ + virtual std::chrono::milliseconds bufferFlushTimeoutInMs() const PURE; +}; + +/** + * A factory for individual redis client connections. + */ +class ClientFactory { +public: + virtual ~ClientFactory() {} + + /** + * Create a client given an upstream host. + * @param host supplies the upstream host. + * @param dispatcher supplies the owning thread's dispatcher. + * @param config supplies the connection pool configuration. + * @return ClientPtr a new connection pool client. 
+ */ + virtual ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, + const Config& config) PURE; +}; + +} // namespace Client +} // namespace Redis +} // namespace Common +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/common/redis/client_impl.cc b/source/extensions/filters/network/common/redis/client_impl.cc new file mode 100644 index 0000000000000..fa4bb4bb5c766 --- /dev/null +++ b/source/extensions/filters/network/common/redis/client_impl.cc @@ -0,0 +1,234 @@ +#include "extensions/filters/network/common/redis/client_impl.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Common { +namespace Redis { +namespace Client { + +ConfigImpl::ConfigImpl( + const envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings& config) + : op_timeout_(PROTOBUF_GET_MS_REQUIRED(config, op_timeout)), + enable_hashtagging_(config.enable_hashtagging()), + enable_redirection_(config.enable_redirection()), + max_buffer_size_before_flush_( + config.max_buffer_size_before_flush()), // This is a scalar, so default is zero. + buffer_flush_timeout_(PROTOBUF_GET_MS_OR_DEFAULT( + config, buffer_flush_timeout, + 3)) // Default timeout is 3ms. If max_buffer_size_before_flush is zero, this is not used + // as the buffer is flushed on each request immediately. 
+{} + +ClientPtr ClientImpl::create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, + EncoderPtr&& encoder, DecoderFactory& decoder_factory, + const Config& config) { + + std::unique_ptr client( + new ClientImpl(host, dispatcher, std::move(encoder), decoder_factory, config)); + client->connection_ = host->createConnection(dispatcher, nullptr, nullptr).connection_; + client->connection_->addConnectionCallbacks(*client); + client->connection_->addReadFilter(Network::ReadFilterSharedPtr{new UpstreamReadFilter(*client)}); + client->connection_->connect(); + client->connection_->noDelay(true); + return std::move(client); +} + +ClientImpl::ClientImpl(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, + EncoderPtr&& encoder, DecoderFactory& decoder_factory, const Config& config) + : host_(host), encoder_(std::move(encoder)), decoder_(decoder_factory.create(*this)), + config_(config), + connect_or_op_timer_(dispatcher.createTimer([this]() -> void { onConnectOrOpTimeout(); })), + flush_timer_(dispatcher.createTimer([this]() -> void { flushBufferAndResetTimer(); })) { + host->cluster().stats().upstream_cx_total_.inc(); + host->stats().cx_total_.inc(); + host->cluster().stats().upstream_cx_active_.inc(); + host->stats().cx_active_.inc(); + connect_or_op_timer_->enableTimer(host->cluster().connectTimeout()); +} + +ClientImpl::~ClientImpl() { + ASSERT(pending_requests_.empty()); + ASSERT(connection_->state() == Network::Connection::State::Closed); + host_->cluster().stats().upstream_cx_active_.dec(); + host_->stats().cx_active_.dec(); +} + +void ClientImpl::close() { connection_->close(Network::ConnectionCloseType::NoFlush); } + +void ClientImpl::flushBufferAndResetTimer() { + if (flush_timer_->enabled()) { + flush_timer_->disableTimer(); + } + connection_->write(encoder_buffer_, false); +} + +PoolRequest* ClientImpl::makeRequest(const RespValue& request, PoolCallbacks& callbacks) { + ASSERT(connection_->state() == 
Network::Connection::State::Open); + + const bool empty_buffer = encoder_buffer_.length() == 0; + + pending_requests_.emplace_back(*this, callbacks); + encoder_->encode(request, encoder_buffer_); + + // If buffer is full, flush. If the buffer was empty before the request, start the timer. + if (encoder_buffer_.length() >= config_.maxBufferSizeBeforeFlush()) { + flushBufferAndResetTimer(); + } else if (empty_buffer) { + flush_timer_->enableTimer(std::chrono::milliseconds(config_.bufferFlushTimeoutInMs())); + } + + // Only boost the op timeout if: + // - We are not already connected. Otherwise, we are governed by the connect timeout and the timer + // will be reset when/if connection occurs. This allows a relatively long connection spin up + // time for example if TLS is being used. + // - This is the first request on the pipeline. Otherwise the timeout would effectively start on + // the last operation. + if (connected_ && pending_requests_.size() == 1) { + connect_or_op_timer_->enableTimer(config_.opTimeout()); + } + + return &pending_requests_.back(); +} + +void ClientImpl::onConnectOrOpTimeout() { + putOutlierEvent(Upstream::Outlier::Result::TIMEOUT); + if (connected_) { + host_->cluster().stats().upstream_rq_timeout_.inc(); + host_->stats().rq_timeout_.inc(); + } else { + host_->cluster().stats().upstream_cx_connect_timeout_.inc(); + host_->stats().cx_connect_fail_.inc(); + } + + connection_->close(Network::ConnectionCloseType::NoFlush); +} + +void ClientImpl::onData(Buffer::Instance& data) { + try { + decoder_->decode(data); + } catch (ProtocolError&) { + putOutlierEvent(Upstream::Outlier::Result::REQUEST_FAILED); + host_->cluster().stats().upstream_cx_protocol_error_.inc(); + host_->stats().rq_error_.inc(); + connection_->close(Network::ConnectionCloseType::NoFlush); + } +} + +void ClientImpl::putOutlierEvent(Upstream::Outlier::Result result) { + if (!config_.disableOutlierEvents()) { + host_->outlierDetector().putResult(result); + } +} + +void
ClientImpl::onEvent(Network::ConnectionEvent event) { + if (event == Network::ConnectionEvent::RemoteClose || + event == Network::ConnectionEvent::LocalClose) { + if (!pending_requests_.empty()) { + host_->cluster().stats().upstream_cx_destroy_with_active_rq_.inc(); + if (event == Network::ConnectionEvent::RemoteClose) { + putOutlierEvent(Upstream::Outlier::Result::SERVER_FAILURE); + host_->cluster().stats().upstream_cx_destroy_remote_with_active_rq_.inc(); + } + if (event == Network::ConnectionEvent::LocalClose) { + host_->cluster().stats().upstream_cx_destroy_local_with_active_rq_.inc(); + } + } + + while (!pending_requests_.empty()) { + PendingRequest& request = pending_requests_.front(); + if (!request.canceled_) { + request.callbacks_.onFailure(); + } else { + host_->cluster().stats().upstream_rq_cancelled_.inc(); + } + pending_requests_.pop_front(); + } + + connect_or_op_timer_->disableTimer(); + } else if (event == Network::ConnectionEvent::Connected) { + connected_ = true; + ASSERT(!pending_requests_.empty()); + connect_or_op_timer_->enableTimer(config_.opTimeout()); + } + + if (event == Network::ConnectionEvent::RemoteClose && !connected_) { + host_->cluster().stats().upstream_cx_connect_fail_.inc(); + host_->stats().cx_connect_fail_.inc(); + } +} + +void ClientImpl::onRespValue(RespValuePtr&& value) { + ASSERT(!pending_requests_.empty()); + PendingRequest& request = pending_requests_.front(); + + if (request.canceled_) { + host_->cluster().stats().upstream_rq_cancelled_.inc(); + } else if (config_.enableRedirection() && (value->type() == Common::Redis::RespType::Error)) { + std::vector err = StringUtil::splitToken(value->asString(), " ", false); + bool redirected = false; + if (err.size() == 3) { + if (err[0] == RedirectionResponse::get().MOVED || err[0] == RedirectionResponse::get().ASK) { + redirected = request.callbacks_.onRedirection(*value); + if (redirected) { + host_->cluster().stats().upstream_internal_redirect_succeeded_total_.inc(); + } else { + 
host_->cluster().stats().upstream_internal_redirect_failed_total_.inc(); + } + } + } + if (!redirected) { + request.callbacks_.onResponse(std::move(value)); + } + } else { + request.callbacks_.onResponse(std::move(value)); + } + + pending_requests_.pop_front(); + + // If there are no remaining ops in the pipeline we need to disable the timer. + // Otherwise we boost the timer since we are receiving responses and there are more to flush + // out. + if (pending_requests_.empty()) { + connect_or_op_timer_->disableTimer(); + } else { + connect_or_op_timer_->enableTimer(config_.opTimeout()); + } + + putOutlierEvent(Upstream::Outlier::Result::SUCCESS); +} + +ClientImpl::PendingRequest::PendingRequest(ClientImpl& parent, PoolCallbacks& callbacks) + : parent_(parent), callbacks_(callbacks) { + parent.host_->cluster().stats().upstream_rq_total_.inc(); + parent.host_->stats().rq_total_.inc(); + parent.host_->cluster().stats().upstream_rq_active_.inc(); + parent.host_->stats().rq_active_.inc(); +} + +ClientImpl::PendingRequest::~PendingRequest() { + parent_.host_->cluster().stats().upstream_rq_active_.dec(); + parent_.host_->stats().rq_active_.dec(); +} + +void ClientImpl::PendingRequest::cancel() { + // If we get a cancellation, we just mark the pending request as cancelled, and then we drop + // the response as it comes through. There is no reason to blow away the connection when the + // remote is already responding as fast as possible. 
+ canceled_ = true; +} + +ClientFactoryImpl ClientFactoryImpl::instance_; + +ClientPtr ClientFactoryImpl::create(Upstream::HostConstSharedPtr host, + Event::Dispatcher& dispatcher, const Config& config) { + return ClientImpl::create(host, dispatcher, EncoderPtr{new EncoderImpl()}, decoder_factory_, + config); +} + +} // namespace Client +} // namespace Redis +} // namespace Common +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/common/redis/client_impl.h b/source/extensions/filters/network/common/redis/client_impl.h new file mode 100644 index 0000000000000..fd9b7b7af7b80 --- /dev/null +++ b/source/extensions/filters/network/common/redis/client_impl.h @@ -0,0 +1,141 @@ +#pragma once + +#include + +#include "envoy/config/filter/network/redis_proxy/v2/redis_proxy.pb.h" +#include "envoy/thread_local/thread_local.h" +#include "envoy/upstream/cluster_manager.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/hash.h" +#include "common/network/filter_impl.h" +#include "common/protobuf/utility.h" +#include "common/singleton/const_singleton.h" +#include "common/upstream/load_balancer_impl.h" + +#include "extensions/filters/network/common/redis/client.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Common { +namespace Redis { +namespace Client { + +// TODO(mattklein123): Circuit breaking +// TODO(rshriram): Fault injection + +struct RedirectionValues { + const std::string ASK = "ASK"; + const std::string MOVED = "MOVED"; +}; + +typedef ConstSingleton RedirectionResponse; + +class ConfigImpl : public Config { +public: + ConfigImpl( + const envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings& config); + + bool disableOutlierEvents() const override { return false; } + std::chrono::milliseconds opTimeout() const override { return op_timeout_; } + bool enableHashtagging() const override { return enable_hashtagging_; } + 
bool enableRedirection() const override { return enable_redirection_; } + uint32_t maxBufferSizeBeforeFlush() const override { return max_buffer_size_before_flush_; } + std::chrono::milliseconds bufferFlushTimeoutInMs() const override { + return buffer_flush_timeout_; + } + +private: + const std::chrono::milliseconds op_timeout_; + const bool enable_hashtagging_; + const bool enable_redirection_; + const uint32_t max_buffer_size_before_flush_; + const std::chrono::milliseconds buffer_flush_timeout_; +}; + +class ClientImpl : public Client, public DecoderCallbacks, public Network::ConnectionCallbacks { +public: + static ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, + EncoderPtr&& encoder, DecoderFactory& decoder_factory, + const Config& config); + + ~ClientImpl(); + + // Client + void addConnectionCallbacks(Network::ConnectionCallbacks& callbacks) override { + connection_->addConnectionCallbacks(callbacks); + } + void close() override; + PoolRequest* makeRequest(const RespValue& request, PoolCallbacks& callbacks) override; + void flushBufferAndResetTimer(); + +private: + struct UpstreamReadFilter : public Network::ReadFilterBaseImpl { + UpstreamReadFilter(ClientImpl& parent) : parent_(parent) {} + + // Network::ReadFilter + Network::FilterStatus onData(Buffer::Instance& data, bool) override { + parent_.onData(data); + return Network::FilterStatus::Continue; + } + + ClientImpl& parent_; + }; + + struct PendingRequest : public PoolRequest { + PendingRequest(ClientImpl& parent, PoolCallbacks& callbacks); + ~PendingRequest(); + + // PoolRequest + void cancel() override; + + ClientImpl& parent_; + PoolCallbacks& callbacks_; + bool canceled_{}; + }; + + ClientImpl(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, EncoderPtr&& encoder, + DecoderFactory& decoder_factory, const Config& config); + void onConnectOrOpTimeout(); + void onData(Buffer::Instance& data); + void putOutlierEvent(Upstream::Outlier::Result result); + 
+ + // DecoderCallbacks + void onRespValue(RespValuePtr&& value) override; + + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent event) override; + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + + Upstream::HostConstSharedPtr host_; + Network::ClientConnectionPtr connection_; + EncoderPtr encoder_; + Buffer::OwnedImpl encoder_buffer_; + DecoderPtr decoder_; + const Config& config_; + std::list<PendingRequest> pending_requests_; + Event::TimerPtr connect_or_op_timer_; + bool connected_{}; + Event::TimerPtr flush_timer_; +}; + +class ClientFactoryImpl : public ClientFactory { +public: + // RedisProxy::ConnPool::ClientFactoryImpl + ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, + const Config& config) override; + + static ClientFactoryImpl instance_; + +private: + DecoderFactoryImpl decoder_factory_; +}; + +} // namespace Client +} // namespace Redis +} // namespace Common +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/redis_proxy/codec.h b/source/extensions/filters/network/common/redis/codec.h similarity index 89% rename from source/extensions/filters/network/redis_proxy/codec.h rename to source/extensions/filters/network/common/redis/codec.h index 09a0b025f829a..dda00888c8008 100644 --- a/source/extensions/filters/network/redis_proxy/codec.h +++ b/source/extensions/filters/network/common/redis/codec.h @@ -10,7 +10,8 @@ namespace Envoy { namespace Extensions { namespace NetworkFilters { -namespace RedisProxy { +namespace Common { +namespace Redis { /** * All RESP types as defined here: https://redis.io/topics/protocol @@ -26,6 +27,11 @@ class RespValue { RespValue() : type_(RespType::Null) {} ~RespValue() { cleanup(); } + RespValue(const RespValue& other); // copy constructor + RespValue& operator=(const RespValue& other); // copy assignment + bool operator==(const RespValue& other) const; //
test for equality, unit tests + bool operator!=(const RespValue& other) const { return !(*this == other); } + /** * Convert a RESP value to a string for debugging purposes. */ @@ -133,7 +139,8 @@ class ProtocolError : public EnvoyException { ProtocolError(const std::string& error) : EnvoyException(error) {} }; -} // namespace RedisProxy +} // namespace Redis +} // namespace Common } // namespace NetworkFilters } // namespace Extensions } // namespace Envoy diff --git a/source/extensions/filters/network/redis_proxy/codec_impl.cc b/source/extensions/filters/network/common/redis/codec_impl.cc similarity index 86% rename from source/extensions/filters/network/redis_proxy/codec_impl.cc rename to source/extensions/filters/network/common/redis/codec_impl.cc index ecd506a136f44..4c80b43ff5123 100644 --- a/source/extensions/filters/network/redis_proxy/codec_impl.cc +++ b/source/extensions/filters/network/common/redis/codec_impl.cc @@ -1,4 +1,4 @@ -#include "extensions/filters/network/redis_proxy/codec_impl.h" +#include "extensions/filters/network/common/redis/codec_impl.h" #include #include @@ -13,7 +13,8 @@ namespace Envoy { namespace Extensions { namespace NetworkFilters { -namespace RedisProxy { +namespace Common { +namespace Redis { std::string RespValue::toString() const { switch (type_) { @@ -115,6 +116,83 @@ void RespValue::type(RespType type) { } } +RespValue::RespValue(const RespValue& other) : type_(RespType::Null) { + this->type(other.type()); + switch (type_) { + case RespType::Array: { + this->asArray() = other.asArray(); + break; + } + case RespType::SimpleString: + case RespType::BulkString: + case RespType::Error: { + this->asString() = other.asString(); + break; + } + case RespType::Integer: { + this->asInteger() = other.asInteger(); + break; + } + case RespType::Null: + break; + } +} + +RespValue& RespValue::operator=(const RespValue& other) { + if (&other == this) { + return *this; + } + this->type(other.type()); + switch (type_) { + case RespType::Array: 
{ + this->asArray() = other.asArray(); + break; + } + case RespType::SimpleString: + case RespType::BulkString: + case RespType::Error: { + this->asString() = other.asString(); + break; + } + case RespType::Integer: { + this->asInteger() = other.asInteger(); + break; + } + case RespType::Null: + break; + } + return *this; +} + +bool RespValue::operator==(const RespValue& other) const { + bool result = false; + if (type_ != other.type()) { + return result; + } + + switch (type_) { + case RespType::Array: { + result = (this->asArray() == other.asArray()); + break; + } + case RespType::SimpleString: + case RespType::BulkString: + case RespType::Error: { + result = (this->asString() == other.asString()); + break; + } + case RespType::Integer: { + result = (this->asInteger() == other.asInteger()); + break; + } + case RespType::Null: { + result = true; + break; + } + } + return result; +} + void DecoderImpl::decode(Buffer::Instance& data) { uint64_t num_slices = data.getRawSlices(nullptr, 0); STACK_ARRAY(slices, Buffer::RawSlice, num_slices); @@ -170,7 +248,9 @@ void DecoderImpl::parseSlice(const Buffer::RawSlice& slice) { pending_value_stack_.front().value_->type(RespType::Integer); break; } - default: { throw ProtocolError("invalid value type"); } + default: { + throw ProtocolError("invalid value type"); + } } remaining--; @@ -421,7 +501,8 @@ void EncoderImpl::encodeSimpleString(const std::string& string, Buffer::Instance out.add("\r\n", 2); } -} // namespace RedisProxy +} // namespace Redis +} // namespace Common } // namespace NetworkFilters } // namespace Extensions } // namespace Envoy diff --git a/source/extensions/filters/network/redis_proxy/codec_impl.h b/source/extensions/filters/network/common/redis/codec_impl.h similarity index 94% rename from source/extensions/filters/network/redis_proxy/codec_impl.h rename to source/extensions/filters/network/common/redis/codec_impl.h index 0c214baafdcec..678e537883f32 100644 --- 
a/source/extensions/filters/network/redis_proxy/codec_impl.h +++ b/source/extensions/filters/network/common/redis/codec_impl.h @@ -7,12 +7,13 @@ #include "common/common/logger.h" -#include "extensions/filters/network/redis_proxy/codec.h" +#include "extensions/filters/network/common/redis/codec.h" namespace Envoy { namespace Extensions { namespace NetworkFilters { -namespace RedisProxy { +namespace Common { +namespace Redis { /** * Decoder implementation of https://redis.io/topics/protocol @@ -91,7 +92,8 @@ class EncoderImpl : public Encoder { void encodeSimpleString(const std::string& string, Buffer::Instance& out); }; -} // namespace RedisProxy +} // namespace Redis +} // namespace Common } // namespace NetworkFilters } // namespace Extensions } // namespace Envoy diff --git a/source/extensions/filters/network/redis_proxy/supported_commands.h b/source/extensions/filters/network/common/redis/supported_commands.h similarity index 86% rename from source/extensions/filters/network/redis_proxy/supported_commands.h rename to source/extensions/filters/network/common/redis/supported_commands.h index 9561576cfb141..13210d62abb72 100644 --- a/source/extensions/filters/network/redis_proxy/supported_commands.h +++ b/source/extensions/filters/network/common/redis/supported_commands.h @@ -8,7 +8,8 @@ namespace Envoy { namespace Extensions { namespace NetworkFilters { -namespace RedisProxy { +namespace Common { +namespace Redis { struct SupportedCommands { /** @@ -25,9 +26,9 @@ struct SupportedCommands { "pexpireat", "psetex", "pttl", "restore", "rpop", "rpush", "rpushx", "sadd", "scard", "set", "setbit", "setex", "setnx", "setrange", "sismember", "smembers", "spop", "srandmember", "srem", "sscan", "strlen", "ttl", "type", "zadd", "zcard", "zcount", "zincrby", "zlexcount", - "zrange", "zrangebylex", "zrangebyscore", "zrank", "zrem", "zremrangebylex", - "zremrangebyrank", "zremrangebyscore", "zrevrange", "zrevrangebylex", "zrevrangebyscore", - "zrevrank", "zscan", "zscore"); + 
"zpopmin", "zpopmax", "zrange", "zrangebylex", "zrangebyscore", "zrank", "zrem", + "zremrangebylex", "zremrangebyrank", "zremrangebyscore", "zrevrange", "zrevrangebylex", + "zrevrangebyscore", "zrevrank", "zscan", "zscore"); } /** @@ -60,7 +61,8 @@ struct SupportedCommands { static const std::string& ping() { CONSTRUCT_ON_FIRST_USE(std::string, "ping"); } }; -} // namespace RedisProxy +} // namespace Redis +} // namespace Common } // namespace NetworkFilters } // namespace Extensions } // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/BUILD b/source/extensions/filters/network/dubbo_proxy/BUILD index b95792467befa..46159802a9475 100644 --- a/source/extensions/filters/network/dubbo_proxy/BUILD +++ b/source/extensions/filters/network/dubbo_proxy/BUILD @@ -35,6 +35,7 @@ envoy_cc_library( deps = [ ":buffer_helper_lib", ":message_lib", + ":metadata_lib", "//source/common/common:assert_lib", "//source/common/config:utility_lib", "//source/common/singleton:const_singleton", @@ -61,6 +62,7 @@ envoy_cc_library( ], deps = [ ":message_lib", + ":metadata_lib", "//include/envoy/buffer:buffer_interface", "//source/common/common:assert_lib", "//source/common/config:utility_lib", @@ -87,42 +89,34 @@ envoy_cc_library( srcs = ["decoder.cc"], hdrs = ["decoder.h"], deps = [ + ":decoder_events_lib", ":dubbo_protocol_impl_lib", + ":heartbeat_response_lib", ":hessian_deserializer_impl_lib", "//source/common/buffer:buffer_lib", "//source/common/common:logger_lib", ], ) -envoy_cc_library( - name = "filter_lib", - srcs = ["filter.cc"], - hdrs = ["filter.h"], - deps = [ - ":decoder_lib", - ":stats_lib", - "//include/envoy/network:connection_interface", - "//include/envoy/network:filter_interface", - "//include/envoy/stats:stats_interface", - "//include/envoy/stats:stats_macros", - "//include/envoy/stats:timespan", - "//source/common/buffer:buffer_lib", - "//source/common/common:assert_lib", - "//source/common/common:logger_lib", - 
"//source/common/network:filter_lib", - "@envoy_api//envoy/config/filter/network/dubbo_proxy/v2alpha1:dubbo_proxy_cc", - ], -) - envoy_cc_library( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], deps = [ - ":filter_lib", + ":conn_manager_lib", "//include/envoy/registry", + "//include/envoy/stats:stats_interface", + "//include/envoy/stats:stats_macros", + "//source/common/common:utility_lib", + "//source/common/config:filter_json_lib", + "//source/common/config:utility_lib", "//source/extensions/filters/network:well_known_names", "//source/extensions/filters/network/common:factory_base_lib", + "//source/extensions/filters/network/dubbo_proxy/filters:factory_base_lib", + "//source/extensions/filters/network/dubbo_proxy/filters:well_known_names", + "//source/extensions/filters/network/dubbo_proxy/router:config", + "//source/extensions/filters/network/dubbo_proxy/router:route_matcher", + "//source/extensions/filters/network/dubbo_proxy/router:router_lib", "@envoy_api//envoy/config/filter/network/dubbo_proxy/v2alpha1:dubbo_proxy_cc", ], ) @@ -133,7 +127,6 @@ envoy_cc_library( external_deps = ["abseil_optional"], deps = [ ":message_lib", - ":protocol_interface", "//source/common/http:header_map_lib", ], ) @@ -161,3 +154,68 @@ envoy_cc_library( "//include/envoy/stats:stats_macros", ], ) + +envoy_cc_library( + name = "app_exception_lib", + srcs = ["app_exception.cc"], + hdrs = ["app_exception.h"], + deps = [ + ":deserializer_interface", + ":message_lib", + ":metadata_lib", + ":protocol_interface", + "//include/envoy/buffer:buffer_interface", + "//source/common/buffer:buffer_lib", + "//source/extensions/filters/network/dubbo_proxy/filters:filter_interface", + ], +) + +envoy_cc_library( + name = "heartbeat_response_lib", + srcs = ["heartbeat_response.cc"], + hdrs = ["heartbeat_response.h"], + deps = [ + ":deserializer_interface", + ":metadata_lib", + ":protocol_interface", + "//include/envoy/buffer:buffer_interface", + 
"//source/extensions/filters/network/dubbo_proxy/filters:filter_interface", + ], +) + +envoy_cc_library( + name = "conn_manager_lib", + srcs = [ + "active_message.cc", + "conn_manager.cc", + ], + hdrs = [ + "active_message.h", + "conn_manager.h", + ], + deps = [ + ":app_exception_lib", + ":decoder_events_lib", + ":decoder_lib", + ":dubbo_protocol_impl_lib", + ":heartbeat_response_lib", + ":hessian_deserializer_impl_lib", + ":stats_lib", + "//include/envoy/event:deferred_deletable", + "//include/envoy/event:dispatcher_interface", + "//include/envoy/network:connection_interface", + "//include/envoy/network:filter_interface", + "//include/envoy/stats:stats_interface", + "//include/envoy/stats:timespan", + "//source/common/buffer:buffer_lib", + "//source/common/buffer:watermark_buffer_lib", + "//source/common/common:assert_lib", + "//source/common/common:linked_object", + "//source/common/common:logger_lib", + "//source/common/network:filter_lib", + "//source/common/stream_info:stream_info_lib", + "//source/extensions/filters/network/dubbo_proxy/filters:filter_interface", + "//source/extensions/filters/network/dubbo_proxy/router:router_interface", + "@envoy_api//envoy/config/filter/network/dubbo_proxy/v2alpha1:dubbo_proxy_cc", + ], +) diff --git a/source/extensions/filters/network/dubbo_proxy/active_message.cc b/source/extensions/filters/network/dubbo_proxy/active_message.cc new file mode 100644 index 0000000000000..179c6852b3703 --- /dev/null +++ b/source/extensions/filters/network/dubbo_proxy/active_message.cc @@ -0,0 +1,421 @@ +#include "extensions/filters/network/dubbo_proxy/active_message.h" + +#include "extensions/filters/network/dubbo_proxy/app_exception.h" +#include "extensions/filters/network/dubbo_proxy/conn_manager.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DubboProxy { + +// class ResponseDecoder +ResponseDecoder::ResponseDecoder(Buffer::Instance& buffer, DubboFilterStats& stats, + Network::Connection& connection, 
Deserializer& deserializer, + Protocol& protocol) + : response_buffer_(buffer), stats_(stats), response_connection_(connection), + decoder_(std::make_unique(protocol, deserializer, *this)), complete_(false) {} + +bool ResponseDecoder::onData(Buffer::Instance& data) { + ENVOY_LOG(debug, "dubbo response: the received reply data length is {}", data.length()); + + bool underflow = false; + decoder_->onData(data, underflow); + ASSERT(complete_ || underflow); + return complete_; +} + +Network::FilterStatus ResponseDecoder::transportBegin() { + stats_.response_.inc(); + response_buffer_.drain(response_buffer_.length()); + ProtocolDataPassthroughConverter::initProtocolConverter(response_buffer_); + + return Network::FilterStatus::Continue; +} + +Network::FilterStatus ResponseDecoder::transportEnd() { + if (response_connection_.state() != Network::Connection::State::Open) { + throw DownstreamConnectionCloseException("Downstream has closed or closing"); + } + + response_connection_.write(response_buffer_, false); + ENVOY_LOG(debug, + "dubbo response: the upstream response message has been forwarded to the downstream"); + return Network::FilterStatus::Continue; +} + +Network::FilterStatus ResponseDecoder::messageBegin(MessageType, int64_t, SerializationType) { + return Network::FilterStatus::Continue; +} + +Network::FilterStatus ResponseDecoder::messageEnd(MessageMetadataSharedPtr metadata) { + ASSERT(metadata->message_type() == MessageType::Response || + metadata->message_type() == MessageType::Exception); + ASSERT(metadata->response_status().has_value()); + + stats_.response_decoding_success_.inc(); + if (metadata->message_type() == MessageType::Exception) { + stats_.response_business_exception_.inc(); + } + + metadata_ = metadata; + switch (metadata->response_status().value()) { + case ResponseStatus::Ok: + stats_.response_success_.inc(); + break; + default: + stats_.response_error_.inc(); + ENVOY_LOG(error, "dubbo response status: {}", + 
static_cast(metadata->response_status().value())); + break; + } + + complete_ = true; + ENVOY_LOG(debug, "dubbo response: complete processing of upstream response messages, id is {}", + metadata->request_id()); + + return Network::FilterStatus::Continue; +} + +DecoderEventHandler* ResponseDecoder::newDecoderEventHandler() { return this; } + +// class ActiveMessageDecoderFilter +ActiveMessageDecoderFilter::ActiveMessageDecoderFilter(ActiveMessage& parent, + DubboFilters::DecoderFilterSharedPtr filter) + : parent_(parent), handle_(filter) {} + +uint64_t ActiveMessageDecoderFilter::requestId() const { return parent_.requestId(); } + +uint64_t ActiveMessageDecoderFilter::streamId() const { return parent_.streamId(); } + +const Network::Connection* ActiveMessageDecoderFilter::connection() const { + return parent_.connection(); +} + +void ActiveMessageDecoderFilter::continueDecoding() { + const Network::FilterStatus status = parent_.applyDecoderFilters(this); + if (status == Network::FilterStatus::Continue) { + // All filters have been executed for the current decoder state. + if (parent_.pending_transport_end()) { + // If the filter stack was paused during messageEnd, handle end-of-request details. 
+ parent_.finalizeRequest(); + } + parent_.continueDecoding(); + } +} + +Router::RouteConstSharedPtr ActiveMessageDecoderFilter::route() { return parent_.route(); } + +SerializationType ActiveMessageDecoderFilter::downstreamSerializationType() const { + return parent_.downstreamSerializationType(); +} + +ProtocolType ActiveMessageDecoderFilter::downstreamProtocolType() const { + return parent_.downstreamProtocolType(); +} + +void ActiveMessageDecoderFilter::sendLocalReply(const DubboFilters::DirectResponse& response, + bool end_stream) { + parent_.sendLocalReply(response, end_stream); +} + +void ActiveMessageDecoderFilter::startUpstreamResponse(Deserializer& deserializer, + Protocol& protocol) { + parent_.startUpstreamResponse(deserializer, protocol); +} + +DubboFilters::UpstreamResponseStatus +ActiveMessageDecoderFilter::upstreamData(Buffer::Instance& buffer) { + return parent_.upstreamData(buffer); +} + +void ActiveMessageDecoderFilter::resetDownstreamConnection() { + parent_.resetDownstreamConnection(); +} + +void ActiveMessageDecoderFilter::resetStream() { parent_.resetStream(); } + +StreamInfo::StreamInfo& ActiveMessageDecoderFilter::streamInfo() { return parent_.streamInfo(); } + +// class ActiveMessage +ActiveMessage::ActiveMessage(ConnectionManager& parent) + : parent_(parent), request_timer_(std::make_unique( + parent_.stats().request_time_ms_, parent.time_system())), + request_id_(-1), stream_id_(parent.random_generator().random()), + stream_info_(parent.time_system()), pending_transport_end_(false), + local_response_sent_(false) { + parent_.stats().request_active_.inc(); + stream_info_.setDownstreamLocalAddress(parent_.connection().localAddress()); + stream_info_.setDownstreamRemoteAddress(parent_.connection().remoteAddress()); +} + +ActiveMessage::~ActiveMessage() { + parent_.stats().request_active_.dec(); + request_timer_->complete(); + for (auto& filter : decoder_filters_) { + filter->handler()->onDestroy(); + } + ENVOY_LOG(debug, 
"ActiveMessage::~ActiveMessage()"); +} + +Network::FilterStatus ActiveMessage::transportBegin() { + filter_action_ = [](DubboFilters::DecoderFilter* filter) -> Network::FilterStatus { + return filter->transportBegin(); + }; + + return this->applyDecoderFilters(nullptr); +} + +Network::FilterStatus ActiveMessage::transportEnd() { + filter_action_ = [](DubboFilters::DecoderFilter* filter) -> Network::FilterStatus { + return filter->transportEnd(); + }; + + Network::FilterStatus status = applyDecoderFilters(nullptr); + if (status == Network::FilterStatus::StopIteration) { + pending_transport_end_ = true; + return status; + } + + finalizeRequest(); + + ENVOY_LOG(debug, "dubbo request: complete processing of downstream request messages, id is {}", + request_id_); + + return status; +} + +Network::FilterStatus ActiveMessage::messageBegin(MessageType type, int64_t message_id, + SerializationType serialization_type) { + request_id_ = message_id; + filter_action_ = [type, message_id, serialization_type]( + DubboFilters::DecoderFilter* filter) -> Network::FilterStatus { + return filter->messageBegin(type, message_id, serialization_type); + }; + + return applyDecoderFilters(nullptr); +} + +Network::FilterStatus ActiveMessage::messageEnd(MessageMetadataSharedPtr metadata) { + ASSERT(metadata->message_type() == MessageType::Request || + metadata->message_type() == MessageType::Oneway); + + // Currently only hessian serialization is implemented. 
+ ASSERT(metadata->serialization_type() == SerializationType::Hessian); + + ENVOY_LOG(debug, "dubbo request: start processing downstream request messages, id is {}", + metadata->request_id()); + + parent_.stats().request_decoding_success_.inc(); + + metadata_ = metadata; + filter_action_ = [metadata](DubboFilters::DecoderFilter* filter) -> Network::FilterStatus { + return filter->messageEnd(metadata); + }; + + return applyDecoderFilters(nullptr); +} + +Network::FilterStatus ActiveMessage::transferHeaderTo(Buffer::Instance& header_buf, size_t size) { + filter_action_ = [&header_buf, + size](DubboFilters::DecoderFilter* filter) -> Network::FilterStatus { + return filter->transferHeaderTo(header_buf, size); + }; + + // If a local reply is generated, the filter callback is skipped and + // the buffer data needs to be actively released. + if (local_response_sent_) { + header_buf.drain(size); + } + + return applyDecoderFilters(nullptr); +} + +Network::FilterStatus ActiveMessage::transferBodyTo(Buffer::Instance& body_buf, size_t size) { + filter_action_ = [&body_buf, size](DubboFilters::DecoderFilter* filter) -> Network::FilterStatus { + return filter->transferBodyTo(body_buf, size); + }; + + // If a local reply is generated, the filter callback is skipped and + // the buffer data needs to be actively released. 
+ if (local_response_sent_) { + body_buf.drain(size); + } + + return applyDecoderFilters(nullptr); +} + +void ActiveMessage::finalizeRequest() { + pending_transport_end_ = false; + parent_.stats().request_.inc(); + bool is_one_way = false; + switch (metadata_->message_type()) { + case MessageType::Request: + parent_.stats().request_twoway_.inc(); + break; + case MessageType::Oneway: + parent_.stats().request_oneway_.inc(); + is_one_way = true; + break; + default: + break; + } + + if (local_response_sent_ || is_one_way) { + parent_.deferredMessage(*this); + } +} + +void ActiveMessage::createFilterChain() { + parent_.config().filterFactory().createFilterChain(*this); +} + +DubboProxy::Router::RouteConstSharedPtr ActiveMessage::route() { + if (cached_route_) { + return cached_route_.value(); + } + + if (metadata_ != nullptr) { + DubboProxy::Router::RouteConstSharedPtr route = + parent_.config().routerConfig().route(*metadata_, stream_id_); + cached_route_ = route; + return cached_route_.value(); + } + + return nullptr; +} + +Network::FilterStatus ActiveMessage::applyDecoderFilters(ActiveMessageDecoderFilter* filter) { + ASSERT(filter_action_ != nullptr); + + if (!local_response_sent_) { + std::list::iterator entry; + if (!filter) { + entry = decoder_filters_.begin(); + } else { + entry = std::next(filter->entry()); + } + + for (; entry != decoder_filters_.end(); entry++) { + const Network::FilterStatus status = filter_action_((*entry)->handler().get()); + if (local_response_sent_) { + break; + } + + if (status != Network::FilterStatus::Continue) { + return status; + } + } + } + + filter_action_ = nullptr; + + return Network::FilterStatus::Continue; +} + +void ActiveMessage::sendLocalReply(const DubboFilters::DirectResponse& response, bool end_stream) { + if (!metadata_) { + // If the sendLocalReply function is called before the messageEnd callback, + // metadata_ is nullptr, metadata object needs to be created in order to generate a local reply. 
+ metadata_ = std::make_shared(); + } + metadata_->setRequestId(request_id_); + parent_.sendLocalReply(*metadata_, response, end_stream); + + if (end_stream) { + return; + } + + local_response_sent_ = true; +} + +void ActiveMessage::startUpstreamResponse(Deserializer& deserializer, Protocol& protocol) { + ENVOY_LOG(debug, "dubbo response: start upstream"); + + ASSERT(response_decoder_ == nullptr); + + // Create a response message decoder. + response_decoder_ = std::make_unique( + response_buffer_, parent_.stats(), parent_.connection(), deserializer, protocol); +} + +DubboFilters::UpstreamResponseStatus ActiveMessage::upstreamData(Buffer::Instance& buffer) { + ASSERT(response_decoder_ != nullptr); + + try { + if (response_decoder_->onData(buffer)) { + if (requestId() != response_decoder_->requestId()) { + throw EnvoyException(fmt::format("dubbo response: request ID is not equal, {}:{}", + requestId(), response_decoder_->requestId())); + } + + // Completed upstream response. + parent_.deferredMessage(*this); + return DubboFilters::UpstreamResponseStatus::Complete; + } + return DubboFilters::UpstreamResponseStatus::MoreData; + } catch (const DownstreamConnectionCloseException& ex) { + ENVOY_CONN_LOG(error, "dubbo response: exception ({})", parent_.connection(), ex.what()); + onReset(); + parent_.stats().response_error_caused_connection_close_.inc(); + return DubboFilters::UpstreamResponseStatus::Reset; + } catch (const EnvoyException& ex) { + ENVOY_CONN_LOG(error, "dubbo response: exception ({})", parent_.connection(), ex.what()); + parent_.stats().response_decoding_error_.inc(); + + onError(ex.what()); + return DubboFilters::UpstreamResponseStatus::Reset; + } +} + +void ActiveMessage::resetDownstreamConnection() { + parent_.connection().close(Network::ConnectionCloseType::NoFlush); +} + +void ActiveMessage::resetStream() { parent_.deferredMessage(*this); } + +uint64_t ActiveMessage::requestId() const { + return metadata_ != nullptr ? 
metadata_->request_id() : 0; +} + +uint64_t ActiveMessage::streamId() const { return stream_id_; } + +void ActiveMessage::continueDecoding() { parent_.continueDecoding(); } + +SerializationType ActiveMessage::downstreamSerializationType() const { + return parent_.downstreamSerializationType(); +} + +ProtocolType ActiveMessage::downstreamProtocolType() const { + return parent_.downstreamProtocolType(); +} + +StreamInfo::StreamInfo& ActiveMessage::streamInfo() { return stream_info_; } + +const Network::Connection* ActiveMessage::connection() const { return &parent_.connection(); } + +void ActiveMessage::addDecoderFilter(DubboFilters::DecoderFilterSharedPtr filter) { + ActiveMessageDecoderFilterPtr wrapper = + std::make_unique(*this, filter); + filter->setDecoderFilterCallbacks(*wrapper); + wrapper->moveIntoListBack(std::move(wrapper), decoder_filters_); +} + +void ActiveMessage::onReset() { parent_.deferredMessage(*this); } + +void ActiveMessage::onError(const std::string& what) { + if (!metadata_) { + // It's possible that an error occurred before the decoder generated metadata, + // and a metadata object needs to be created in order to generate a local reply. 
+ metadata_ = std::make_shared(); + } + + ASSERT(metadata_); + ENVOY_LOG(error, "Bad response: {}", what); + sendLocalReply(AppException(ResponseStatus::BadResponse, what), false); + parent_.deferredMessage(*this); +} + +} // namespace DubboProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/active_message.h b/source/extensions/filters/network/dubbo_proxy/active_message.h new file mode 100644 index 0000000000000..8ecda9ea74aa3 --- /dev/null +++ b/source/extensions/filters/network/dubbo_proxy/active_message.h @@ -0,0 +1,170 @@ +#pragma once + +#include "envoy/event/deferred_deletable.h" +#include "envoy/network/connection.h" +#include "envoy/network/filter.h" +#include "envoy/stats/timespan.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/linked_object.h" +#include "common/common/logger.h" +#include "common/stream_info/stream_info_impl.h" + +#include "extensions/filters/network/dubbo_proxy/decoder.h" +#include "extensions/filters/network/dubbo_proxy/decoder_event_handler.h" +#include "extensions/filters/network/dubbo_proxy/filters/filter.h" +#include "extensions/filters/network/dubbo_proxy/metadata.h" +#include "extensions/filters/network/dubbo_proxy/router/router.h" +#include "extensions/filters/network/dubbo_proxy/stats.h" + +#include "absl/types/optional.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DubboProxy { + +class ConnectionManager; +class ActiveMessage; + +class ResponseDecoder : public DecoderCallbacks, + public DecoderEventHandler, + Logger::Loggable { +public: + ResponseDecoder(Buffer::Instance& buffer, DubboFilterStats& stats, + Network::Connection& connection, Deserializer& deserializer, Protocol& protocol); + ~ResponseDecoder() override = default; + + bool onData(Buffer::Instance& data); + + // DecoderEventHandler + Network::FilterStatus transportBegin() override; + Network::FilterStatus 
transportEnd() override; + Network::FilterStatus messageBegin(MessageType type, int64_t message_id, + SerializationType serialization_type) override; + Network::FilterStatus messageEnd(MessageMetadataSharedPtr metadata) override; + + // DecoderCallbacks + DecoderEventHandler* newDecoderEventHandler() override; + + uint64_t requestId() const { return metadata_ ? metadata_->request_id() : 0; } + +private: + Buffer::Instance& response_buffer_; + DubboFilterStats& stats_; + Network::Connection& response_connection_; + DecoderPtr decoder_; + MessageMetadataSharedPtr metadata_; + bool complete_ : 1; +}; + +typedef std::unique_ptr ResponseDecoderPtr; + +// Wraps a DecoderFilter and acts as the DecoderFilterCallbacks for the filter, enabling filter +// chain continuation. +class ActiveMessageDecoderFilter : public DubboFilters::DecoderFilterCallbacks, + public LinkedObject { +public: + ActiveMessageDecoderFilter(ActiveMessage& parent, DubboFilters::DecoderFilterSharedPtr filter); + ~ActiveMessageDecoderFilter() override = default; + + // DubboFilters::DecoderFilterCallbacks + uint64_t requestId() const override; + uint64_t streamId() const override; + const Network::Connection* connection() const override; + void continueDecoding() override; + DubboProxy::Router::RouteConstSharedPtr route() override; + SerializationType downstreamSerializationType() const override; + ProtocolType downstreamProtocolType() const override; + void sendLocalReply(const DubboFilters::DirectResponse& response, bool end_stream) override; + void startUpstreamResponse(Deserializer& deserializer, Protocol& protocol) override; + DubboFilters::UpstreamResponseStatus upstreamData(Buffer::Instance& buffer) override; + void resetDownstreamConnection() override; + StreamInfo::StreamInfo& streamInfo() override; + void resetStream() override; + + DubboFilters::DecoderFilterSharedPtr handler() { return handle_; } + +private: + ActiveMessage& parent_; + DubboFilters::DecoderFilterSharedPtr handle_; +}; + 
+typedef std::unique_ptr ActiveMessageDecoderFilterPtr; + +// ActiveMessage tracks downstream requests for which no response has been received. +class ActiveMessage : public LinkedObject, + public Event::DeferredDeletable, + public DecoderEventHandler, + public DubboFilters::DecoderFilterCallbacks, + public DubboFilters::FilterChainFactoryCallbacks, + Logger::Loggable { +public: + ActiveMessage(ConnectionManager& parent); + ~ActiveMessage() override; + + // Dubbo::FilterChainFactoryCallbacks + void addDecoderFilter(DubboFilters::DecoderFilterSharedPtr filter) override; + + // DecoderEventHandler + Network::FilterStatus transportBegin() override; + Network::FilterStatus transportEnd() override; + Network::FilterStatus messageBegin(MessageType type, int64_t message_id, + SerializationType serialization_type) override; + Network::FilterStatus messageEnd(MessageMetadataSharedPtr metadata) override; + Network::FilterStatus transferHeaderTo(Buffer::Instance& header_buf, size_t size) override; + Network::FilterStatus transferBodyTo(Buffer::Instance& body_buf, size_t size) override; + + // DubboFilters::DecoderFilterCallbacks + uint64_t requestId() const override; + uint64_t streamId() const override; + const Network::Connection* connection() const override; + void continueDecoding() override; + SerializationType downstreamSerializationType() const override; + ProtocolType downstreamProtocolType() const override; + StreamInfo::StreamInfo& streamInfo() override; + Router::RouteConstSharedPtr route() override; + void sendLocalReply(const DubboFilters::DirectResponse& response, bool end_stream) override; + void startUpstreamResponse(Deserializer& deserializer, Protocol& protocol) override; + DubboFilters::UpstreamResponseStatus upstreamData(Buffer::Instance& buffer) override; + void resetDownstreamConnection() override; + void resetStream() override; + + void createFilterChain(); + Network::FilterStatus applyDecoderFilters(ActiveMessageDecoderFilter* filter); + void 
finalizeRequest(); + void onReset(); + void onError(const std::string& what); + MessageMetadataSharedPtr metadata() const { return metadata_; } + bool pending_transport_end() const { return pending_transport_end_; } + +private: + ConnectionManager& parent_; + + MessageMetadataSharedPtr metadata_; + Stats::TimespanPtr request_timer_; + ResponseDecoderPtr response_decoder_; + + absl::optional cached_route_; + + std::list decoder_filters_; + std::function filter_action_; + + int32_t request_id_; + + // This value is used in the calculation of the weighted cluster. + uint64_t stream_id_; + StreamInfo::StreamInfoImpl stream_info_; + + Buffer::OwnedImpl response_buffer_; + + bool pending_transport_end_ : 1; + bool local_response_sent_ : 1; +}; + +typedef std::unique_ptr ActiveMessagePtr; + +} // namespace DubboProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/app_exception.cc b/source/extensions/filters/network/dubbo_proxy/app_exception.cc new file mode 100644 index 0000000000000..8c35ce60c492a --- /dev/null +++ b/source/extensions/filters/network/dubbo_proxy/app_exception.cc @@ -0,0 +1,47 @@ +#include "extensions/filters/network/dubbo_proxy/app_exception.h" + +#include "common/buffer/buffer_impl.h" + +#include "extensions/filters/network/dubbo_proxy/message.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DubboProxy { + +AppException::AppException(ResponseStatus status, const std::string& what) + : EnvoyException(what), status_(status), + response_type_(RpcResponseType::ResponseWithException) {} + +AppException::ResponseType AppException::encode(MessageMetadata& metadata, + DubboProxy::Protocol& protocol, + Deserializer& deserializer, + Buffer::Instance& buffer) const { + ASSERT(buffer.length() == 0); + + ENVOY_LOG(debug, "err {}", what()); + + // Serialize the response content to get the serialized response length. 
+ const std::string& response = what(); + size_t serialized_body_size = deserializer.serializeRpcResult(buffer, response, response_type_); + + metadata.setResponseStatus(status_); + metadata.setMessageType(MessageType::Response); + + Buffer::OwnedImpl protocol_buffer; + if (!protocol.encode(protocol_buffer, serialized_body_size, metadata)) { + throw EnvoyException("failed to encode local reply message"); + } + + buffer.prepend(protocol_buffer); + + return DirectResponse::ResponseType::Exception; +} + +DownstreamConnectionCloseException::DownstreamConnectionCloseException(const std::string& what) + : EnvoyException(what) {} + +} // namespace DubboProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/app_exception.h b/source/extensions/filters/network/dubbo_proxy/app_exception.h new file mode 100644 index 0000000000000..ae68fb47d5935 --- /dev/null +++ b/source/extensions/filters/network/dubbo_proxy/app_exception.h @@ -0,0 +1,36 @@ +#pragma once + +#include "envoy/common/exception.h" + +#include "extensions/filters/network/dubbo_proxy/deserializer.h" +#include "extensions/filters/network/dubbo_proxy/filters/filter.h" +#include "extensions/filters/network/dubbo_proxy/metadata.h" +#include "extensions/filters/network/dubbo_proxy/protocol.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DubboProxy { + +struct AppException : public EnvoyException, + public DubboFilters::DirectResponse, + Logger::Loggable { + AppException(ResponseStatus status, const std::string& what); + AppException(const AppException& ex) = default; + + using ResponseType = DubboFilters::DirectResponse::ResponseType; + ResponseType encode(MessageMetadata& metadata, Protocol& protocol, Deserializer& deserializer, + Buffer::Instance& buffer) const override; + + const ResponseStatus status_; + const RpcResponseType response_type_; +}; + +struct DownstreamConnectionCloseException 
: public EnvoyException { + DownstreamConnectionCloseException(const std::string& what); +}; + +} // namespace DubboProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/buffer_helper.h b/source/extensions/filters/network/dubbo_proxy/buffer_helper.h index 8b78cc7ff2875..d3020c39c0f96 100644 --- a/source/extensions/filters/network/dubbo_proxy/buffer_helper.h +++ b/source/extensions/filters/network/dubbo_proxy/buffer_helper.h @@ -10,60 +10,6 @@ namespace Extensions { namespace NetworkFilters { namespace DubboProxy { -/** - * BufferWrapper provides a partial implementation of Buffer::Instance that is sufficient for - * BufferHelper to read protocol data without draining the buffer's contents. - */ -class BufferWrapper : public Buffer::Instance { -public: - BufferWrapper(Buffer::Instance& underlying) : underlying_(underlying) {} - - uint64_t position() { return position_; } - - // Buffer::Instance - void copyOut(size_t start, uint64_t size, void* data) const override { - ASSERT(position_ + start + size <= underlying_.length()); - underlying_.copyOut(start + position_, size, data); - } - void drain(uint64_t size) override { - ASSERT(position_ + size <= underlying_.length()); - position_ += size; - } - uint64_t length() const override { - ASSERT(underlying_.length() >= position_); - return underlying_.length() - position_; - } - void* linearize(uint32_t size) override { - ASSERT(position_ + size <= underlying_.length()); - uint8_t* p = static_cast(underlying_.linearize(position_ + size)); - return p + position_; - } - - std::string toString() const override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - void add(const void*, uint64_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - void addBufferFragment(Buffer::BufferFragment&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - void add(absl::string_view) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - void add(const Buffer::Instance&) 
override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - void commit(Buffer::RawSlice*, uint64_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - void prepend(absl::string_view) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - void prepend(Instance&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - uint64_t getRawSlices(Buffer::RawSlice*, uint64_t) const override { - NOT_IMPLEMENTED_GCOVR_EXCL_LINE; - } - void move(Buffer::Instance&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - void move(Buffer::Instance&, uint64_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - Api::SysCallIntResult read(int, uint64_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - uint64_t reserve(uint64_t, Buffer::RawSlice*, uint64_t) override { - NOT_IMPLEMENTED_GCOVR_EXCL_LINE; - } - ssize_t search(const void*, uint64_t, size_t) const override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - Api::SysCallIntResult write(int) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - -private: - Buffer::Instance& underlying_; - uint64_t position_{0}; -}; - /** * BufferHelper provides buffer operations for reading bytes and numbers in the various encodings * used by protocols. 
diff --git a/source/extensions/filters/network/dubbo_proxy/config.cc b/source/extensions/filters/network/dubbo_proxy/config.cc index 15e7f6db81b8c..9ad115c44e39b 100644 --- a/source/extensions/filters/network/dubbo_proxy/config.cc +++ b/source/extensions/filters/network/dubbo_proxy/config.cc @@ -2,7 +2,12 @@ #include "envoy/registry/registry.h" -#include "extensions/filters/network/dubbo_proxy/filter.h" +#include "common/config/utility.h" + +#include "extensions/filters/network/dubbo_proxy/conn_manager.h" +#include "extensions/filters/network/dubbo_proxy/filters/factory_base.h" +#include "extensions/filters/network/dubbo_proxy/filters/well_known_names.h" +#include "extensions/filters/network/dubbo_proxy/stats.h" namespace Envoy { namespace Extensions { @@ -12,14 +17,11 @@ namespace DubboProxy { Network::FilterFactoryCb DubboProxyFilterConfigFactory::createFilterFactoryFromProtoTyped( const envoy::config::filter::network::dubbo_proxy::v2alpha1::DubboProxy& proto_config, Server::Configuration::FactoryContext& context) { - ASSERT(!proto_config.stat_prefix().empty()); - - const std::string stat_prefix = fmt::format("dubbo.{}.", proto_config.stat_prefix()); + std::shared_ptr filter_config(std::make_shared(proto_config, context)); - return [stat_prefix, &proto_config, &context](Network::FilterManager& filter_manager) -> void { - filter_manager.addFilter(std::make_shared( - stat_prefix, proto_config.protocol_type(), proto_config.serialization_type(), - context.scope(), context.dispatcher().timeSystem())); + return [filter_config, &context](Network::FilterManager& filter_manager) -> void { + filter_manager.addReadFilter(std::make_shared( + *filter_config, context.random(), context.dispatcher().timeSource())); }; } @@ -29,6 +31,109 @@ Network::FilterFactoryCb DubboProxyFilterConfigFactory::createFilterFactoryFromP REGISTER_FACTORY(DubboProxyFilterConfigFactory, Server::Configuration::NamedNetworkFilterConfigFactory); +class ProtocolTypeMapper { +public: + using 
ConfigProtocolType = envoy::config::filter::network::dubbo_proxy::v2alpha1::ProtocolType; + typedef absl::flat_hash_map ProtocolTypeMap; + + static ProtocolType lookupProtocolType(ConfigProtocolType config_type) { + const auto& iter = protocolTypeMap().find(config_type); + ASSERT(iter != protocolTypeMap().end()); + return iter->second; + } + +private: + static const ProtocolTypeMap& protocolTypeMap() { + CONSTRUCT_ON_FIRST_USE(ProtocolTypeMap, { + {ConfigProtocolType::Dubbo, ProtocolType::Dubbo}, + }); + } +}; + +class SerializationTypeMapper { +public: + using ConfigSerializationType = + envoy::config::filter::network::dubbo_proxy::v2alpha1::SerializationType; + typedef absl::flat_hash_map SerializationTypeMap; + + static SerializationType lookupSerializationType(ConfigSerializationType type) { + const auto& iter = serializationTypeMap().find(type); + ASSERT(iter != serializationTypeMap().end()); + return iter->second; + } + +private: + static const SerializationTypeMap& serializationTypeMap() { + CONSTRUCT_ON_FIRST_USE(SerializationTypeMap, + { + {ConfigSerializationType::Hessian2, SerializationType::Hessian}, + }); + } +}; + +// class ConfigImpl. 
+ConfigImpl::ConfigImpl(const DubboProxyConfig& config, + Server::Configuration::FactoryContext& context) + : context_(context), stats_prefix_(fmt::format("dubbo.{}.", config.stat_prefix())), + stats_(DubboFilterStats::generateStats(stats_prefix_, context_.scope())), + serialization_type_( + SerializationTypeMapper::lookupSerializationType(config.serialization_type())), + protocol_type_(ProtocolTypeMapper::lookupProtocolType(config.protocol_type())), + route_matcher_(std::make_unique(config.route_config())) { + if (config.dubbo_filters().empty()) { + ENVOY_LOG(debug, "using default router filter"); + + envoy::config::filter::network::dubbo_proxy::v2alpha1::DubboFilter router_config; + router_config.set_name(DubboFilters::DubboFilterNames::get().ROUTER); + registerFilter(router_config); + } else { + for (const auto& filter_config : config.dubbo_filters()) { + registerFilter(filter_config); + } + } +} + +void ConfigImpl::createFilterChain(DubboFilters::FilterChainFactoryCallbacks& callbacks) { + for (const DubboFilters::FilterFactoryCb& factory : filter_factories_) { + factory(callbacks); + } +} + +Router::RouteConstSharedPtr ConfigImpl::route(const MessageMetadata& metadata, + uint64_t random_value) const { + return route_matcher_->route(metadata, random_value); +} + +ProtocolPtr ConfigImpl::createProtocol() { + return NamedProtocolConfigFactory::getFactory(protocol_type_).createProtocol(); +} + +DeserializerPtr ConfigImpl::createDeserializer() { + return NamedDeserializerConfigFactory::getFactory(serialization_type_).createDeserializer(); +} + +void ConfigImpl::registerFilter(const DubboFilterConfig& proto_config) { + const std::string& string_name = proto_config.name(); + + ENVOY_LOG(debug, " dubbo filter #{}", filter_factories_.size()); + ENVOY_LOG(debug, " name: {}", string_name); + + const Json::ObjectSharedPtr filter_config = + MessageUtil::getJsonObjectFromMessage(proto_config.config()); + ENVOY_LOG(debug, " config: {}", filter_config->asJsonString()); + + 
auto& factory = + Envoy::Config::Utility::getAndCheckFactory( + string_name); + ProtobufTypes::MessagePtr message = factory.createEmptyConfigProto(); + Envoy::Config::Utility::translateOpaqueConfig(proto_config.config(), + ProtobufWkt::Struct::default_instance(), *message); + DubboFilters::FilterFactoryCb callback = + factory.createFilterFactoryFromProto(*message, stats_prefix_, context_); + + filter_factories_.push_back(callback); +} + } // namespace DubboProxy } // namespace NetworkFilters } // namespace Extensions diff --git a/source/extensions/filters/network/dubbo_proxy/config.h b/source/extensions/filters/network/dubbo_proxy/config.h index 0963ce0019494..076298217bf87 100644 --- a/source/extensions/filters/network/dubbo_proxy/config.h +++ b/source/extensions/filters/network/dubbo_proxy/config.h @@ -1,9 +1,15 @@ #pragma once +#include + #include "envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.pb.h" #include "envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.pb.validate.h" #include "extensions/filters/network/common/factory_base.h" +#include "extensions/filters/network/dubbo_proxy/conn_manager.h" +#include "extensions/filters/network/dubbo_proxy/filters/filter.h" +#include "extensions/filters/network/dubbo_proxy/router/route_matcher.h" +#include "extensions/filters/network/dubbo_proxy/router/router_impl.h" #include "extensions/filters/network/well_known_names.h" namespace Envoy { @@ -26,6 +32,44 @@ class DubboProxyFilterConfigFactory Server::Configuration::FactoryContext& context) override; }; +class ConfigImpl : public Config, + public Router::Config, + public DubboFilters::FilterChainFactory, + Logger::Loggable { +public: + using DubboProxyConfig = envoy::config::filter::network::dubbo_proxy::v2alpha1::DubboProxy; + using DubboFilterConfig = envoy::config::filter::network::dubbo_proxy::v2alpha1::DubboFilter; + + ConfigImpl(const DubboProxyConfig& config, Server::Configuration::FactoryContext& context); + ~ConfigImpl() override = 
default; + + // DubboFilters::FilterChainFactory + void createFilterChain(DubboFilters::FilterChainFactoryCallbacks& callbacks) override; + + // Router::Config + Router::RouteConstSharedPtr route(const MessageMetadata& metadata, + uint64_t random_value) const override; + + // Config + DubboFilterStats& stats() override { return stats_; } + DubboFilters::FilterChainFactory& filterFactory() override { return *this; } + Router::Config& routerConfig() override { return *this; } + ProtocolPtr createProtocol() override; + DeserializerPtr createDeserializer() override; + +private: + void registerFilter(const DubboFilterConfig& proto_config); + + Server::Configuration::FactoryContext& context_; + const std::string stats_prefix_; + DubboFilterStats stats_; + const SerializationType serialization_type_; + const ProtocolType protocol_type_; + std::unique_ptr route_matcher_; + + std::list filter_factories_; +}; + } // namespace DubboProxy } // namespace NetworkFilters } // namespace Extensions diff --git a/source/extensions/filters/network/dubbo_proxy/conn_manager.cc b/source/extensions/filters/network/dubbo_proxy/conn_manager.cc new file mode 100644 index 0000000000000..94f935df3ca64 --- /dev/null +++ b/source/extensions/filters/network/dubbo_proxy/conn_manager.cc @@ -0,0 +1,208 @@ +#include "extensions/filters/network/dubbo_proxy/conn_manager.h" + +#include + +#include "envoy/common/exception.h" + +#include "common/common/fmt.h" + +#include "extensions/filters/network/dubbo_proxy/app_exception.h" +#include "extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.h" +#include "extensions/filters/network/dubbo_proxy/heartbeat_response.h" +#include "extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DubboProxy { + +constexpr uint32_t BufferLimit = UINT32_MAX; + +ConnectionManager::ConnectionManager(Config& config, Runtime::RandomGenerator& random_generator, + TimeSource& 
time_system) + : config_(config), time_system_(time_system), stats_(config_.stats()), + random_generator_(random_generator), deserializer_(config.createDeserializer()), + protocol_(config.createProtocol()), + decoder_(std::make_unique(*protocol_.get(), *deserializer_.get(), *this)) {} + +Network::FilterStatus ConnectionManager::onData(Buffer::Instance& data, bool end_stream) { + ENVOY_LOG(trace, "dubbo: read {} bytes", data.length()); + request_buffer_.move(data); + dispatch(); + + if (end_stream) { + ENVOY_CONN_LOG(trace, "downstream half-closed", read_callbacks_->connection()); + + // Downstream has closed. Unless we're waiting for an upstream connection to complete a oneway + // request, close. The special case for oneway requests allows them to complete before the + // ConnectionManager is destroyed. + if (stopped_) { + ASSERT(!active_message_list_.empty()); + auto metadata = (*active_message_list_.begin())->metadata(); + if (metadata && metadata->message_type() == MessageType::Oneway) { + ENVOY_CONN_LOG(trace, "waiting for one-way completion", read_callbacks_->connection()); + half_closed_ = true; + return Network::FilterStatus::StopIteration; + } + } + + ENVOY_LOG(debug, "dubbo: end data processing"); + resetAllMessages(false); + read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite); + } + + return Network::FilterStatus::StopIteration; +} + +Network::FilterStatus ConnectionManager::onNewConnection() { + return Network::FilterStatus::Continue; +} + +void ConnectionManager::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) { + read_callbacks_ = &callbacks; + read_callbacks_->connection().addConnectionCallbacks(*this); + read_callbacks_->connection().enableHalfClose(true); + read_callbacks_->connection().setBufferLimits(BufferLimit); +} + +void ConnectionManager::onEvent(Network::ConnectionEvent event) { + resetAllMessages(event == Network::ConnectionEvent::LocalClose); +} + +void 
ConnectionManager::onAboveWriteBufferHighWatermark() { + ENVOY_CONN_LOG(debug, "onAboveWriteBufferHighWatermark", read_callbacks_->connection()); + read_callbacks_->connection().readDisable(true); +} + +void ConnectionManager::onBelowWriteBufferLowWatermark() { + ENVOY_CONN_LOG(debug, "onBelowWriteBufferLowWatermark", read_callbacks_->connection()); + read_callbacks_->connection().readDisable(false); +} + +DecoderEventHandler* ConnectionManager::newDecoderEventHandler() { + ENVOY_LOG(debug, "dubbo: create the new docoder event handler"); + + ActiveMessagePtr new_message(std::make_unique(*this)); + new_message->createFilterChain(); + new_message->moveIntoList(std::move(new_message), active_message_list_); + return (*active_message_list_.begin()).get(); +} + +void ConnectionManager::onHeartbeat(MessageMetadataSharedPtr metadata) { + stats_.request_event_.inc(); + + if (read_callbacks_->connection().state() != Network::Connection::State::Open) { + ENVOY_LOG(warn, "dubbo: downstream connection is closed or closing"); + return; + } + + metadata->setResponseStatus(ResponseStatus::Ok); + metadata->setMessageType(MessageType::Response); + metadata->setEventFlag(true); + + HeartbeatResponse heartbeat; + Buffer::OwnedImpl response_buffer; + heartbeat.encode(*metadata, *protocol_, *deserializer_, response_buffer); + + read_callbacks_->connection().write(response_buffer, false); +} + +void ConnectionManager::dispatch() { + if (0 == request_buffer_.length()) { + ENVOY_LOG(warn, "dubbo: it's empty data"); + return; + } + + if (stopped_) { + ENVOY_CONN_LOG(debug, "dubbo: dubbo filter stopped", read_callbacks_->connection()); + return; + } + + try { + bool underflow = false; + while (!underflow) { + Network::FilterStatus status = decoder_->onData(request_buffer_, underflow); + if (status == Network::FilterStatus::StopIteration) { + stopped_ = true; + break; + } + } + return; + } catch (const EnvoyException& ex) { + ENVOY_CONN_LOG(error, "dubbo error: {}", 
read_callbacks_->connection(), ex.what()); + read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush); + stats_.request_decoding_error_.inc(); + } + resetAllMessages(true); +} + +void ConnectionManager::sendLocalReply(MessageMetadata& metadata, + const DubboFilters::DirectResponse& response, + bool end_stream) { + if (read_callbacks_->connection().state() != Network::Connection::State::Open) { + return; + } + + Buffer::OwnedImpl buffer; + const DubboFilters::DirectResponse::ResponseType result = + response.encode(metadata, *protocol_, *deserializer_, buffer); + read_callbacks_->connection().write(buffer, end_stream); + + if (end_stream) { + read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite); + } + + switch (result) { + case DubboFilters::DirectResponse::ResponseType::SuccessReply: + stats_.local_response_success_.inc(); + break; + case DubboFilters::DirectResponse::ResponseType::ErrorReply: + stats_.local_response_error_.inc(); + break; + case DubboFilters::DirectResponse::ResponseType::Exception: + stats_.local_response_business_exception_.inc(); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +void ConnectionManager::continueDecoding() { + ENVOY_CONN_LOG(debug, "dubbo filter continued", read_callbacks_->connection()); + stopped_ = false; + dispatch(); + + if (!stopped_ && half_closed_) { + // If we're half closed, but not stopped waiting for an upstream, + // reset any pending rpcs and close the connection. 
+ resetAllMessages(false); + read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite); + } +} + +void ConnectionManager::deferredMessage(ActiveMessage& message) { + if (!message.inserted()) { + return; + } + read_callbacks_->connection().dispatcher().deferredDelete( + message.removeFromList(active_message_list_)); +} + +void ConnectionManager::resetAllMessages(bool local_reset) { + while (!active_message_list_.empty()) { + if (local_reset) { + ENVOY_CONN_LOG(debug, "local close with active request", read_callbacks_->connection()); + stats_.cx_destroy_local_with_active_rq_.inc(); + } else { + ENVOY_CONN_LOG(debug, "remote close with active request", read_callbacks_->connection()); + stats_.cx_destroy_remote_with_active_rq_.inc(); + } + + active_message_list_.front()->onReset(); + } +} + +} // namespace DubboProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/conn_manager.h b/source/extensions/filters/network/dubbo_proxy/conn_manager.h new file mode 100644 index 0000000000000..c46417862b8c6 --- /dev/null +++ b/source/extensions/filters/network/dubbo_proxy/conn_manager.h @@ -0,0 +1,107 @@ +#pragma once + +#include "envoy/common/time.h" +#include "envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.pb.h" +#include "envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.pb.validate.h" +#include "envoy/network/connection.h" +#include "envoy/network/filter.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats.h" +#include "envoy/stats/stats_macros.h" +#include "envoy/stats/timespan.h" + +#include "common/common/logger.h" + +#include "extensions/filters/network/dubbo_proxy/active_message.h" +#include "extensions/filters/network/dubbo_proxy/decoder.h" +#include "extensions/filters/network/dubbo_proxy/decoder_event_handler.h" +#include "extensions/filters/network/dubbo_proxy/deserializer.h" +#include 
"extensions/filters/network/dubbo_proxy/filters/filter.h" +#include "extensions/filters/network/dubbo_proxy/protocol.h" +#include "extensions/filters/network/dubbo_proxy/stats.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DubboProxy { + +/** + * Config is a configuration interface for ConnectionManager. + */ +class Config { +public: + virtual ~Config() = default; + + virtual DubboFilters::FilterChainFactory& filterFactory() PURE; + virtual DubboFilterStats& stats() PURE; + virtual ProtocolPtr createProtocol() PURE; + virtual DeserializerPtr createDeserializer() PURE; + virtual Router::Config& routerConfig() PURE; +}; + +// class ActiveMessagePtr; +class ConnectionManager : public Network::ReadFilter, + public Network::ConnectionCallbacks, + public DecoderCallbacks, + Logger::Loggable { +public: + using ConfigProtocolType = envoy::config::filter::network::dubbo_proxy::v2alpha1::ProtocolType; + using ConfigSerializationType = + envoy::config::filter::network::dubbo_proxy::v2alpha1::SerializationType; + + ConnectionManager(Config& config, Runtime::RandomGenerator& random_generator, + TimeSource& time_system); + ~ConnectionManager() override = default; + + // Network::ReadFilter + Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override; + Network::FilterStatus onNewConnection() override; + void initializeReadFilterCallbacks(Network::ReadFilterCallbacks&) override; + + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent) override; + void onAboveWriteBufferHighWatermark() override; + void onBelowWriteBufferLowWatermark() override; + + // DecoderCallbacks + DecoderEventHandler* newDecoderEventHandler() override; + void onHeartbeat(MessageMetadataSharedPtr metadata) override; + + DubboFilterStats& stats() const { return stats_; } + Network::Connection& connection() const { return read_callbacks_->connection(); } + TimeSource& time_system() const { return time_system_; } + 
Runtime::RandomGenerator& random_generator() const { return random_generator_; } + Config& config() const { return config_; } + SerializationType downstreamSerializationType() const { return deserializer_->type(); } + ProtocolType downstreamProtocolType() const { return protocol_->type(); } + + void continueDecoding(); + void deferredMessage(ActiveMessage& message); + void sendLocalReply(MessageMetadata& metadata, const DubboFilters::DirectResponse& response, + bool end_stream); + +private: + void dispatch(); + void resetAllMessages(bool local_reset); + + Buffer::OwnedImpl request_buffer_; + std::list active_message_list_; + + bool stopped_{false}; + bool half_closed_{false}; + + Config& config_; + TimeSource& time_system_; + DubboFilterStats& stats_; + Runtime::RandomGenerator& random_generator_; + + DeserializerPtr deserializer_; + ProtocolPtr protocol_; + DecoderPtr decoder_; + Network::ReadFilterCallbacks* read_callbacks_{}; +}; + +} // namespace DubboProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/decoder.cc b/source/extensions/filters/network/dubbo_proxy/decoder.cc index d5f99b97f1a4b..c5be03feabbed 100644 --- a/source/extensions/filters/network/dubbo_proxy/decoder.cc +++ b/source/extensions/filters/network/dubbo_proxy/decoder.cc @@ -2,47 +2,179 @@ #include "common/common/macros.h" +#include "extensions/filters/network/dubbo_proxy/heartbeat_response.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { namespace DubboProxy { -Decoder::Decoder(ProtocolPtr&& protocol, DeserializerPtr&& deserializer, - DecoderCallbacks& decoder_callbacks) - : deserializer_(std::move(deserializer)), protocol_(std::move(protocol)), - decoder_callbacks_(decoder_callbacks) {} - -void Decoder::onData(Buffer::Instance& data) { - ENVOY_LOG(debug, "dubbo: {} bytes available", data.length()); - while (true) { - if (!decode_ended_) { - if (!protocol_->decode(data, 
&context_)) { - ENVOY_LOG(debug, "dubbo: need more data for {} protocol", protocol_->name()); - return; - } - - decode_ended_ = true; - ENVOY_LOG(debug, "dubbo: {} protocol decode ended", protocol_->name()); +DecoderStateMachine::DecoderStatus +DecoderStateMachine::onTransportBegin(Buffer::Instance& buffer, Protocol::Context& context) { + if (!protocol_.decode(buffer, &context, metadata_)) { + ENVOY_LOG(debug, "dubbo decoder: need more data for {} protocol", protocol_.name()); + return DecoderStatus(ProtocolState::WaitForData); + } + + if (context.is_heartbeat_) { + ENVOY_LOG(debug, "dubbo decoder: this is the {} heartbeat message", protocol_.name()); + buffer.drain(context.header_size_); + decoder_callbacks_.onHeartbeat(metadata_); + return DecoderStatus(ProtocolState::Done, Network::FilterStatus::Continue); + } else { + handler_ = decoder_callbacks_.newDecoderEventHandler(); + } + return DecoderStatus(ProtocolState::OnTransferHeaderTo, handler_->transportBegin()); +} + +DecoderStateMachine::DecoderStatus DecoderStateMachine::onTransportEnd() { + ENVOY_LOG(debug, "dubbo decoder: complete protocol processing"); + return DecoderStatus(ProtocolState::Done, handler_->transportEnd()); +} + +DecoderStateMachine::DecoderStatus DecoderStateMachine::onTransferHeaderTo(Buffer::Instance& buffer, + size_t length) { + ENVOY_LOG(debug, "dubbo decoder: transfer protocol header, buffer size {}, header size {}", + buffer.length(), length); + return DecoderStatus(ProtocolState::OnMessageBegin, handler_->transferHeaderTo(buffer, length)); +} + +DecoderStateMachine::DecoderStatus DecoderStateMachine::onTransferBodyTo(Buffer::Instance& buffer, + int32_t length) { + ENVOY_LOG(debug, "dubbo decoder: transfer protocol body, buffer size {}, body size {}", + buffer.length(), length); + return DecoderStatus(ProtocolState::OnTransportEnd, handler_->transferBodyTo(buffer, length)); +} + +DecoderStateMachine::DecoderStatus DecoderStateMachine::onMessageBegin() { + ENVOY_LOG(debug, "dubbo 
decoder: start deserializing messages, deserializer name {}", + deserializer_.name()); + return DecoderStatus(ProtocolState::OnMessageEnd, + handler_->messageBegin(metadata_->message_type(), metadata_->request_id(), + metadata_->serialization_type())); +} + +DecoderStateMachine::DecoderStatus DecoderStateMachine::onMessageEnd(Buffer::Instance& buffer, + int32_t message_size) { + ENVOY_LOG(debug, "dubbo decoder: expected body size is {}", message_size); + + if (buffer.length() < static_cast(message_size)) { + ENVOY_LOG(debug, "dubbo decoder: need more data for {} deserialization, current size {}", + deserializer_.name(), buffer.length()); + return DecoderStatus(ProtocolState::WaitForData); + } + + switch (metadata_->message_type()) { + case MessageType::Oneway: + case MessageType::Request: + deserializer_.deserializeRpcInvocation(buffer, message_size, metadata_); + break; + case MessageType::Response: { + auto info = deserializer_.deserializeRpcResult(buffer, message_size); + if (info->hasException()) { + metadata_->setMessageType(MessageType::Exception); } + break; + } + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + + ENVOY_LOG(debug, "dubbo decoder: ends the deserialization of the message"); + return DecoderStatus(ProtocolState::OnTransferBodyTo, handler_->messageEnd(metadata_)); +} - ENVOY_LOG(debug, "dubbo: expected body size is {}", context_.body_size_); +DecoderStateMachine::DecoderStatus DecoderStateMachine::handleState(Buffer::Instance& buffer) { + switch (state_) { + case ProtocolState::OnTransportBegin: + return onTransportBegin(buffer, context_); + case ProtocolState::OnTransferHeaderTo: + return onTransferHeaderTo(buffer, context_.header_size_); + case ProtocolState::OnMessageBegin: + return onMessageBegin(); + case ProtocolState::OnMessageEnd: + return onMessageEnd(buffer, context_.body_size_); + case ProtocolState::OnTransferBodyTo: + return onTransferBodyTo(buffer, context_.body_size_); + case ProtocolState::OnTransportEnd: + return onTransportEnd(); 
+ default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +ProtocolState DecoderStateMachine::run(Buffer::Instance& buffer) { + while (state_ != ProtocolState::Done) { + ENVOY_LOG(trace, "dubbo decoder: state {}, {} bytes available", + ProtocolStateNameValues::name(state_), buffer.length()); - if (data.length() < context_.body_size_) { - ENVOY_LOG(debug, "dubbo: need more data for {} deserialization", deserializer_->name()); - return; + DecoderStatus s = handleState(buffer); + if (s.next_state_ == ProtocolState::WaitForData) { + return ProtocolState::WaitForData; } - if (context_.is_request_) { - decoder_callbacks_.onRpcInvocation( - deserializer_->deserializeRpcInvocation(data, context_.body_size_)); - ENVOY_LOG(debug, "dubbo: {} RpcInvocation deserialize ended", deserializer_->name()); - } else { - decoder_callbacks_.onRpcResult( - deserializer_->deserializeRpcResult(data, context_.body_size_)); - ENVOY_LOG(debug, "dubbo: {} RpcResult deserialize ended", deserializer_->name()); + state_ = s.next_state_; + + ASSERT(s.filter_status_.has_value()); + if (s.filter_status_.value() == Network::FilterStatus::StopIteration) { + return ProtocolState::StopIteration; } - decode_ended_ = false; } + + return state_; +} + +typedef std::unique_ptr DecoderStateMachinePtr; + +Decoder::Decoder(Protocol& protocol, Deserializer& deserializer, + DecoderCallbacks& decoder_callbacks) + : deserializer_(deserializer), protocol_(protocol), decoder_callbacks_(decoder_callbacks) {} + +Network::FilterStatus Decoder::onData(Buffer::Instance& data, bool& buffer_underflow) { + ENVOY_LOG(debug, "dubbo decoder: {} bytes available", data.length()); + buffer_underflow = false; + + if (!decode_started_) { + start(); + } + + ASSERT(state_machine_ != nullptr); + + ENVOY_LOG(debug, "dubbo decoder: protocol {}, state {}, {} bytes available", protocol_.name(), + ProtocolStateNameValues::name(state_machine_->currentState()), data.length()); + + ProtocolState rv = state_machine_->run(data); + switch (rv) { + case 
ProtocolState::WaitForData: + ENVOY_LOG(debug, "dubbo decoder: wait for data"); + buffer_underflow = true; + return Network::FilterStatus::Continue; + case ProtocolState::StopIteration: + ENVOY_LOG(debug, "dubbo decoder: wait for continuation"); + return Network::FilterStatus::StopIteration; + default: + break; + } + + ASSERT(rv == ProtocolState::Done); + + complete(); + buffer_underflow = (data.length() == 0); + ENVOY_LOG(debug, "dubbo decoder: data length {}", data.length()); + return Network::FilterStatus::Continue; +} + +void Decoder::start() { + metadata_ = std::make_shared(); + state_machine_ = std::make_unique(protocol_, deserializer_, metadata_, + decoder_callbacks_); + decode_started_ = true; +} + +void Decoder::complete() { + metadata_.reset(); + state_machine_.reset(); + decode_started_ = false; } } // namespace DubboProxy diff --git a/source/extensions/filters/network/dubbo_proxy/decoder.h b/source/extensions/filters/network/dubbo_proxy/decoder.h index b311877e71198..71bde4016c665 100644 --- a/source/extensions/filters/network/dubbo_proxy/decoder.h +++ b/source/extensions/filters/network/dubbo_proxy/decoder.h @@ -5,6 +5,7 @@ #include "common/buffer/buffer_impl.h" #include "common/common/logger.h" +#include "extensions/filters/network/dubbo_proxy/decoder_event_handler.h" #include "extensions/filters/network/dubbo_proxy/deserializer.h" #include "extensions/filters/network/dubbo_proxy/protocol.h" @@ -13,20 +14,108 @@ namespace Extensions { namespace NetworkFilters { namespace DubboProxy { -class DecoderCallbacks { +#define ALL_PROTOCOL_STATES(FUNCTION) \ + FUNCTION(StopIteration) \ + FUNCTION(WaitForData) \ + FUNCTION(OnTransportBegin) \ + FUNCTION(OnTransportEnd) \ + FUNCTION(OnMessageBegin) \ + FUNCTION(OnMessageEnd) \ + FUNCTION(OnTransferHeaderTo) \ + FUNCTION(OnTransferBodyTo) \ + FUNCTION(Done) + +/** + * ProtocolState represents a set of states used in a state machine to decode Dubbo requests + * and responses. 
+ */ +enum class ProtocolState { ALL_PROTOCOL_STATES(GENERATE_ENUM) }; + +class ProtocolStateNameValues { +public: + static const std::string& name(ProtocolState state) { + size_t i = static_cast(state); + ASSERT(i < names().size()); + return names()[i]; + } + +private: + static const std::vector& names() { + CONSTRUCT_ON_FIRST_USE(std::vector, {ALL_PROTOCOL_STATES(GENERATE_STRING)}); + } +}; + +class DecoderStateMachine : public Logger::Loggable { public: - virtual ~DecoderCallbacks() {} - virtual void onRpcInvocation(RpcInvocationPtr&& invo) PURE; - virtual void onRpcResult(RpcResultPtr&& res) PURE; + DecoderStateMachine(Protocol& protocol, Deserializer& deserializer, + MessageMetadataSharedPtr& metadata, DecoderCallbacks& decoder_callbacks) + : protocol_(protocol), deserializer_(deserializer), metadata_(metadata), + decoder_callbacks_(decoder_callbacks), state_(ProtocolState::OnTransportBegin) {} + + /** + * Consumes as much data from the configured Buffer as possible and executes the decoding state + * machine. Returns ProtocolState::WaitForData if more data is required to complete processing of + * a message. Returns ProtocolState::Done when the end of a message is successfully processed. + * Once the Done state is reached, further invocations of run return immediately with Done. + * + * @param buffer a buffer containing the remaining data to be processed + * @return ProtocolState returns with ProtocolState::WaitForData or ProtocolState::Done + * @throw Envoy Exception if thrown by the underlying Protocol + */ + ProtocolState run(Buffer::Instance& buffer); + + /** + * @return the current ProtocolState + */ + ProtocolState currentState() const { return state_; } + + /** + * Set the current state. Used for testing only. 
+ */ + void setCurrentState(ProtocolState state) { state_ = state; } + +private: + struct DecoderStatus { + DecoderStatus() = default; + DecoderStatus(ProtocolState next_state) : next_state_(next_state), filter_status_{} {}; + DecoderStatus(ProtocolState next_state, Network::FilterStatus filter_status) + : next_state_(next_state), filter_status_(filter_status){}; + + ProtocolState next_state_; + absl::optional filter_status_; + }; + + // These functions map directly to the matching ProtocolState values. Each returns the next state + // or ProtocolState::WaitForData if more data is required. + DecoderStatus onTransportBegin(Buffer::Instance& buffer, Protocol::Context& context); + DecoderStatus onTransportEnd(); + DecoderStatus onTransferHeaderTo(Buffer::Instance& buffer, size_t length); + DecoderStatus onTransferBodyTo(Buffer::Instance& buffer, int32_t length); + DecoderStatus onMessageBegin(); + DecoderStatus onMessageEnd(Buffer::Instance& buffer, int32_t message_size); + + // handleState delegates to the appropriate method based on state_. + DecoderStatus handleState(Buffer::Instance& buffer); + + Protocol& protocol_; + Deserializer& deserializer_; + MessageMetadataSharedPtr metadata_; + DecoderCallbacks& decoder_callbacks_; + + ProtocolState state_; + Protocol::Context context_; + + DecoderEventHandler* handler_; }; +typedef std::unique_ptr DecoderStateMachinePtr; + /** * Decoder encapsulates a configured and ProtocolPtr and SerializationPtr. 
*/ class Decoder : public Logger::Loggable { public: - Decoder(ProtocolPtr&& protocol, DeserializerPtr&& deserializer, - DecoderCallbacks& decoder_callbacks); + Decoder(Protocol& protocol, Deserializer& deserializer, DecoderCallbacks& decoder_callbacks); /** * Drains data from the given buffer @@ -34,16 +123,20 @@ class Decoder : public Logger::Loggable { * @param data a Buffer containing Dubbo protocol data * @throw EnvoyException on Dubbo protocol errors */ - void onData(Buffer::Instance& data); + Network::FilterStatus onData(Buffer::Instance& data, bool& buffer_underflow); - const Deserializer& serializer() { return *deserializer_; } - const Protocol& protocol() { return *protocol_; } + const Deserializer& serializer() { return deserializer_; } + const Protocol& protocol() { return protocol_; } private: - DeserializerPtr deserializer_; - ProtocolPtr protocol_; - bool decode_ended_ = false; - Protocol::Context context_; + void start(); + void complete(); + + MessageMetadataSharedPtr metadata_; + Deserializer& deserializer_; + Protocol& protocol_; + DecoderStateMachinePtr state_machine_; + bool decode_started_ = false; DecoderCallbacks& decoder_callbacks_; }; diff --git a/source/extensions/filters/network/dubbo_proxy/deserializer.h b/source/extensions/filters/network/dubbo_proxy/deserializer.h index f842fcb1df8c5..2a153ecc23f31 100644 --- a/source/extensions/filters/network/dubbo_proxy/deserializer.h +++ b/source/extensions/filters/network/dubbo_proxy/deserializer.h @@ -10,6 +10,7 @@ #include "common/singleton/const_singleton.h" #include "extensions/filters/network/dubbo_proxy/message.h" +#include "extensions/filters/network/dubbo_proxy/metadata.h" namespace Envoy { namespace Extensions { @@ -30,7 +31,6 @@ class DeserializerNameValues { const DeserializerTypeNameMap deserializerTypeNameMap = { {SerializationType::Hessian, "hessian"}, - {SerializationType::Json, "json"}, }; const std::string& fromType(SerializationType type) const { @@ -52,7 +52,7 @@ typedef 
ConstSingleton DeserializerNames; */ class RpcInvocation { public: - virtual ~RpcInvocation() {} + virtual ~RpcInvocation() = default; virtual const std::string& getMethodName() const PURE; virtual const std::string& getServiceName() const PURE; virtual const std::string& getServiceVersion() const PURE; @@ -67,7 +67,7 @@ typedef std::unique_ptr RpcInvocationPtr; */ class RpcResult { public: - virtual ~RpcResult() {} + virtual ~RpcResult() = default; virtual bool hasException() const PURE; }; @@ -75,7 +75,7 @@ typedef std::unique_ptr RpcResultPtr; class Deserializer { public: - virtual ~Deserializer() {} + virtual ~Deserializer() = default; /** * Return this Deserializer's name * @@ -96,8 +96,8 @@ class Deserializer { * @body_size the complete RpcInvocation size * @throws EnvoyException if the data is not valid for this serialization */ - virtual RpcInvocationPtr deserializeRpcInvocation(Buffer::Instance& buffer, - size_t body_size) PURE; + virtual void deserializeRpcInvocation(Buffer::Instance& buffer, size_t body_size, + MessageMetadataSharedPtr metadata) PURE; /** * deserialize result of an rpc call * If successful, the RpcResult removed from the buffer @@ -107,6 +107,18 @@ class Deserializer { * @throws EnvoyException if the data is not valid for this serialization */ virtual RpcResultPtr deserializeRpcResult(Buffer::Instance& buffer, size_t body_size) PURE; + + /** + * serialize result of an rpc call + * If successful, the output_buffer is written to the serialized data + * + * @param output_buffer store the serialized data + * @param content the rpc response content + * @param type the rpc response type + * @return size_t the length of the serialized content + */ + virtual size_t serializeRpcResult(Buffer::Instance& output_buffer, const std::string& content, + RpcResponseType type) PURE; }; typedef std::unique_ptr DeserializerPtr; @@ -117,7 +129,7 @@ typedef std::unique_ptr DeserializerPtr; */ class NamedDeserializerConfigFactory { public: - virtual 
~NamedDeserializerConfigFactory() {} + virtual ~NamedDeserializerConfigFactory() = default; /** * Create a particular Dubbo deserializer. diff --git a/source/extensions/filters/network/dubbo_proxy/deserializer_impl.cc b/source/extensions/filters/network/dubbo_proxy/deserializer_impl.cc index 353655af5f4ae..985c0d32fd977 100644 --- a/source/extensions/filters/network/dubbo_proxy/deserializer_impl.cc +++ b/source/extensions/filters/network/dubbo_proxy/deserializer_impl.cc @@ -3,15 +3,7 @@ namespace Envoy { namespace Extensions { namespace NetworkFilters { -namespace DubboProxy { - -RpcInvocationImpl::~RpcInvocationImpl() {} -RpcInvocationImpl::RpcInvocationImpl(const std::string& method_name, - const std::string& service_name, - const std::string& service_version) - : method_name_(method_name), service_name_(service_name), service_version_(service_version) {} - -} // namespace DubboProxy +namespace DubboProxy {} // namespace DubboProxy } // namespace NetworkFilters } // namespace Extensions } // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/dubbo_proxy/deserializer_impl.h b/source/extensions/filters/network/dubbo_proxy/deserializer_impl.h index a0f962c3cce13..252143c3454c7 100644 --- a/source/extensions/filters/network/dubbo_proxy/deserializer_impl.h +++ b/source/extensions/filters/network/dubbo_proxy/deserializer_impl.h @@ -7,21 +7,6 @@ namespace Extensions { namespace NetworkFilters { namespace DubboProxy { -class RpcInvocationImpl : public RpcInvocation { -public: - ~RpcInvocationImpl(); - RpcInvocationImpl(const std::string& method_name, const std::string& service_name, - const std::string& service_version); - virtual const std::string& getMethodName() const override { return method_name_; } - virtual const std::string& getServiceName() const override { return service_name_; } - virtual const std::string& getServiceVersion() const override { return service_version_; } - -private: - std::string method_name_; - 
std::string service_name_; - std::string service_version_; -}; - class RpcResultImpl : public RpcResult { public: RpcResultImpl() {} diff --git a/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.cc b/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.cc index 4682163ae01fd..f7b6f20a73f7e 100644 --- a/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.cc +++ b/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.cc @@ -53,29 +53,42 @@ bool isValidResponseStatus(ResponseStatus status) { return true; } -void RequestMessageImpl::fromBuffer(Buffer::Instance& data) { +void parseRequestInfoFromBuffer(Buffer::Instance& data, MessageMetadataSharedPtr metadata) { ASSERT(data.length() >= DubboProtocolImpl::MessageSize); uint8_t flag = data.peekInt(FlagOffset); - is_two_way_ = (flag & TwoWayMask) == TwoWayMask ? true : false; - type_ = static_cast(flag & SerializationTypeMask); - if (!isValidSerializationType(type_)) { + bool is_two_way = (flag & TwoWayMask) == TwoWayMask ? 
true : false; + SerializationType type = static_cast(flag & SerializationTypeMask); + if (!isValidSerializationType(type)) { throw EnvoyException( fmt::format("invalid dubbo message serialization type {}", - static_cast::type>(type_))); + static_cast::type>(type))); } + + if (!is_two_way) { + metadata->setMessageType(MessageType::Oneway); + } + + metadata->setSerializationType(type); } -void ResponseMessageImpl::fromBuffer(Buffer::Instance& buffer) { +void parseResponseInfoFromBuffer(Buffer::Instance& buffer, MessageMetadataSharedPtr metadata) { ASSERT(buffer.length() >= DubboProtocolImpl::MessageSize); - status_ = static_cast(buffer.peekInt(StatusOffset)); - if (!isValidResponseStatus(status_)) { + ResponseStatus status = static_cast(buffer.peekInt(StatusOffset)); + if (!isValidResponseStatus(status)) { throw EnvoyException( fmt::format("invalid dubbo message response status {}", - static_cast::type>(status_))); + static_cast::type>(status))); } + + metadata->setResponseStatus(status); } -bool DubboProtocolImpl::decode(Buffer::Instance& buffer, Protocol::Context* context) { +bool DubboProtocolImpl::decode(Buffer::Instance& buffer, Protocol::Context* context, + MessageMetadataSharedPtr metadata) { + if (!metadata) { + throw EnvoyException("invalid metadata parameter"); + } + if (buffer.length() < DubboProtocolImpl::MessageSize) { return false; } @@ -92,29 +105,52 @@ bool DubboProtocolImpl::decode(Buffer::Instance& buffer, Protocol::Context* cont int64_t request_id = buffer.peekBEInt(RequestIDOffset); int32_t body_size = buffer.peekBEInt(BodySizeOffset); - if (body_size > MaxBodySize || body_size <= 0) { + // The body size of the heartbeat message is zero. 
+ if (body_size > MaxBodySize || body_size < 0) { throw EnvoyException(fmt::format("invalid dubbo message size {}", body_size)); } - context->body_size_ = body_size; + metadata->setMessageType(type); + metadata->setRequestId(request_id); if (type == MessageType::Request) { - RequestMessageImplPtr req = - std::make_unique(request_id, body_size, is_event); - req->fromBuffer(buffer); - context->is_request_ = true; - callbacks_.onRequestMessage(std::move(req)); + parseRequestInfoFromBuffer(buffer, metadata); } else { - ResponseMessageImplPtr res = - std::make_unique(request_id, body_size, is_event); - res->fromBuffer(buffer); - callbacks_.onResponseMessage(std::move(res)); + parseResponseInfoFromBuffer(buffer, metadata); } - buffer.drain(MessageSize); + context->header_size_ = DubboProtocolImpl::MessageSize; + context->body_size_ = body_size; + context->is_heartbeat_ = is_event; + return true; } +bool DubboProtocolImpl::encode(Buffer::Instance& buffer, int32_t body_size, + const MessageMetadata& metadata) { + switch (metadata.message_type()) { + case MessageType::Response: { + ASSERT(metadata.response_status().has_value()); + buffer.writeBEInt(MagicNumber); + uint8_t flag = static_cast(metadata.serialization_type()); + if (metadata.is_event()) { + ASSERT(0 == body_size); + flag = flag ^ EventMask; + } + buffer.writeByte(flag); + buffer.writeByte(static_cast(metadata.response_status().value())); + buffer.writeBEInt(metadata.request_id()); + buffer.writeBEInt(body_size); + return true; + } + case MessageType::Request: { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + class DubboProtocolConfigFactory : public ProtocolFactoryBase { public: DubboProtocolConfigFactory() : ProtocolFactoryBase(ProtocolType::Dubbo) {} diff --git a/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.h b/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.h index 34de7dfa33292..6146df34f5114 100644 --- 
a/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.h +++ b/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.h @@ -7,77 +7,18 @@ namespace Extensions { namespace NetworkFilters { namespace DubboProxy { -class MessageImpl : public virtual Message { -public: - MessageImpl(int64_t request_id, int32_t body_size, bool is_event) - : request_id_(request_id), body_size_(body_size), is_event_(is_event) {} - virtual ~MessageImpl() {} - virtual int32_t bodySize() const override { return body_size_; } - - // Is a normal message or event - virtual bool isEvent() const override { return is_event_; } - - virtual int64_t requestId() const override { return request_id_; } - - virtual std::string toString() const override { - return fmt::format("body size:{}, is event:{}, request id: {}", body_size_, is_event_, - request_id_); - } - -protected: - int64_t request_id_; - int32_t body_size_; - bool is_event_; -}; - -class RequestMessageImpl : public MessageImpl, public RequestMessage { -public: - using MessageImpl::MessageImpl; - - virtual ~RequestMessageImpl() {} - void fromBuffer(Buffer::Instance& data); - virtual MessageType messageType() const override { return MessageType::Request; } - - virtual SerializationType serializationType() const override { return type_; } - - virtual bool isTwoWay() const override { return is_two_way_; } - -private: - SerializationType type_; - bool is_two_way_; -}; - -typedef std::unique_ptr RequestMessageImplPtr; - -class ResponseMessageImpl : public MessageImpl, public ResponseMessage { -public: - using MessageImpl::MessageImpl; - - virtual ~ResponseMessageImpl() {} - void fromBuffer(Buffer::Instance& data); - - virtual MessageType messageType() const override { return MessageType::Response; } - - virtual ResponseStatus responseStatus() const override { return status_; } - -private: - ResponseStatus status_; -}; - -typedef std::unique_ptr ResponseMessageImplPtr; - class DubboProtocolImpl : public Protocol { public: - 
DubboProtocolImpl(ProtocolCallbacks& callbacks) : callbacks_(callbacks) {} + DubboProtocolImpl() = default; const std::string& name() const override { return ProtocolNames::get().fromType(type()); } ProtocolType type() const override { return ProtocolType::Dubbo; } - virtual bool decode(Buffer::Instance& buffer, Protocol::Context* context) override; + bool decode(Buffer::Instance& buffer, Protocol::Context* context, + MessageMetadataSharedPtr metadata) override; + bool encode(Buffer::Instance& buffer, int32_t body_size, + const MessageMetadata& metadata) override; static constexpr uint8_t MessageSize = 16; static constexpr int32_t MaxBodySize = 16 * 1024 * 1024; - -private: - ProtocolCallbacks& callbacks_; }; } // namespace DubboProxy diff --git a/source/extensions/filters/network/dubbo_proxy/filter.cc b/source/extensions/filters/network/dubbo_proxy/filter.cc deleted file mode 100644 index 823b389143ef8..0000000000000 --- a/source/extensions/filters/network/dubbo_proxy/filter.cc +++ /dev/null @@ -1,238 +0,0 @@ -#include "extensions/filters/network/dubbo_proxy/filter.h" - -#include "envoy/common/exception.h" - -#include "common/common/fmt.h" - -#include "extensions/filters/network/dubbo_proxy/buffer_helper.h" -#include "extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.h" -#include "extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.h" - -namespace Envoy { -namespace Extensions { -namespace NetworkFilters { -namespace DubboProxy { - -namespace { - -using ConfigProtocolType = envoy::config::filter::network::dubbo_proxy::v2alpha1::ProtocolType; - -typedef std::map ProtocolTypeMap; - -static const ProtocolTypeMap& protocolTypeMap() { - CONSTRUCT_ON_FIRST_USE(ProtocolTypeMap, { - {ConfigProtocolType::Dubbo, ProtocolType::Dubbo}, - }); -} - -ProtocolType lookupProtocolType(ConfigProtocolType config_type) { - const auto& iter = protocolTypeMap().find(config_type); - if (iter == protocolTypeMap().end()) { - throw EnvoyException(fmt::format( - 
"unknown protocol {}", - envoy::config::filter::network::dubbo_proxy::v2alpha1::ProtocolType_Name(config_type))); - } - return iter->second; -} - -using ConfigSerializationType = - envoy::config::filter::network::dubbo_proxy::v2alpha1::SerializationType; - -typedef std::map SerializationTypeMap; - -static const SerializationTypeMap& serializationTypeMap() { - CONSTRUCT_ON_FIRST_USE(SerializationTypeMap, - { - {ConfigSerializationType::Hessian2, SerializationType::Hessian}, - }); -} - -SerializationType lookupSerializationType(ConfigSerializationType type) { - const auto& iter = serializationTypeMap().find(type); - if (iter == serializationTypeMap().end()) { - throw EnvoyException(fmt::format( - "unknown deserializer {}", - envoy::config::filter::network::dubbo_proxy::v2alpha1::SerializationType_Name(type))); - } - - return iter->second; -} - -} // namespace - -Filter::Filter(const std::string& stat_prefix, ConfigProtocolType protocol_type, - ConfigSerializationType serialization_type, Stats::Scope& scope, - TimeSource& time_source) - : stats_(DubboFilterStats::generateStats(stat_prefix, scope)), - protocol_type_(lookupProtocolType(protocol_type)), - serialization_type_(lookupSerializationType(serialization_type)), time_source_(time_source) {} - -Filter::~Filter() = default; - -Network::FilterStatus Filter::onData(Buffer::Instance& data, bool) { - if (!sniffing_) { - if (request_buffer_.length() > 0) { - // Stopped sniffing during response (in onWrite). Make sure leftover request_buffer_ contents - // are at the start of data or the upstream will see a corrupted request. 
- request_buffer_.move(data); - data.move(request_buffer_); - ASSERT(request_buffer_.length() == 0); - } - - return Network::FilterStatus::Continue; - } - - ENVOY_LOG(trace, "dubbo: read {} bytes", data.length()); - request_buffer_.move(data); - - try { - if (!request_decoder_) { - request_decoder_ = createDecoder(*this); - } - - BufferWrapper wrapped(request_buffer_); - request_decoder_->onData(wrapped); - - // Move consumed portion of request back to data for the upstream to consume. - uint64_t pos = wrapped.position(); - if (pos > 0) { - data.move(request_buffer_, pos); - } - } catch (const EnvoyException& ex) { - ENVOY_LOG(error, "dubbo: error {}", ex.what()); - data.move(request_buffer_); - stats_.request_decoding_error_.inc(); - sniffing_ = false; - } - - return Network::FilterStatus::Continue; -} - -Network::FilterStatus Filter::onWrite(Buffer::Instance& data, bool) { - if (!sniffing_) { - if (response_buffer_.length() > 0) { - // Stopped sniffing during request (in onData). Make sure response_buffer_ contents are at the - // start of data or the downstream will see a corrupted response. - response_buffer_.move(data); - data.move(response_buffer_); - ASSERT(response_buffer_.length() == 0); - } - - return Network::FilterStatus::Continue; - } - - ENVOY_LOG(trace, "dubbo: wrote {} bytes", data.length()); - response_buffer_.move(data); - - try { - if (!response_decoder_) { - response_decoder_ = createDecoder(*this); - } - - BufferWrapper wrapped(response_buffer_); - response_decoder_->onData(wrapped); - - // Move consumed portion of response back to data for the downstream to consume. 
- uint64_t pos = wrapped.position(); - if (pos > 0) { - data.move(response_buffer_, pos); - } - } catch (const EnvoyException& ex) { - ENVOY_LOG(error, "dubbo: error {}", ex.what()); - data.move(response_buffer_); - stats_.response_decoding_error_.inc(); - sniffing_ = false; - } - - return Network::FilterStatus::Continue; -} - -void Filter::onEvent(Network::ConnectionEvent event) { - if (active_call_map_.empty()) { - return; - } - - if (event == Network::ConnectionEvent::RemoteClose) { - stats_.cx_destroy_local_with_active_rq_.inc(); - } - - if (event == Network::ConnectionEvent::LocalClose) { - stats_.cx_destroy_remote_with_active_rq_.inc(); - } -} - -void Filter::onRequestMessage(RequestMessagePtr&& message) { - ASSERT(message); - ASSERT(message->messageType() == MessageType::Request); - - stats_.request_.inc(); - message->isTwoWay() ? stats_.request_twoway_.inc() : stats_.request_oneway_.inc(); - - if (message->isEvent()) { - stats_.request_event_.inc(); - } - - ENVOY_LOG(debug, "dubbo request: started {} message", message->requestId()); - - // One-way messages do not receive responses. 
- if (!message->isTwoWay()) { - return; - } - - auto request = std::make_unique(*this, message->requestId()); - active_call_map_.emplace(message->requestId(), std::move(request)); -} - -void Filter::onResponseMessage(ResponseMessagePtr&& message) { - ASSERT(message); - ASSERT(message->messageType() == MessageType::Response); - - auto itor = active_call_map_.find(message->requestId()); - if (itor == active_call_map_.end()) { - throw EnvoyException(fmt::format("unknown request id {}", message->requestId())); - } - active_call_map_.erase(itor); - - ENVOY_LOG(debug, "dubbo response: ended {} message", message->requestId()); - - stats_.response_.inc(); - switch (message->responseStatus()) { - case ResponseStatus::Ok: - stats_.response_success_.inc(); - break; - default: - stats_.response_error_.inc(); - ENVOY_LOG(error, "dubbo response status: {}", static_cast(message->responseStatus())); - break; - } -} - -void Filter::onRpcInvocation(RpcInvocationPtr&& invo) { - ENVOY_LOG(debug, "dubbo request: method name is {}, service name is {}, service version {}", - invo->getMethodName(), invo->getServiceName(), invo->getServiceVersion()); -} - -void Filter::onRpcResult(RpcResultPtr&& res) { - if (res->hasException()) { - stats_.response_exception_.inc(); - } -} - -DecoderPtr Filter::createDecoder(ProtocolCallbacks& prot_callback) { - auto parser = createProtocol(prot_callback); - auto serializer = createDeserializer(); - return std::make_unique(std::move(parser), std::move(serializer), *this); -} - -ProtocolPtr Filter::createProtocol(ProtocolCallbacks& callback) { - return NamedProtocolConfigFactory::getFactory(protocol_type_).createProtocol(callback); -} - -DeserializerPtr Filter::createDeserializer() { - return NamedDeserializerConfigFactory::getFactory(serialization_type_).createDeserializer(); -} - -} // namespace DubboProxy -} // namespace NetworkFilters -} // namespace Extensions -} // namespace Envoy \ No newline at end of file diff --git 
a/source/extensions/filters/network/dubbo_proxy/filter.h b/source/extensions/filters/network/dubbo_proxy/filter.h deleted file mode 100644 index c8e24d0a58c44..0000000000000 --- a/source/extensions/filters/network/dubbo_proxy/filter.h +++ /dev/null @@ -1,108 +0,0 @@ -#pragma once - -#include "envoy/common/time.h" -#include "envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.pb.h" -#include "envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.pb.validate.h" -#include "envoy/network/connection.h" -#include "envoy/network/filter.h" -#include "envoy/stats/scope.h" -#include "envoy/stats/stats.h" -#include "envoy/stats/stats_macros.h" -#include "envoy/stats/timespan.h" - -#include "common/common/logger.h" - -#include "extensions/filters/network/dubbo_proxy/decoder.h" -#include "extensions/filters/network/dubbo_proxy/deserializer.h" -#include "extensions/filters/network/dubbo_proxy/protocol.h" -#include "extensions/filters/network/dubbo_proxy/stats.h" - -namespace Envoy { -namespace Extensions { -namespace NetworkFilters { -namespace DubboProxy { - -class Filter : public Network::Filter, - public Network::ConnectionCallbacks, - public ProtocolCallbacks, - public DecoderCallbacks, - Logger::Loggable { -public: - using ConfigProtocolType = envoy::config::filter::network::dubbo_proxy::v2alpha1::ProtocolType; - using ConfigSerializationType = - envoy::config::filter::network::dubbo_proxy::v2alpha1::SerializationType; - - Filter(const std::string& stat_prefix, ConfigProtocolType protocol_type, - ConfigSerializationType serialization_type, Stats::Scope& scope, TimeSource& time_source); - virtual ~Filter(); - - // Network::ReadFilter - Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override; - Network::FilterStatus onNewConnection() override { return Network::FilterStatus::Continue; } - void initializeReadFilterCallbacks(Network::ReadFilterCallbacks&) override {} - - // Network::WriteFilter - Network::FilterStatus 
onWrite(Buffer::Instance& data, bool end_stream) override; - - // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent) override; - void onAboveWriteBufferHighWatermark() override {} - void onBelowWriteBufferLowWatermark() override {} - - // ProtocolCallbacks - void onRequestMessage(RequestMessagePtr&& message) override; - void onResponseMessage(ResponseMessagePtr&& message) override; - - // DecoderCallbacks - void onRpcInvocation(RpcInvocationPtr&& invo) override; - void onRpcResult(RpcResultPtr&& res) override; - -private: - DecoderPtr createDecoder(ProtocolCallbacks& prot_callback); - ProtocolPtr createProtocol(ProtocolCallbacks& callback); - DeserializerPtr createDeserializer(); - - // ActiveMessage tracks downstream requests for which no response has been received. - struct ActiveMessage { - ActiveMessage(Filter& parent, int32_t request_id) - : parent_(parent), request_timer_(new Stats::Timespan(parent_.stats_.request_time_ms_, - parent_.time_source_)), - request_id_(request_id) { - parent_.stats_.request_active_.inc(); - } - ~ActiveMessage() { - parent_.stats_.request_active_.dec(); - request_timer_->complete(); - } - - Filter& parent_; - Stats::TimespanPtr request_timer_; - const int32_t request_id_; - absl::optional success_{}; - }; - typedef std::unique_ptr ActiveMessagePtr; - - // Downstream request decoder, callbacks, and buffer. - DecoderPtr request_decoder_; - Buffer::OwnedImpl request_buffer_; - - // Upstream response decoder, callbacks, and buffer. - DecoderPtr response_decoder_; - Buffer::OwnedImpl response_buffer_; - - // List of active request messages. 
- std::unordered_map active_call_map_; - - bool sniffing_{true}; - DubboFilterStats stats_; - - ProtocolType protocol_type_; - SerializationType serialization_type_; - - TimeSource& time_source_; -}; - -} // namespace DubboProxy -} // namespace NetworkFilters -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/filters/filter.h b/source/extensions/filters/network/dubbo_proxy/filters/filter.h index c6786b3d161b7..28e1fed8d37f6 100644 --- a/source/extensions/filters/network/dubbo_proxy/filters/filter.h +++ b/source/extensions/filters/network/dubbo_proxy/filters/filter.h @@ -132,6 +132,11 @@ class DecoderFilterCallbacks { * @return StreamInfo for logging purposes. */ virtual StreamInfo::StreamInfo& streamInfo() PURE; + + /** + * Reset the underlying stream. + */ + virtual void resetStream() PURE; }; /** diff --git a/source/extensions/filters/network/dubbo_proxy/heartbeat_response.cc b/source/extensions/filters/network/dubbo_proxy/heartbeat_response.cc new file mode 100644 index 0000000000000..f966f9f86f8d0 --- /dev/null +++ b/source/extensions/filters/network/dubbo_proxy/heartbeat_response.cc @@ -0,0 +1,27 @@ +#include "extensions/filters/network/dubbo_proxy/heartbeat_response.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DubboProxy { + +DubboFilters::DirectResponse::ResponseType +HeartbeatResponse::encode(MessageMetadata& metadata, DubboProxy::Protocol& protocol, Deserializer&, + Buffer::Instance& buffer) const { + ASSERT(metadata.response_status().value() == ResponseStatus::Ok); + ASSERT(metadata.message_type() == MessageType::Response); + ASSERT(metadata.is_event()); + + const size_t serialized_body_size = 0; + if (!protocol.encode(buffer, serialized_body_size, metadata)) { + throw EnvoyException("failed to encode heartbeat message"); + } + + ENVOY_LOG(debug, "buffer length {}", buffer.length()); + return DirectResponse::ResponseType::SuccessReply; +} + +} // namespace 
DubboProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/heartbeat_response.h b/source/extensions/filters/network/dubbo_proxy/heartbeat_response.h new file mode 100644 index 0000000000000..4f53691c7f9bb --- /dev/null +++ b/source/extensions/filters/network/dubbo_proxy/heartbeat_response.h @@ -0,0 +1,26 @@ +#pragma once + +#include "extensions/filters/network/dubbo_proxy/deserializer.h" +#include "extensions/filters/network/dubbo_proxy/filters/filter.h" +#include "extensions/filters/network/dubbo_proxy/metadata.h" +#include "extensions/filters/network/dubbo_proxy/protocol.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DubboProxy { + +struct HeartbeatResponse : public DubboFilters::DirectResponse, + Logger::Loggable { + HeartbeatResponse() = default; + ~HeartbeatResponse() override = default; + + using ResponseType = DubboFilters::DirectResponse::ResponseType; + ResponseType encode(MessageMetadata& metadata, Protocol& protocol, Deserializer& deserializer, + Buffer::Instance& buffer) const override; +}; + +} // namespace DubboProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.cc b/source/extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.cc index c86a297c48f78..e095ee4fe9bb2 100644 --- a/source/extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.cc +++ b/source/extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.cc @@ -14,18 +14,9 @@ namespace Extensions { namespace NetworkFilters { namespace DubboProxy { -enum class RpcResponseType : uint8_t { - ResponseWithException = 0, - ResponseWithValue = 1, - ResponseWithNullValue = 2, - ResponseWithExceptionWithAttachments = 3, - ResponseValueWithAttachments = 4, - ResponseNullValueWithAttachments = 5, -}; - -RpcInvocationPtr 
HessianDeserializerImpl::deserializeRpcInvocation(Buffer::Instance& buffer, - size_t body_size) { - ASSERT(buffer.length() >= body_size); +void HessianDeserializerImpl::deserializeRpcInvocation(Buffer::Instance& buffer, size_t body_size, + MessageMetadataSharedPtr metadata) { + ASSERT(buffer.length() >= static_cast(body_size)); size_t total_size = 0, size; // TODO(zyfjeff): Add format checker std::string dubbo_version = HessianUtils::peekString(buffer, &size); @@ -37,12 +28,14 @@ RpcInvocationPtr HessianDeserializerImpl::deserializeRpcInvocation(Buffer::Insta std::string method_name = HessianUtils::peekString(buffer, &size, total_size); total_size = total_size + size; - if (body_size < total_size) { + if (static_cast(body_size) < total_size) { throw EnvoyException( fmt::format("RpcInvocation size({}) large than body size({})", total_size, body_size)); } - buffer.drain(body_size); - return std::make_unique(method_name, service_name, service_version); + + metadata->setServiceName(service_name); + metadata->setServiceVersion(service_version); + metadata->setMethodName(method_name); } RpcResultPtr HessianDeserializerImpl::deserializeRpcResult(Buffer::Instance& buffer, @@ -57,9 +50,9 @@ RpcResultPtr HessianDeserializerImpl::deserializeRpcResult(Buffer::Instance& buf switch (type) { case RpcResponseType::ResponseWithException: case RpcResponseType::ResponseWithExceptionWithAttachments: + case RpcResponseType::ResponseWithValue: result = std::make_unique(true); break; - case RpcResponseType::ResponseWithValue: case RpcResponseType::ResponseWithNullValue: has_value = false; FALLTHRU; @@ -81,10 +74,27 @@ RpcResultPtr HessianDeserializerImpl::deserializeRpcResult(Buffer::Instance& buf fmt::format("RpcResult is no value, but the rest of the body size({}) not equal 0", (body_size - total_size))); } - buffer.drain(body_size); + return result; } +size_t HessianDeserializerImpl::serializeRpcResult(Buffer::Instance& output_buffer, + const std::string& content, + RpcResponseType 
type) { + size_t origin_length = output_buffer.length(); + + // The serialized response type is compact int. + size_t serialized_size = HessianUtils::writeInt( + output_buffer, static_cast::type>(type)); + + // Serialized response content. + serialized_size += HessianUtils::writeString(output_buffer, content); + + ASSERT((output_buffer.length() - origin_length) == serialized_size); + + return serialized_size; +} + class HessianDeserializerConfigFactory : public DeserializerFactoryBase { public: HessianDeserializerConfigFactory() : DeserializerFactoryBase(SerializationType::Hessian) {} diff --git a/source/extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.h b/source/extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.h index b56c44fbf7774..0e3dbe363f9a2 100644 --- a/source/extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.h +++ b/source/extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.h @@ -14,9 +14,11 @@ class HessianDeserializerImpl : public Deserializer { return DeserializerNames::get().fromType(type()); } virtual SerializationType type() const override { return SerializationType::Hessian; } - virtual RpcInvocationPtr deserializeRpcInvocation(Buffer::Instance& buffer, - size_t body_size) override; + virtual void deserializeRpcInvocation(Buffer::Instance& buffer, size_t body_size, + MessageMetadataSharedPtr metadata) override; virtual RpcResultPtr deserializeRpcResult(Buffer::Instance& buffer, size_t body_size) override; + virtual size_t serializeRpcResult(Buffer::Instance& output_buffer, const std::string& content, + RpcResponseType type) override; }; } // namespace DubboProxy diff --git a/source/extensions/filters/network/dubbo_proxy/hessian_utils.cc b/source/extensions/filters/network/dubbo_proxy/hessian_utils.cc index 1dc6f55a2db9e..7d37531d71494 100644 --- a/source/extensions/filters/network/dubbo_proxy/hessian_utils.cc +++ b/source/extensions/filters/network/dubbo_proxy/hessian_utils.cc @@ -24,6 
+24,54 @@ typename std::enable_if::value, T>::type leftShift(T left, uin return left << bit_number; } +inline void addByte(Buffer::Instance& buffer, const uint8_t value) { buffer.add(&value, 1); } + +void addSeq(Buffer::Instance& buffer, const std::initializer_list& values) { + for (const int8_t& value : values) { + buffer.add(&value, 1); + } +} + +size_t doWriteString(Buffer::Instance& instance, absl::string_view str_view) { + const size_t length = str_view.length(); + constexpr size_t str_max_length = 0xffff; + constexpr size_t two_octet_max_lenth = 1024; + + if (length < 32) { + addByte(instance, static_cast(length)); + instance.add(str_view.data(), str_view.length()); + return length + sizeof(uint8_t); + } + + if (length < two_octet_max_lenth) { + uint8_t code = length >> 8; // 0x30 + length / 0x100 must less than 0x34 + uint8_t remain = length & 0xff; + std::initializer_list values{static_cast(0x30 + code), remain}; + addSeq(instance, values); + instance.add(str_view.data(), str_view.length()); + return length + values.size(); + } + + if (length <= str_max_length) { + uint8_t code = length >> 8; + uint8_t remain = length & 0xff; + std::initializer_list values{'S', code, remain}; + addSeq(instance, values); + instance.add(str_view.data(), str_view.length()); + return length + values.size(); + } + + std::initializer_list values{0x52, 0xff, 0xff}; + addSeq(instance, values); + instance.add(str_view.data(), str_max_length); + size_t size = str_max_length + values.size(); + ASSERT(size == (str_max_length + values.size())); + + size_t child_size = + doWriteString(instance, str_view.substr(str_max_length, length - str_max_length)); + return child_size + size; +} + } // namespace /* @@ -516,6 +564,16 @@ std::string HessianUtils::readByte(Buffer::Instance& buffer) { return result; } +size_t HessianUtils::writeString(Buffer::Instance& buffer, absl::string_view str) { + return doWriteString(buffer, str); +} + +size_t HessianUtils::writeInt(Buffer::Instance& buffer, 
uint8_t value) { + // Compact int + buffer.writeByte(0x90 + value); + return sizeof(uint8_t); +} + } // namespace DubboProxy } // namespace NetworkFilters } // namespace Extensions diff --git a/source/extensions/filters/network/dubbo_proxy/hessian_utils.h b/source/extensions/filters/network/dubbo_proxy/hessian_utils.h index 39ca52cbbd701..88f250442cf20 100644 --- a/source/extensions/filters/network/dubbo_proxy/hessian_utils.h +++ b/source/extensions/filters/network/dubbo_proxy/hessian_utils.h @@ -35,6 +35,9 @@ class HessianUtils { static void readNull(Buffer::Instance& buffer); static std::chrono::milliseconds readDate(Buffer::Instance& buffer); static std::string readByte(Buffer::Instance& buffer); + + static size_t writeString(Buffer::Instance& buffer, absl::string_view str); + static size_t writeInt(Buffer::Instance& buffer, uint8_t value); }; } // namespace DubboProxy diff --git a/source/extensions/filters/network/dubbo_proxy/message.h b/source/extensions/filters/network/dubbo_proxy/message.h index de2c1163f0489..81ecdae6b2f22 100644 --- a/source/extensions/filters/network/dubbo_proxy/message.h +++ b/source/extensions/filters/network/dubbo_proxy/message.h @@ -20,6 +20,11 @@ enum class SerializationType : uint8_t { enum class MessageType : uint8_t { Response = 0, Request = 1, + Oneway = 2, + Exception = 3, + + // ATTENTION: MAKE SURE THIS REMAINS EQUAL TO THE LAST MESSAGE TYPE + LastMessageType = Exception, }; /** @@ -39,6 +44,15 @@ enum class ResponseStatus : uint8_t { ServerThreadpoolExhaustedError = 100, }; +enum class RpcResponseType : uint8_t { + ResponseWithException = 0, + ResponseWithValue = 1, + ResponseWithNullValue = 2, + ResponseWithExceptionWithAttachments = 3, + ResponseValueWithAttachments = 4, + ResponseNullValueWithAttachments = 5, +}; + class Message { public: virtual ~Message() {} diff --git a/source/extensions/filters/network/dubbo_proxy/metadata.h b/source/extensions/filters/network/dubbo_proxy/metadata.h index 6fad8767fb41a..d67cd21a4a6fd 
100644 --- a/source/extensions/filters/network/dubbo_proxy/metadata.h +++ b/source/extensions/filters/network/dubbo_proxy/metadata.h @@ -8,7 +8,7 @@ #include "common/common/empty_string.h" #include "common/http/header_map_impl.h" -#include "extensions/filters/network/dubbo_proxy/protocol.h" +#include "extensions/filters/network/dubbo_proxy/message.h" #include "absl/types/optional.h" diff --git a/source/extensions/filters/network/dubbo_proxy/protocol.h b/source/extensions/filters/network/dubbo_proxy/protocol.h index a22c82dfd695a..26ae6f9c58d3c 100644 --- a/source/extensions/filters/network/dubbo_proxy/protocol.h +++ b/source/extensions/filters/network/dubbo_proxy/protocol.h @@ -11,6 +11,7 @@ #include "common/singleton/const_singleton.h" #include "extensions/filters/network/dubbo_proxy/message.h" +#include "extensions/filters/network/dubbo_proxy/metadata.h" namespace Envoy { namespace Extensions { @@ -56,7 +57,7 @@ typedef ConstSingleton ProtocolNames; */ class ProtocolCallbacks { public: - virtual ~ProtocolCallbacks() {} + virtual ~ProtocolCallbacks() = default; virtual void onRequestMessage(RequestMessagePtr&& req) PURE; virtual void onResponseMessage(ResponseMessagePtr&& res) PURE; }; @@ -69,9 +70,11 @@ class Protocol { struct Context { bool is_request_ = false; size_t body_size_ = 0; + size_t header_size_ = 0; + bool is_heartbeat_ = false; }; - virtual ~Protocol() {} - Protocol() {} + virtual ~Protocol() = default; + Protocol() = default; virtual const std::string& name() const PURE; /** @@ -84,12 +87,24 @@ class Protocol { * If successful, the message is removed from the buffer. * * @param buffer the currently buffered dubbo data. - * @param context save the meta data of current messages + * @param context save the meta data of current messages. + * @param metadata the meta data of current messages * @return bool true if a complete message was successfully consumed, false if more data * is required. 
* @throws EnvoyException if the data is not valid for this protocol. */ - virtual bool decode(Buffer::Instance& buffer, Context* context) PURE; + virtual bool decode(Buffer::Instance& buffer, Context* context, + MessageMetadataSharedPtr metadata) PURE; + + /* + * encodes the dubbo protocol message. + * + * @param buffer save the currently buffered dubbo data. + * @param metadata the meta data of dubbo protocol + * @return bool true if the protocol coding succeeds. + */ + virtual bool encode(Buffer::Instance& buffer, int32_t body_size, + const MessageMetadata& metadata) PURE; }; typedef std::unique_ptr ProtocolPtr; @@ -100,14 +115,13 @@ typedef std::unique_ptr ProtocolPtr; */ class NamedProtocolConfigFactory { public: - virtual ~NamedProtocolConfigFactory() {} + virtual ~NamedProtocolConfigFactory() = default; /** * Create a particular Dubbo protocol. - * @param callbacks the callbacks to be notified of protocol decodes. * @return protocol instance pointer. */ - virtual ProtocolPtr createProtocol(ProtocolCallbacks& callbacks) PURE; + virtual ProtocolPtr createProtocol() PURE; /** * @return std::string the identifying name for a particular implementation of Dubbo protocol @@ -130,9 +144,7 @@ class NamedProtocolConfigFactory { * ProtocolFactoryBase provides a template for a trivial NamedProtocolConfigFactory. 
*/ template class ProtocolFactoryBase : public NamedProtocolConfigFactory { - ProtocolPtr createProtocol(ProtocolCallbacks& callbacks) override { - return std::make_unique(callbacks); - } + ProtocolPtr createProtocol() override { return std::make_unique(); } std::string name() override { return name_; } diff --git a/source/extensions/filters/network/dubbo_proxy/router/BUILD b/source/extensions/filters/network/dubbo_proxy/router/BUILD index 674713cb072e0..790e94d00e3a9 100644 --- a/source/extensions/filters/network/dubbo_proxy/router/BUILD +++ b/source/extensions/filters/network/dubbo_proxy/router/BUILD @@ -18,7 +18,7 @@ envoy_cc_library( ) envoy_cc_library( - name = "router_matcher", + name = "route_matcher", srcs = ["route_matcher.cc"], hdrs = ["route_matcher.h"], deps = [ @@ -32,3 +32,39 @@ envoy_cc_library( "@envoy_api//envoy/config/filter/network/dubbo_proxy/v2alpha1:dubbo_proxy_cc", ], ) + +envoy_cc_library( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + deps = [ + ":router_lib", + "//include/envoy/registry", + "//source/extensions/filters/network/dubbo_proxy/filters:factory_base_lib", + "//source/extensions/filters/network/dubbo_proxy/filters:filter_config_interface", + "//source/extensions/filters/network/dubbo_proxy/filters:well_known_names", + "@envoy_api//envoy/config/filter/dubbo/router/v2alpha1:router_cc", + ], +) + +envoy_cc_library( + name = "router_lib", + srcs = ["router_impl.cc"], + hdrs = ["router_impl.h"], + deps = [ + ":router_interface", + "//include/envoy/tcp:conn_pool_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//include/envoy/upstream:load_balancer_interface", + "//include/envoy/upstream:thread_local_cluster_interface", + "//source/common/common:logger_lib", + "//source/common/http:header_utility_lib", + "//source/common/router:metadatamatchcriteria_lib", + "//source/common/upstream:load_balancer_lib", + "//source/extensions/filters/network/dubbo_proxy:app_exception_lib", + 
"//source/extensions/filters/network/dubbo_proxy:deserializer_interface", + "//source/extensions/filters/network/dubbo_proxy:protocol_interface", + "//source/extensions/filters/network/dubbo_proxy/filters:filter_interface", + "@envoy_api//envoy/config/filter/network/dubbo_proxy/v2alpha1:dubbo_proxy_cc", + ], +) diff --git a/source/extensions/filters/network/dubbo_proxy/router/config.cc b/source/extensions/filters/network/dubbo_proxy/router/config.cc new file mode 100644 index 0000000000000..4e4382a61bc3f --- /dev/null +++ b/source/extensions/filters/network/dubbo_proxy/router/config.cc @@ -0,0 +1,31 @@ +#include "extensions/filters/network/dubbo_proxy/router/config.h" + +#include "envoy/registry/registry.h" + +#include "extensions/filters/network/dubbo_proxy/router/router_impl.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DubboProxy { +namespace Router { + +DubboFilters::FilterFactoryCb RouterFilterConfig::createFilterFactoryFromProtoTyped( + const envoy::config::filter::dubbo::router::v2alpha1::Router&, const std::string&, + Server::Configuration::FactoryContext& context) { + return [&context](DubboFilters::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addDecoderFilter(std::make_shared(context.clusterManager())); + }; +} + +/** + * Static registration for the router filter. @see RegisterFactory. 
+ */ +static Registry::RegisterFactory + register_; + +} // namespace Router +} // namespace DubboProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/router/config.h b/source/extensions/filters/network/dubbo_proxy/router/config.h new file mode 100644 index 0000000000000..88145b4e21946 --- /dev/null +++ b/source/extensions/filters/network/dubbo_proxy/router/config.h @@ -0,0 +1,30 @@ +#pragma once + +#include "envoy/config/filter/dubbo/router/v2alpha1/router.pb.h" +#include "envoy/config/filter/dubbo/router/v2alpha1/router.pb.validate.h" + +#include "extensions/filters/network/dubbo_proxy/filters/factory_base.h" +#include "extensions/filters/network/dubbo_proxy/filters/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DubboProxy { +namespace Router { + +class RouterFilterConfig + : public DubboFilters::FactoryBase { +public: + RouterFilterConfig() : FactoryBase(DubboFilters::DubboFilterNames::get().ROUTER) {} + +private: + DubboFilters::FilterFactoryCb createFilterFactoryFromProtoTyped( + const envoy::config::filter::dubbo::router::v2alpha1::Router& proto_config, + const std::string& stat_prefix, Server::Configuration::FactoryContext& context) override; +}; + +} // namespace Router +} // namespace DubboProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/router/route_matcher.cc b/source/extensions/filters/network/dubbo_proxy/router/route_matcher.cc index 5b08076fe31e5..512b38e433989 100644 --- a/source/extensions/filters/network/dubbo_proxy/router/route_matcher.cc +++ b/source/extensions/filters/network/dubbo_proxy/router/route_matcher.cc @@ -64,15 +64,15 @@ ParameterRouteEntryImpl::ParameterRouteEntryImpl( ParameterRouteEntryImpl::~ParameterRouteEntryImpl() {} -bool ParameterRouteEntryImpl::matchParameter(const std::string& 
request_data, +bool ParameterRouteEntryImpl::matchParameter(absl::string_view request_data, const ParameterData& config_data) const { switch (config_data.match_type_) { case Http::HeaderUtility::HeaderMatchType::Value: return config_data.value_.empty() || request_data == config_data.value_; case Http::HeaderUtility::HeaderMatchType::Range: { int64_t value = 0; - return StringUtil::atol(request_data.c_str(), value, 10) && - value >= config_data.range_.start() && value < config_data.range_.end(); + return absl::SimpleAtoi(request_data, &value) && value >= config_data.range_.start() && + value < config_data.range_.end(); } default: NOT_REACHED_GCOVR_EXCL_LINE; diff --git a/source/extensions/filters/network/dubbo_proxy/router/route_matcher.h b/source/extensions/filters/network/dubbo_proxy/router/route_matcher.h index 54c4b9b9ce3c6..0cb6a27241735 100644 --- a/source/extensions/filters/network/dubbo_proxy/router/route_matcher.h +++ b/source/extensions/filters/network/dubbo_proxy/router/route_matcher.h @@ -34,7 +34,7 @@ class RouteEntryImplBase : public RouteEntry, // Router::RouteEntry const std::string& clusterName() const override; const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() const override { - NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + return metadata_match_criteria_.get(); } // Router::Route @@ -107,7 +107,7 @@ class ParameterRouteEntryImpl : public RouteEntryImplBase { uint64_t random_value) const override; private: - bool matchParameter(const std::string& request_data, const ParameterData& config_data) const; + bool matchParameter(absl::string_view request_data, const ParameterData& config_data) const; std::vector parameter_data_list_; }; diff --git a/source/extensions/filters/network/dubbo_proxy/router/router_impl.cc b/source/extensions/filters/network/dubbo_proxy/router/router_impl.cc new file mode 100644 index 0000000000000..7788ed82befc0 --- /dev/null +++ b/source/extensions/filters/network/dubbo_proxy/router/router_impl.cc @@ -0,0 +1,354 @@ 
+#include "extensions/filters/network/dubbo_proxy/router/router_impl.h" + +#include "envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.pb.h" +#include "envoy/upstream/cluster_manager.h" +#include "envoy/upstream/thread_local_cluster.h" + +#include "extensions/filters/network/dubbo_proxy/app_exception.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DubboProxy { +namespace Router { + +void Router::onDestroy() { + if (upstream_request_) { + upstream_request_->resetStream(); + } + cleanup(); +} + +void Router::setDecoderFilterCallbacks(DubboFilters::DecoderFilterCallbacks& callbacks) { + callbacks_ = &callbacks; +} + +Network::FilterStatus Router::transportBegin() { + upstream_request_buffer_.drain(upstream_request_buffer_.length()); + ProtocolDataPassthroughConverter::initProtocolConverter(upstream_request_buffer_); + return Network::FilterStatus::Continue; +} + +Network::FilterStatus Router::transportEnd() { + // If the connection fails, the callback of the filter will be suspended, + // so it is impossible to call the transportEnd interface. + // the encodeData function will be called only if the connection is successful. 
+ ASSERT(upstream_request_); + ASSERT(upstream_request_->conn_data_); + + upstream_request_->encodeData(upstream_request_buffer_); + + if (upstream_request_->metadata_->message_type() == MessageType::Oneway) { + // No response expected + upstream_request_->onResponseComplete(); + cleanup(); + ENVOY_LOG(debug, "dubbo upstream request: the message is one-way and no response is required"); + } + + filter_complete_ = true; + + return Network::FilterStatus::Continue; +} + +Network::FilterStatus Router::messageBegin(MessageType, int64_t, SerializationType) { + return Network::FilterStatus::Continue; +} + +Network::FilterStatus Router::messageEnd(MessageMetadataSharedPtr metadata) { + route_ = callbacks_->route(); + if (!route_) { + ENVOY_STREAM_LOG(debug, "dubbo router: no cluster match for interface '{}'", *callbacks_, + metadata->service_name()); + callbacks_->sendLocalReply(AppException(ResponseStatus::ServiceNotFound, + fmt::format("dubbo router: no route for interface '{}'", + metadata->service_name())), + false); + return Network::FilterStatus::StopIteration; + } + + route_entry_ = route_->routeEntry(); + + Upstream::ThreadLocalCluster* cluster = cluster_manager_.get(route_entry_->clusterName()); + if (!cluster) { + ENVOY_STREAM_LOG(debug, "dubbo router: unknown cluster '{}'", *callbacks_, + route_entry_->clusterName()); + callbacks_->sendLocalReply( + AppException(ResponseStatus::ServerError, fmt::format("dubbo router: unknown cluster '{}'", + route_entry_->clusterName())), + false); + return Network::FilterStatus::StopIteration; + } + + cluster_ = cluster->info(); + ENVOY_STREAM_LOG(debug, "dubbo router: cluster '{}' match for interface '{}'", *callbacks_, + route_entry_->clusterName(), metadata->service_name()); + + if (cluster_->maintenanceMode()) { + callbacks_->sendLocalReply( + AppException(ResponseStatus::ServerError, + fmt::format("dubbo router: maintenance mode for cluster '{}'", + route_entry_->clusterName())), + false); + return 
Network::FilterStatus::StopIteration; + } + + Tcp::ConnectionPool::Instance* conn_pool = cluster_manager_.tcpConnPoolForCluster( + route_entry_->clusterName(), Upstream::ResourcePriority::Default, this, nullptr); + if (!conn_pool) { + callbacks_->sendLocalReply( + AppException( + ResponseStatus::ServerError, + fmt::format("dubbo router: no healthy upstream for '{}'", route_entry_->clusterName())), + false); + return Network::FilterStatus::StopIteration; + } + + ENVOY_STREAM_LOG(debug, "dubbo router: decoding request", *callbacks_); + + upstream_request_ = std::make_unique(*this, *conn_pool, metadata, + callbacks_->downstreamSerializationType(), + callbacks_->downstreamProtocolType()); + return upstream_request_->start(); +} + +void Router::onUpstreamData(Buffer::Instance& data, bool end_stream) { + ASSERT(!upstream_request_->response_complete_); + + ENVOY_STREAM_LOG(trace, "dubbo router: reading response: {} bytes", *callbacks_, data.length()); + + // Handle normal response. + if (!upstream_request_->response_started_) { + callbacks_->startUpstreamResponse(*upstream_request_->deserializer_.get(), + *upstream_request_->protocol_.get()); + upstream_request_->response_started_ = true; + } + + DubboFilters::UpstreamResponseStatus status = callbacks_->upstreamData(data); + if (status == DubboFilters::UpstreamResponseStatus::Complete) { + ENVOY_STREAM_LOG(debug, "dubbo router: response complete", *callbacks_); + upstream_request_->onResponseComplete(); + cleanup(); + return; + } else if (status == DubboFilters::UpstreamResponseStatus::Reset) { + ENVOY_STREAM_LOG(debug, "dubbo router: upstream reset", *callbacks_); + // When the upstreamData function returns Reset, + // the current stream is already released from the upper layer, + // so there is no need to call callbacks_->resetStream() to notify + // the upper layer to release the stream. + upstream_request_->resetStream(); + return; + } + + if (end_stream) { + // Response is incomplete, but no more data is coming. 
+ ENVOY_STREAM_LOG(debug, "dubbo router: response underflow", *callbacks_); + upstream_request_->onResetStream( + Tcp::ConnectionPool::PoolFailureReason::RemoteConnectionFailure); + upstream_request_->onResponseComplete(); + cleanup(); + } +} + +void Router::onEvent(Network::ConnectionEvent event) { + if (!upstream_request_ || upstream_request_->response_complete_) { + // Client closed connection after completing response. + ENVOY_LOG(debug, "dubbo upstream request: the upstream request had completed"); + return; + } + + if (upstream_request_->stream_reset_ && event == Network::ConnectionEvent::LocalClose) { + ENVOY_LOG(debug, "dubbo upstream request: the stream reset"); + return; + } + + switch (event) { + case Network::ConnectionEvent::RemoteClose: + upstream_request_->onResetStream( + Tcp::ConnectionPool::PoolFailureReason::RemoteConnectionFailure); + break; + case Network::ConnectionEvent::LocalClose: + upstream_request_->onResetStream( + Tcp::ConnectionPool::PoolFailureReason::LocalConnectionFailure); + break; + default: + // Connected is consumed by the connection pool. + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +const Network::Connection* Router::downstreamConnection() const { + return callbacks_ != nullptr ? 
callbacks_->connection() : nullptr; +} + +void Router::cleanup() { + if (upstream_request_) { + upstream_request_.reset(); + } +} + +Router::UpstreamRequest::UpstreamRequest(Router& parent, Tcp::ConnectionPool::Instance& pool, + MessageMetadataSharedPtr& metadata, + SerializationType serialization_type, + ProtocolType protocol_type) + : parent_(parent), conn_pool_(pool), metadata_(metadata), + deserializer_( + NamedDeserializerConfigFactory::getFactory(serialization_type).createDeserializer()), + protocol_(NamedProtocolConfigFactory::getFactory(protocol_type).createProtocol()), + request_complete_(false), response_started_(false), response_complete_(false), + stream_reset_(false) {} + +Router::UpstreamRequest::~UpstreamRequest() {} + +Network::FilterStatus Router::UpstreamRequest::start() { + Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(*this); + if (handle) { + // Pause while we wait for a connection. + conn_pool_handle_ = handle; + return Network::FilterStatus::StopIteration; + } + + return Network::FilterStatus::Continue; +} + +void Router::UpstreamRequest::resetStream() { + stream_reset_ = true; + + if (conn_pool_handle_) { + ASSERT(!conn_data_); + conn_pool_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default); + conn_pool_handle_ = nullptr; + ENVOY_LOG(debug, "dubbo upstream request: reset connection pool handler"); + } + + if (conn_data_) { + ASSERT(!conn_pool_handle_); + conn_data_->connection().close(Network::ConnectionCloseType::NoFlush); + conn_data_.reset(); + ENVOY_LOG(debug, "dubbo upstream request: reset connection data"); + } +} + +void Router::UpstreamRequest::encodeData(Buffer::Instance& data) { + ASSERT(conn_data_); + ASSERT(!conn_pool_handle_); + + ENVOY_STREAM_LOG(trace, "proxying {} bytes", *parent_.callbacks_, data.length()); + conn_data_->connection().write(data, false); +} + +void Router::UpstreamRequest::onPoolFailure(Tcp::ConnectionPool::PoolFailureReason reason, + Upstream::HostDescriptionConstSharedPtr host) 
{ + conn_pool_handle_ = nullptr; + + // Mimic an upstream reset. + onUpstreamHostSelected(host); + onResetStream(reason); + + parent_.upstream_request_buffer_.drain(parent_.upstream_request_buffer_.length()); + + // If it is a connection error, it means that the connection pool returned + // the error asynchronously and the upper layer needs to be notified to continue decoding. + // If it is a non-connection error, it is returned synchronously from the connection pool + // and is still in the callback at the current Filter, nothing to do. + if (reason == Tcp::ConnectionPool::PoolFailureReason::Timeout || + reason == Tcp::ConnectionPool::PoolFailureReason::LocalConnectionFailure || + reason == Tcp::ConnectionPool::PoolFailureReason::RemoteConnectionFailure) { + parent_.callbacks_->continueDecoding(); + } +} + +void Router::UpstreamRequest::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, + Upstream::HostDescriptionConstSharedPtr host) { + ENVOY_LOG(debug, "dubbo upstream request: tcp connection has ready"); + + // Only invoke continueDecoding if we'd previously stopped the filter chain. 
+ bool continue_decoding = conn_pool_handle_ != nullptr; + + onUpstreamHostSelected(host); + conn_data_ = std::move(conn_data); + conn_data_->addUpstreamCallbacks(parent_); + conn_pool_handle_ = nullptr; + + onRequestStart(continue_decoding); +} + +void Router::UpstreamRequest::onRequestStart(bool continue_decoding) { + ENVOY_LOG(debug, "dubbo upstream request: start sending data to the server {}", + upstream_host_->address()->asString()); + + if (continue_decoding) { + parent_.callbacks_->continueDecoding(); + } + onRequestComplete(); +} + +void Router::UpstreamRequest::onRequestComplete() { request_complete_ = true; } + +void Router::UpstreamRequest::onResponseComplete() { + response_complete_ = true; + conn_data_.reset(); +} + +void Router::UpstreamRequest::onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) { + ENVOY_LOG(debug, "dubbo upstream request: selected upstream {}", host->address()->asString()); + upstream_host_ = host; +} + +void Router::UpstreamRequest::onResetStream(Tcp::ConnectionPool::PoolFailureReason reason) { + if (metadata_->message_type() == MessageType::Oneway) { + // For oneway requests, we should not attempt a response. Reset the downstream to signal + // an error. + ENVOY_LOG(debug, "dubbo upstream request: the request is oneway, reset downstream stream"); + parent_.callbacks_->resetStream(); + return; + } + + // When the filter's callback does not end, the sendLocalReply function call + // triggers the release of the current stream at the end of the filter's callback. 
+ switch (reason) { + case Tcp::ConnectionPool::PoolFailureReason::Overflow: + parent_.callbacks_->sendLocalReply( + AppException(ResponseStatus::ServerError, + fmt::format("dubbo upstream request: too many connections")), + false); + break; + case Tcp::ConnectionPool::PoolFailureReason::LocalConnectionFailure: + // Should only happen if we closed the connection, due to an error condition, in which case + // we've already handled any possible downstream response. + parent_.callbacks_->sendLocalReply( + AppException(ResponseStatus::ServerError, + fmt::format("dubbo upstream request: local connection failure '{}'", + upstream_host_->address()->asString())), + false); + break; + case Tcp::ConnectionPool::PoolFailureReason::RemoteConnectionFailure: + parent_.callbacks_->sendLocalReply( + AppException(ResponseStatus::ServerError, + fmt::format("dubbo upstream request: remote connection failure '{}'", + upstream_host_->address()->asString())), + false); + break; + case Tcp::ConnectionPool::PoolFailureReason::Timeout: + parent_.callbacks_->sendLocalReply( + AppException(ResponseStatus::ServerError, + fmt::format("dubbo upstream request: connection failure '{}' due to timeout", + upstream_host_->address()->asString())), + false); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + + if (parent_.filter_complete_ && !response_complete_) { + // When the filter's callback has ended and the reply message has not been processed, + // call resetStream to release the current stream. + // the resetStream eventually triggers the onDestroy function call. 
+ parent_.callbacks_->resetStream(); + } +} + +} // namespace Router +} // namespace DubboProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/router/router_impl.h b/source/extensions/filters/network/dubbo_proxy/router/router_impl.h new file mode 100644 index 0000000000000..63bcfa0e4ae4c --- /dev/null +++ b/source/extensions/filters/network/dubbo_proxy/router/router_impl.h @@ -0,0 +1,106 @@ +#pragma once + +#include +#include + +#include "envoy/buffer/buffer.h" +#include "envoy/tcp/conn_pool.h" + +#include "common/common/logger.h" +#include "common/upstream/load_balancer_impl.h" + +#include "extensions/filters/network/dubbo_proxy/filters/filter.h" +#include "extensions/filters/network/dubbo_proxy/router/router.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DubboProxy { +namespace Router { + +class Router : public Tcp::ConnectionPool::UpstreamCallbacks, + public Upstream::LoadBalancerContextBase, + public DubboFilters::DecoderFilter, + Logger::Loggable { +public: + Router(Upstream::ClusterManager& cluster_manager) : cluster_manager_(cluster_manager) {} + ~Router() {} + + // DubboFilters::DecoderFilter + void onDestroy() override; + void setDecoderFilterCallbacks(DubboFilters::DecoderFilterCallbacks& callbacks) override; + Network::FilterStatus transportBegin() override; + Network::FilterStatus transportEnd() override; + Network::FilterStatus messageBegin(MessageType type, int64_t message_id, + SerializationType serialization_type) override; + Network::FilterStatus messageEnd(MessageMetadataSharedPtr metadata) override; + + // Upstream::LoadBalancerContextBase + const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() override { return nullptr; } + const Network::Connection* downstreamConnection() const override; + + // Tcp::ConnectionPool::UpstreamCallbacks + void onUpstreamData(Buffer::Instance& data, bool end_stream) 
override; + void onEvent(Network::ConnectionEvent event) override; + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + +private: + struct UpstreamRequest : public Tcp::ConnectionPool::Callbacks { + UpstreamRequest(Router& parent, Tcp::ConnectionPool::Instance& pool, + MessageMetadataSharedPtr& metadata, SerializationType serialization_type, + ProtocolType protocol_type); + ~UpstreamRequest(); + + Network::FilterStatus start(); + void resetStream(); + void encodeData(Buffer::Instance& data); + + // Tcp::ConnectionPool::Callbacks + void onPoolFailure(Tcp::ConnectionPool::PoolFailureReason reason, + Upstream::HostDescriptionConstSharedPtr host) override; + void onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn, + Upstream::HostDescriptionConstSharedPtr host) override; + + void onRequestStart(bool continue_decoding); + void onRequestComplete(); + void onResponseComplete(); + void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host); + void onResetStream(Tcp::ConnectionPool::PoolFailureReason reason); + + Router& parent_; + Tcp::ConnectionPool::Instance& conn_pool_; + MessageMetadataSharedPtr metadata_; + + Tcp::ConnectionPool::Cancellable* conn_pool_handle_{}; + Tcp::ConnectionPool::ConnectionDataPtr conn_data_; + Upstream::HostDescriptionConstSharedPtr upstream_host_; + DeserializerPtr deserializer_; + ProtocolPtr protocol_; + + bool request_complete_ : 1; + bool response_started_ : 1; + bool response_complete_ : 1; + bool stream_reset_ : 1; + }; + + void cleanup(); + + Upstream::ClusterManager& cluster_manager_; + + DubboFilters::DecoderFilterCallbacks* callbacks_{}; + RouteConstSharedPtr route_{}; + const RouteEntry* route_entry_{}; + Upstream::ClusterInfoConstSharedPtr cluster_; + + std::unique_ptr upstream_request_; + Envoy::Buffer::OwnedImpl upstream_request_buffer_; + + bool filter_complete_{false}; +}; + +} // namespace Router +} // namespace DubboProxy +} // namespace NetworkFilters 
+} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/stats.h b/source/extensions/filters/network/dubbo_proxy/stats.h index b488401854d73..c86225b4ac97e 100644 --- a/source/extensions/filters/network/dubbo_proxy/stats.h +++ b/source/extensions/filters/network/dubbo_proxy/stats.h @@ -19,19 +19,22 @@ namespace DubboProxy { COUNTER(request_twoway) \ COUNTER(request_oneway) \ COUNTER(request_event) \ - COUNTER(request_invalid_type) \ COUNTER(request_decoding_error) \ + COUNTER(request_decoding_success) \ GAUGE(request_active) \ HISTOGRAM(request_time_ms) \ COUNTER(response) \ COUNTER(response_success) \ COUNTER(response_error) \ - COUNTER(response_exception) \ + COUNTER(response_error_caused_connection_close) \ + COUNTER(response_business_exception) \ COUNTER(response_decoding_error) \ + COUNTER(response_decoding_success) \ + COUNTER(local_response_success) \ + COUNTER(local_response_error) \ + COUNTER(local_response_business_exception) \ COUNTER(cx_destroy_local_with_active_rq) \ COUNTER(cx_destroy_remote_with_active_rq) \ - COUNTER(downstream_flow_control_paused_reading_total) \ - COUNTER(downstream_flow_control_resumed_reading_total) \ // clang-format on /** diff --git a/source/extensions/filters/network/ext_authz/config.cc b/source/extensions/filters/network/ext_authz/config.cc index 3321a601ce1ee..a4e383538813e 100644 --- a/source/extensions/filters/network/ext_authz/config.cc +++ b/source/extensions/filters/network/ext_authz/config.cc @@ -31,7 +31,7 @@ Network::FilterFactoryCb ExtAuthzConfigFactory::createFilterFactoryFromProtoType grpc_service, context.scope(), true); auto client = std::make_unique( - async_client_factory->create(), std::chrono::milliseconds(timeout_ms)); + async_client_factory->create(), std::chrono::milliseconds(timeout_ms), false); filter_manager.addReadFilter(Network::ReadFilterSharedPtr{ std::make_shared(ext_authz_config, std::move(client))}); }; diff --git 
a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index dcc0a58531f92..92cce54bd3092 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -99,15 +99,14 @@ HttpConnectionManagerFilterConfigFactory::createFilterFactoryFromProtoTyped( filter_manager.addReadFilter(Network::ReadFilterSharedPtr{new Http::ConnectionManagerImpl( *filter_config, context.drainDecision(), context.random(), context.httpContext(), context.runtime(), context.localInfo(), context.clusterManager(), - &context.overloadManager(), context.dispatcher().timeSystem())}); + &context.overloadManager(), context.dispatcher().timeSource())}); }; } Network::FilterFactoryCb HttpConnectionManagerFilterConfigFactory::createFilterFactory( const Json::Object& json_config, Server::Configuration::FactoryContext& context) { envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager proto_config; - Config::FilterJson::translateHttpConnectionManager(json_config, proto_config, - context.scope().statsOptions()); + Config::FilterJson::translateHttpConnectionManager(json_config, proto_config); return createFilterFactoryFromProtoTyped(proto_config, context); } @@ -127,9 +126,7 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( config, Server::Configuration::FactoryContext& context, Http::DateProvider& date_provider, Router::RouteConfigProviderManager& route_config_provider_manager) - : context_(context), reverse_encode_order_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( - config, bugfix_reverse_encode_order, true)), - stats_prefix_(fmt::format("http.{}.", config.stat_prefix())), + : context_(context), stats_prefix_(fmt::format("http.{}.", config.stat_prefix())), stats_(Http::ConnectionManagerImpl::generateStats(stats_prefix_, context_.scope())), tracing_stats_( 
Http::ConnectionManagerImpl::generateTracingStats(stats_prefix_, context_.scope())), @@ -152,7 +149,19 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( listener_stats_(Http::ConnectionManagerImpl::generateListenerStats(stats_prefix_, context_.listenerScope())), proxy_100_continue_(config.proxy_100_continue()), - delayed_close_timeout_(PROTOBUF_GET_MS_OR_DEFAULT(config, delayed_close_timeout, 1000)) { + delayed_close_timeout_(PROTOBUF_GET_MS_OR_DEFAULT(config, delayed_close_timeout, 1000)), + normalize_path_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( + config, normalize_path, + // TODO(htuch): we should have a + // boolean variant of featureEnabled() + // here. + context.runtime().snapshot().featureEnabled("http_connection_manager.normalize_path", +#ifdef ENVOY_NORMALIZE_PATH_BY_DEFAULT + 100 +#else + 0 +#endif + ))) { route_config_provider_ = Router::RouteConfigProviderUtil::create(config, context_, stats_prefix_, route_config_provider_manager_); @@ -229,9 +238,10 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( uint64_t overall_sampling{ PROTOBUF_PERCENT_TO_ROUNDED_INTEGER_OR_DEFAULT(tracing_config, overall_sampling, 100, 100)}; - tracing_config_ = std::make_unique( - Http::TracingConnectionManagerConfig{tracing_operation_name, request_headers_for_tags, - client_sampling, random_sampling, overall_sampling}); + tracing_config_ = + std::make_unique(Http::TracingConnectionManagerConfig{ + tracing_operation_name, request_headers_for_tags, client_sampling, random_sampling, + overall_sampling, tracing_config.verbose()}); } for (const auto& access_log : config.access_log()) { @@ -273,7 +283,7 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( throw EnvoyException( fmt::format("Error: multiple upgrade configs with the same name: '{}'", name)); } - if (upgrade_config.filters().size() > 0) { + if (!upgrade_config.filters().empty()) { std::unique_ptr factories = std::make_unique(); for (int32_t i = 0; i < upgrade_config.filters().size(); i++) { 
processFilter(upgrade_config.filters(i), i, name, *factories); @@ -291,7 +301,7 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( void HttpConnectionManagerConfig::processFilter( const envoy::config::filter::network::http_connection_manager::v2::HttpFilter& proto_config, int i, absl::string_view prefix, std::list& filter_factories) { - const ProtobufTypes::String& string_name = proto_config.name(); + const std::string& string_name = proto_config.name(); ENVOY_LOG(debug, " {} filter #{}", prefix, i); ENVOY_LOG(debug, " name: {}", string_name); @@ -322,11 +332,11 @@ HttpConnectionManagerConfig::createCodec(Network::Connection& connection, Http::ServerConnectionCallbacks& callbacks) { switch (codec_type_) { case CodecType::HTTP1: - return Http::ServerConnectionPtr{ - new Http::Http1::ServerConnectionImpl(connection, callbacks, http1_settings_)}; + return std::make_unique( + connection, callbacks, http1_settings_, maxRequestHeadersKb()); case CodecType::HTTP2: - return Http::ServerConnectionPtr{new Http::Http2::ServerConnectionImpl( - connection, callbacks, context_.scope(), http2_settings_, maxRequestHeadersKb())}; + return std::make_unique( + connection, callbacks, context_.scope(), http2_settings_, maxRequestHeadersKb()); case CodecType::AUTO: return Http::ConnectionManagerUtility::autoCreateCodec(connection, data, callbacks, context_.scope(), http1_settings_, diff --git a/source/extensions/filters/network/http_connection_manager/config.h b/source/extensions/filters/network/http_connection_manager/config.h index 58a102f5005f9..c1e7332b653c1 100644 --- a/source/extensions/filters/network/http_connection_manager/config.h +++ b/source/extensions/filters/network/http_connection_manager/config.h @@ -99,7 +99,6 @@ class HttpConnectionManagerConfig : Logger::Loggable, Http::DateProvider& dateProvider() override { return date_provider_; } std::chrono::milliseconds drainTimeout() override { return drain_timeout_; } FilterChainFactory& filterFactory() override { 
return *this; } - bool reverseEncodeOrder() override { return reverse_encode_order_; } bool generateRequestId() override { return generate_request_id_; } uint32_t maxRequestHeadersKb() const override { return max_request_headers_kb_; } absl::optional idleTimeout() const override { return idle_timeout_; } @@ -128,6 +127,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, Http::ConnectionManagerListenerStats& listenerStats() override { return listener_stats_; } bool proxy100Continue() const override { return proxy_100_continue_; } const Http::Http1Settings& http1Settings() const override { return http1_settings_; } + bool shouldNormalizePath() const override { return normalize_path_; } std::chrono::milliseconds delayedCloseTimeout() const override { return delayed_close_timeout_; } private: @@ -139,7 +139,6 @@ class HttpConnectionManagerConfig : Logger::Loggable, Server::Configuration::FactoryContext& context_; FilterFactoriesList filter_factories_; std::map upgrade_filter_factories_; - const bool reverse_encode_order_{}; std::list access_logs_; const std::string stats_prefix_; Http::ConnectionManagerStats stats_; @@ -169,6 +168,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, Http::ConnectionManagerListenerStats listener_stats_; const bool proxy_100_continue_; std::chrono::milliseconds delayed_close_timeout_; + const bool normalize_path_; // Default idle timeout is 5 minutes if nothing is specified in the HCM config. static const uint64_t StreamIdleTimeoutMs = 5 * 60 * 1000; diff --git a/source/extensions/filters/network/kafka/BUILD b/source/extensions/filters/network/kafka/BUILD new file mode 100644 index 0000000000000..37bfa7a5daba7 --- /dev/null +++ b/source/extensions/filters/network/kafka/BUILD @@ -0,0 +1,140 @@ +licenses(["notice"]) # Apache 2 + +# Kafka network filter. 
+# Public docs: docs/root/configuration/network_filters/kafka_filter.rst + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "kafka_request_codec_lib", + srcs = ["request_codec.cc"], + hdrs = [ + "codec.h", + "request_codec.h", + ], + deps = [ + ":kafka_request_parser_lib", + "//source/common/buffer:buffer_lib", + ], +) + +envoy_cc_library( + name = "kafka_request_parser_lib", + srcs = [ + "external/kafka_request_resolver.cc", + "kafka_request_parser.cc", + ], + hdrs = [ + "external/requests.h", + "kafka_request_parser.h", + ], + deps = [ + ":kafka_request_lib", + ":parser_lib", + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + ], +) + +envoy_cc_library( + name = "kafka_request_lib", + srcs = [ + ], + hdrs = [ + "kafka_request.h", + ], + deps = [ + ":serialization_lib", + ], +) + +genrule( + name = "kafka_generated_source", + srcs = [ + "@kafka_source//:request_protocol_files", + ], + outs = [ + "external/requests.h", + "external/kafka_request_resolver.cc", + ], + cmd = """ + ./$(location :kafka_code_generator) generate-source \ + $(location external/requests.h) $(location external/kafka_request_resolver.cc) \ + $(SRCS) + """, + tools = [ + ":kafka_code_generator", + ], +) + +py_binary( + name = "kafka_code_generator", + srcs = ["protocol_code_generator/kafka_generator.py"], + data = glob(["protocol_code_generator/*.j2"]), + main = "protocol_code_generator/kafka_generator.py", + deps = ["@com_github_pallets_jinja//:jinja2"], +) + +envoy_cc_library( + name = "parser_lib", + hdrs = ["parser.h"], + deps = [ + "//source/common/common:minimal_logger_lib", + ], +) + +envoy_cc_library( + name = "serialization_lib", + srcs = [ + "serialization.cc", + ], + hdrs = [ + "external/serialization_composite.h", + "serialization.h", + ], + deps = [ + ":kafka_types_lib", + "//include/envoy/buffer:buffer_interface", + "//source/common/common:byte_order_lib", + 
], +) + +genrule( + name = "serialization_composite_generated_source", + srcs = [], + outs = [ + "external/serialization_composite.h", + ], + cmd = """ + ./$(location :serialization_composite_generator) generate-source \ + $(location external/serialization_composite.h) + """, + tools = [ + ":serialization_composite_generator", + ], +) + +py_binary( + name = "serialization_composite_generator", + srcs = ["serialization_code_generator/serialization_composite_generator.py"], + data = glob(["serialization_code_generator/*.j2"]), + main = "serialization_code_generator/serialization_composite_generator.py", + deps = ["@com_github_pallets_jinja//:jinja2"], +) + +envoy_cc_library( + name = "kafka_types_lib", + hdrs = [ + "kafka_types.h", + ], + external_deps = ["abseil_optional"], + deps = [ + "//source/common/common:macros", + ], +) diff --git a/source/extensions/filters/network/kafka/codec.h b/source/extensions/filters/network/kafka/codec.h new file mode 100644 index 0000000000000..a58c284a052a1 --- /dev/null +++ b/source/extensions/filters/network/kafka/codec.h @@ -0,0 +1,43 @@ +#pragma once + +#include "envoy/buffer/buffer.h" +#include "envoy/common/pure.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { + +/** + * Kafka message decoder. + */ +class MessageDecoder { +public: + virtual ~MessageDecoder() = default; + + /** + * Processes given buffer attempting to decode messages contained within. + * @param data buffer instance. + */ + virtual void onData(Buffer::Instance& data) PURE; +}; + +/** + * Kafka message encoder. + * @param MessageType encoded message type (request or response). + */ +template class MessageEncoder { +public: + virtual ~MessageEncoder() = default; + + /** + * Encodes given message. + * @param message message to be encoded. 
+ */ + virtual void encode(const MessageType& message) PURE; +}; + +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/kafka_request.h b/source/extensions/filters/network/kafka/kafka_request.h new file mode 100644 index 0000000000000..258012cfb1ec5 --- /dev/null +++ b/source/extensions/filters/network/kafka/kafka_request.h @@ -0,0 +1,134 @@ +#pragma once + +#include "envoy/common/exception.h" + +#include "extensions/filters/network/kafka/external/serialization_composite.h" +#include "extensions/filters/network/kafka/serialization.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { + +/** + * Represents fields that are present in every Kafka request message. + * @see http://kafka.apache.org/protocol.html#protocol_messages + */ +struct RequestHeader { + int16_t api_key_; + int16_t api_version_; + int32_t correlation_id_; + NullableString client_id_; + + bool operator==(const RequestHeader& rhs) const { + return api_key_ == rhs.api_key_ && api_version_ == rhs.api_version_ && + correlation_id_ == rhs.correlation_id_ && client_id_ == rhs.client_id_; + }; +}; + +/** + * Carries information that could be extracted during the failed parse. + */ +class RequestParseFailure { +public: + RequestParseFailure(const RequestHeader& request_header) : request_header_{request_header} {}; + + /** + * Request's header. + */ + const RequestHeader request_header_; +}; + +typedef std::shared_ptr RequestParseFailureSharedPtr; + +/** + * Abstract Kafka request. + * Contains data present in every request (the header with request key, version, etc.). + * @see http://kafka.apache.org/protocol.html#protocol_messages + */ +class AbstractRequest { +public: + virtual ~AbstractRequest() = default; + + /** + * Constructs a request with given header data. + * @param request_header request's header. 
+ */ + AbstractRequest(const RequestHeader& request_header) : request_header_{request_header} {}; + + /** + * Computes the size of this request, if it were to be serialized. + * @return serialized size of request + */ + virtual uint32_t computeSize() const PURE; + + /** + * Encode the contents of this request into a given buffer. + * @param dst buffer instance to keep serialized message + */ + virtual uint32_t encode(Buffer::Instance& dst) const PURE; + + /** + * Request's header. + */ + const RequestHeader request_header_; +}; + +typedef std::shared_ptr AbstractRequestSharedPtr; + +/** + * Concrete request that carries data particular to given request type. + * @param Data concrete request data type. + */ +template class Request : public AbstractRequest { +public: + /** + * Request header fields need to be initialized by user in case of newly created requests. + */ + Request(const RequestHeader& request_header, const Data& data) + : AbstractRequest{request_header}, data_{data} {}; + + /** + * Compute the size of request, which includes both the request header and its real data. + */ + uint32_t computeSize() const override { + const EncodingContext context{request_header_.api_version_}; + uint32_t result{0}; + // Compute size of header. + result += context.computeSize(request_header_.api_key_); + result += context.computeSize(request_header_.api_version_); + result += context.computeSize(request_header_.correlation_id_); + result += context.computeSize(request_header_.client_id_); + // Compute size of request data. + result += context.computeSize(data_); + return result; + } + + /** + * Encodes given request into a buffer, with any extra configuration carried by the context. + */ + uint32_t encode(Buffer::Instance& dst) const override { + EncodingContext context{request_header_.api_version_}; + uint32_t written{0}; + // Encode request header. 
+ written += context.encode(request_header_.api_key_, dst); + written += context.encode(request_header_.api_version_, dst); + written += context.encode(request_header_.correlation_id_, dst); + written += context.encode(request_header_.client_id_, dst); + // Encode request-specific data. + written += context.encode(data_, dst); + return written; + } + + bool operator==(const Request& rhs) const { + return request_header_ == rhs.request_header_ && data_ == rhs.data_; + }; + +private: + const Data data_; +}; + +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/kafka_request_parser.cc b/source/extensions/filters/network/kafka/kafka_request_parser.cc new file mode 100644 index 0000000000000..b98f5b9b696d9 --- /dev/null +++ b/source/extensions/filters/network/kafka/kafka_request_parser.cc @@ -0,0 +1,62 @@ +#include "extensions/filters/network/kafka/kafka_request_parser.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { + +const RequestParserResolver& RequestParserResolver::getDefaultInstance() { + CONSTRUCT_ON_FIRST_USE(RequestParserResolver); +} + +RequestParseResponse RequestStartParser::parse(absl::string_view& data) { + request_length_.feed(data); + if (request_length_.ready()) { + context_->remaining_request_size_ = request_length_.get(); + return RequestParseResponse::nextParser( + std::make_shared(parser_resolver_, context_)); + } else { + return RequestParseResponse::stillWaiting(); + } +} + +RequestParseResponse RequestHeaderParser::parse(absl::string_view& data) { + const absl::string_view orig_data = data; + try { + context_->remaining_request_size_ -= deserializer_->feed(data); + } catch (const EnvoyException& e) { + // We were unable to compute the request header, but we still need to consume rest of request + // (some of the data might have been consumed during this attempt). 
+ const int32_t consumed = static_cast(orig_data.size() - data.size()); + context_->remaining_request_size_ -= consumed; + context_->request_header_ = {-1, -1, -1, absl::nullopt}; + return RequestParseResponse::nextParser(std::make_shared(context_)); + } + + if (deserializer_->ready()) { + RequestHeader request_header = deserializer_->get(); + context_->request_header_ = request_header; + RequestParserSharedPtr next_parser = parser_resolver_.createParser( + request_header.api_key_, request_header.api_version_, context_); + return RequestParseResponse::nextParser(next_parser); + } else { + return RequestParseResponse::stillWaiting(); + } +} + +RequestParseResponse SentinelParser::parse(absl::string_view& data) { + const uint32_t min = std::min(context_->remaining_request_size_, data.size()); + data = {data.data() + min, data.size() - min}; + context_->remaining_request_size_ -= min; + if (0 == context_->remaining_request_size_) { + return RequestParseResponse::parseFailure( + std::make_shared(context_->request_header_)); + } else { + return RequestParseResponse::stillWaiting(); + } +} + +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/kafka_request_parser.h b/source/extensions/filters/network/kafka/kafka_request_parser.h new file mode 100644 index 0000000000000..861d4dc4a3a9d --- /dev/null +++ b/source/extensions/filters/network/kafka/kafka_request_parser.h @@ -0,0 +1,194 @@ +#pragma once + +#include + +#include "envoy/common/exception.h" + +#include "common/common/assert.h" + +#include "extensions/filters/network/kafka/kafka_request.h" +#include "extensions/filters/network/kafka/parser.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { + +using RequestParseResponse = ParseResponse; +using RequestParser = Parser; +using RequestParserSharedPtr = std::shared_ptr; + +/** + * Context that is shared between parsers that are 
handling the same single message. + */ +struct RequestContext { + int32_t remaining_request_size_{0}; + RequestHeader request_header_{}; +}; + +typedef std::shared_ptr RequestContextSharedPtr; + +/** + * Request decoder configuration object. + * Resolves the parser that will be responsible for consuming the request-specific data. + * In other words: provides (api_key, api_version) -> Parser function. + */ +class RequestParserResolver { +public: + virtual ~RequestParserResolver() = default; + + /** + * Creates a parser that is going to process data specific for given api_key & api_version. + * @param api_key request type. + * @param api_version request version. + * @param context context to be used by parser. + * @return parser that is capable of processing data for given request type & version. + */ + virtual RequestParserSharedPtr createParser(int16_t api_key, int16_t api_version, + RequestContextSharedPtr context) const; + + /** + * Return default resolver, that uses request's api key and version to provide a matching parser. + */ + static const RequestParserResolver& getDefaultInstance(); +}; + +/** + * Request parser responsible for consuming request length and setting up context with this data. + * @see http://kafka.apache.org/protocol.html#protocol_common + */ +class RequestStartParser : public RequestParser { +public: + RequestStartParser(const RequestParserResolver& parser_resolver) + : parser_resolver_{parser_resolver}, context_{std::make_shared()} {}; + + /** + * Consumes 4 bytes (INT32) as request length and updates the context with that value. + * @return RequestHeaderParser instance to process request header. 
+ */ + RequestParseResponse parse(absl::string_view& data) override; + + const RequestContextSharedPtr contextForTest() const { return context_; } + +private: + const RequestParserResolver& parser_resolver_; + const RequestContextSharedPtr context_; + Int32Deserializer request_length_; +}; + +/** + * Deserializer that extracts request header (4 fields). + * Can throw, as one of the fields (client-id) can throw (nullable string with invalid length). + * @see http://kafka.apache.org/protocol.html#protocol_messages + */ +class RequestHeaderDeserializer + : public CompositeDeserializerWith4Delegates {}; + +typedef std::unique_ptr RequestHeaderDeserializerPtr; + +/** + * Parser responsible for extracting the request header and putting it into context. + * On a successful parse the resolved data (api_key & api_version) is used to determine the next + * parser. + * @see http://kafka.apache.org/protocol.html#protocol_messages + */ +class RequestHeaderParser : public RequestParser { +public: + // Default constructor. + RequestHeaderParser(const RequestParserResolver& parser_resolver, RequestContextSharedPtr context) + : RequestHeaderParser{parser_resolver, context, + std::make_unique()} {}; + + // Constructor visible for testing (allows for initial parser injection). + RequestHeaderParser(const RequestParserResolver& parser_resolver, RequestContextSharedPtr context, + RequestHeaderDeserializerPtr deserializer) + : parser_resolver_{parser_resolver}, context_{context}, deserializer_{ + std::move(deserializer)} {}; + + /** + * Uses data provided to compute request header. 
+ * @return Parser instance responsible for processing rest of the message + */ + RequestParseResponse parse(absl::string_view& data) override; + + const RequestContextSharedPtr contextForTest() const { return context_; } + +private: + const RequestParserResolver& parser_resolver_; + const RequestContextSharedPtr context_; + RequestHeaderDeserializerPtr deserializer_; +}; + +/** + * Sentinel parser that is responsible for consuming message bytes for messages that had unsupported + * api_key & api_version. It does not attempt to capture any data, just throws it away until end of + * message. + */ +class SentinelParser : public RequestParser { +public: + SentinelParser(RequestContextSharedPtr context) : context_{context} {}; + + /** + * Returns failed parse data. Ignores (jumps over) the data provided. + */ + RequestParseResponse parse(absl::string_view& data) override; + + const RequestContextSharedPtr contextForTest() const { return context_; } + +private: + const RequestContextSharedPtr context_; +}; + +/** + * Request parser uses a single deserializer to construct a request object. + * This parser is responsible for consuming request-specific data (e.g. topic names) and always + * returns a parsed message. + * @param RequestType request class. + * @param DeserializerType deserializer type corresponding to request class (should be subclass of + * Deserializer). + */ +template +class RequestDataParser : public RequestParser { +public: + /** + * Create a parser with given context. + * @param context parse context containing request header. + */ + RequestDataParser(RequestContextSharedPtr context) : context_{context} {}; + + /** + * Consume enough data to fill in deserializer and receive the parsed request. + * Fill in request's header with data stored in context. 
+ */ + RequestParseResponse parse(absl::string_view& data) override { + context_->remaining_request_size_ -= deserializer.feed(data); + + if (deserializer.ready()) { + if (0 == context_->remaining_request_size_) { + // After a successful parse, there should be nothing left - we have consumed all the bytes. + AbstractRequestSharedPtr msg = + std::make_shared>(context_->request_header_, deserializer.get()); + return RequestParseResponse::parsedMessage(msg); + } else { + // The message makes no sense, the deserializer that matches the schema consumed all + // necessary data, but there are still bytes in this message. + return RequestParseResponse::nextParser(std::make_shared(context_)); + } + } else { + return RequestParseResponse::stillWaiting(); + } + } + + const RequestContextSharedPtr contextForTest() const { return context_; } + +protected: + RequestContextSharedPtr context_; + DeserializerType deserializer; // underlying request-specific deserializer +}; + +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/kafka_types.h b/source/extensions/filters/network/kafka/kafka_types.h new file mode 100644 index 0000000000000..71d1ce920a82d --- /dev/null +++ b/source/extensions/filters/network/kafka/kafka_types.h @@ -0,0 +1,37 @@ +#pragma once + +#include +#include +#include + +#include "absl/types/optional.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { + +/** + * Nullable string used by Kafka. + */ +typedef absl::optional NullableString; + +/** + * Bytes array used by Kafka. + */ +typedef std::vector Bytes; + +/** + * Nullable bytes array used by Kafka. + */ +typedef absl::optional NullableBytes; + +/** + * Kafka array of elements of type T. 
+ */ +template using NullableArray = absl::optional>; + +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/parser.h b/source/extensions/filters/network/kafka/parser.h new file mode 100644 index 0000000000000..031aaaef1dbd3 --- /dev/null +++ b/source/extensions/filters/network/kafka/parser.h @@ -0,0 +1,94 @@ +#pragma once + +#include + +#include "common/common/logger.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { + +template class ParseResponse; + +/** + * Parser is responsible for consuming data relevant to some part of a message, and then returning + * the decision how the parsing should continue. + */ +template +class Parser : public Logger::Loggable { +public: + virtual ~Parser() = default; + + /** + * Submit data to be processed by parser, will consume as much data as it is necessary to reach + * the conclusion what should be the next parse step. + * @param data bytes to be processed, will be updated by parser if any have been consumed. + * @return parse status - decision what should be done with current parser (keep/replace). + */ + virtual ParseResponse parse(absl::string_view& data) PURE; +}; + +template +using ParserSharedPtr = std::shared_ptr>; + +/** + * Three-state holder representing one of: + * - parser still needs data (`stillWaiting`), + * - parser is finished, and following parser should be used to process the rest of data + * (`nextParser`), + * - parser is finished, and parse result is attached (`parsedMessage` or `parseFailure`). + */ +template class ParseResponse { +public: + /** + * Constructs a response that states that parser still needs data and should not be replaced. 
+ */ + static ParseResponse stillWaiting() { return {nullptr, nullptr, nullptr}; } + + /** + * Constructs a response that states that parser is finished and should be replaced by given + * parser. + */ + static ParseResponse nextParser(ParserSharedPtr next_parser) { + return {next_parser, nullptr, nullptr}; + }; + + /** + * Constructs a response that states that parser is finished, the message is ready, and parsing + * can start anew for next message. + */ + static ParseResponse parsedMessage(MessageType message) { return {nullptr, message, nullptr}; }; + + /** + * Constructs a response that states that parser is finished, the message could not be parsed + * properly, and parsing can start anew for next message. + */ + static ParseResponse parseFailure(FailureDataType failure_data) { + return {nullptr, nullptr, failure_data}; + }; + + /** + * If response contains a next parser or a parse result. + */ + bool hasData() const { + return (next_parser_ != nullptr) || (message_ != nullptr) || (failure_data_ != nullptr); + } + +private: + ParseResponse(ParserSharedPtr parser, MessageType message, + FailureDataType failure_data) + : next_parser_{parser}, message_{message}, failure_data_{failure_data} {}; + +public: + ParserSharedPtr next_parser_; + MessageType message_; + FailureDataType failure_data_; +}; + +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/protocol_code_generator/complex_type_template.j2 b/source/extensions/filters/network/kafka/protocol_code_generator/complex_type_template.j2 new file mode 100644 index 0000000000000..5a8cc2c534b2b --- /dev/null +++ b/source/extensions/filters/network/kafka/protocol_code_generator/complex_type_template.j2 @@ -0,0 +1,81 @@ +{# + Template for structure representing a composite entity in Kafka protocol (e.g. FetchRequest). + Rendered templates for each structure in Kafka protocol will be put into 'requests.h' file. 
+ + Each structure is capable of holding all versions of given entity (what means its fields are + actually a superset of union of all versions' fields). Each version has a dedicated deserializer + (named $requestV$versionDeserializer), which calls the matching constructor. + + To serialize, it is necessary to pass the encoding context (that contains the version that's + being serialized). Depending on the version, the fields will be written to the buffer. +#} +struct {{ complex_type.name }} { + + {# + Constructors invoked by deserializers. + Each constructor has a signature that matches the fields in at least one version (as sometimes + there are different Kafka versions that are actually composed of precisely the same fields). + #} + {% for field in complex_type.fields %} + const {{ field.field_declaration() }}_;{% endfor %} + {% for constructor in complex_type.compute_constructors() %} + // constructor used in versions: {{ constructor['versions'] }} + {{ constructor['full_declaration'] }}{% endfor %} + + {# For every field that's used in version, just compute its size using an encoder. #} + {% if complex_type.fields|length > 0 %} + uint32_t computeSize(const EncodingContext& encoder) const { + const int16_t api_version = encoder.apiVersion(); + uint32_t written{0};{% for field in complex_type.fields %} + if (api_version >= {{ field.version_usage[0] }} + && api_version < {{ field.version_usage[-1] + 1 }}) { + written += encoder.computeSize({{ field.name }}_); + }{% endfor %} + return written; + } + {% else %} + uint32_t computeSize(const EncodingContext&) const { + return 0; + } + {% endif %} + + {# For every field that's used in version, just serialize it. 
#} + {% if complex_type.fields|length > 0 %} + uint32_t encode(Buffer::Instance& dst, EncodingContext& encoder) const { + const int16_t api_version = encoder.apiVersion(); + uint32_t written{0};{% for field in complex_type.fields %} + if (api_version >= {{ field.version_usage[0] }} + && api_version < {{ field.version_usage[-1] + 1 }}) { + written += encoder.encode({{ field.name }}_, dst); + }{% endfor %} + return written; + } + {% else %} + uint32_t encode(Buffer::Instance&, EncodingContext&) const { + return 0; + } + {% endif %} + + {% if complex_type.fields|length > 0 %} + bool operator==(const {{ complex_type.name }}& rhs) const { + {% else %} + bool operator==(const {{ complex_type.name }}&) const { + {% endif %} + return true{% for field in complex_type.fields %} + && {{ field.name }}_ == rhs.{{ field.name }}_{% endfor %}; + }; + +}; + +{# + Each structure version has a deserializer that matches the structure's field list. +#} +{% for field_list in complex_type.compute_field_lists() %} +class {{ complex_type.name }}V{{ field_list.version }}Deserializer: + public CompositeDeserializerWith{{ field_list.field_count() }}Delegates< + {{ complex_type.name }} + {% for field in field_list.used_fields() %}, + {{ field.deserializer_name_in_version(field_list.version) }} + {% endfor %}>{}; +{% endfor %} + diff --git a/source/extensions/filters/network/kafka/protocol_code_generator/kafka_generator.py b/source/extensions/filters/network/kafka/protocol_code_generator/kafka_generator.py new file mode 100755 index 0000000000000..663c8a58fbf84 --- /dev/null +++ b/source/extensions/filters/network/kafka/protocol_code_generator/kafka_generator.py @@ -0,0 +1,532 @@ +#!/usr/bin/python + + +def main(): + """ + Kafka header generator script + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + Generates C++ headers from Kafka protocol specification. + Can generate both main source code, as well as test code. 
+ + Usage: + kafka_generator.py COMMAND OUTPUT FILES INPUT_FILES + where: + COMMAND : 'generate-source', to generate source files, + 'generate-test', to generate test files. + OUTPUT_FILES : if generate-source: location of 'requests.h' and 'kafka_request_resolver.cc', + if generate-test: location of 'requests_test.cc', 'request_codec_request_test.cc'. + INPUT_FILES: Kafka protocol json files to be processed. + + Kafka spec files are provided in Kafka clients jar file. + When generating source code, it creates: + - requests.h - definition of all the structures/deserializers/parsers related to Kafka requests, + - kafka_request_resolver.cc - resolver that binds api_key & api_version to parsers from + requests.h. + When generating test code, it creates: + - requests_test.cc - serialization/deserialization tests for kafka structures, + - request_codec_request_test.cc - test for all request operations using the codec API. + + Templates used are: + - to create 'requests.h': requests_h.j2, complex_type_template.j2, request_parser.j2, + - to create 'kafka_request_resolver.cc': kafka_request_resolver_cc.j2, + - to create 'requests_test.cc': requests_test_cc.j2, + - to create 'request_codec_request_test.cc' - request_codec_request_test_cc.j2. + """ + + import sys + import os + + command = sys.argv[1] + if 'generate-source' == command: + requests_h_file = os.path.abspath(sys.argv[2]) + kafka_request_resolver_cc_file = os.path.abspath(sys.argv[3]) + input_files = sys.argv[4:] + elif 'generate-test' == command: + requests_test_cc_file = os.path.abspath(sys.argv[2]) + request_codec_request_test_cc_file = os.path.abspath(sys.argv[3]) + input_files = sys.argv[4:] + else: + raise ValueError('invalid command: ' + command) + + import re + import json + + requests = [] + + # For each request specification file, remove comments, and parse the remains. 
+ for input_file in input_files: + with open(input_file, 'r') as fd: + raw_contents = fd.read() + without_comments = re.sub(r'//.*\n', '', raw_contents) + request_spec = json.loads(without_comments) + request = parse_request(request_spec) + requests.append(request) + + # Sort requests by api_key. + requests.sort(key=lambda x: x.get_extra('api_key')) + + # Generate main source code. + if 'generate-source' == command: + complex_type_template = RenderingHelper.get_template('complex_type_template.j2') + request_parsers_template = RenderingHelper.get_template('request_parser.j2') + + requests_h_contents = '' + + for request in requests: + # For each child structure that is used by request, render its corresponding C++ code. + for dependency in request.declaration_chain: + requests_h_contents += complex_type_template.render(complex_type=dependency) + # Each top-level structure (e.g. FetchRequest) is going to have corresponding parsers. + requests_h_contents += request_parsers_template.render(complex_type=request) + + # Full file with headers, namespace declaration etc. + template = RenderingHelper.get_template('requests_h.j2') + contents = template.render(contents=requests_h_contents) + + with open(requests_h_file, 'w') as fd: + fd.write(contents) + + template = RenderingHelper.get_template('kafka_request_resolver_cc.j2') + contents = template.render(request_types=requests) + + with open(kafka_request_resolver_cc_file, 'w') as fd: + fd.write(contents) + + # Generate test code. 
+ if 'generate-test' == command: + template = RenderingHelper.get_template('requests_test_cc.j2') + contents = template.render(request_types=requests) + + with open(requests_test_cc_file, 'w') as fd: + fd.write(contents) + + template = RenderingHelper.get_template('request_codec_request_test_cc.j2') + contents = template.render(request_types=requests) + + with open(request_codec_request_test_cc_file, 'w') as fd: + fd.write(contents) + + +def parse_request(spec): + """ + Parse a given structure into a request. + Request is just a complex type, that has name & version information kept in differently named + fields, compared to sub-structures in a request. + """ + request_type_name = spec['name'] + request_versions = Statics.parse_version_string(spec['validVersions'], 2 << 16 - 1) + return parse_complex_type(request_type_name, spec, request_versions).with_extra( + 'api_key', spec['apiKey']) + + +def parse_complex_type(type_name, field_spec, versions): + """ + Parse given complex type, returning a structure that holds its name, field specification and + allowed versions. + """ + fields = [] + for child_field in field_spec['fields']: + child = parse_field(child_field, versions[-1]) + fields.append(child) + return Complex(type_name, fields, versions) + + +def parse_field(field_spec, highest_possible_version): + """ + Parse given field, returning a structure holding the name, type, and versions when this field is + actually used (nullable or not). Obviously, field cannot be used in version higher than its + type's usage. 
+ """ + version_usage = Statics.parse_version_string(field_spec['versions'], highest_possible_version) + version_usage_as_nullable = Statics.parse_version_string( + field_spec['nullableVersions'], + highest_possible_version) if 'nullableVersions' in field_spec else range(-1) + parsed_type = parse_type(field_spec['type'], field_spec, highest_possible_version) + return FieldSpec(field_spec['name'], parsed_type, version_usage, version_usage_as_nullable) + + +def parse_type(type_name, field_spec, highest_possible_version): + """ + Parse a given type element - returns an array type, primitive (e.g. uint32_t) or complex one. + """ + if (type_name.startswith('[]')): + # In spec files, array types are defined as `[]underlying_type` instead of having its own + # element with type inside. + underlying_type = parse_type(type_name[2:], field_spec, highest_possible_version) + return Array(underlying_type) + else: + if (type_name in Primitive.PRIMITIVE_TYPE_NAMES): + return Primitive(type_name, field_spec.get('default')) + else: + versions = Statics.parse_version_string(field_spec['versions'], highest_possible_version) + return parse_complex_type(type_name, field_spec, versions) + + +class Statics: + + @staticmethod + def parse_version_string(raw_versions, highest_possible_version): + """ + Return integer range that corresponds to version string in spec file. + """ + if raw_versions.endswith('+'): + return range(int(raw_versions[:-1]), highest_possible_version + 1) + else: + if '-' in raw_versions: + tokens = raw_versions.split('-', 1) + return range(int(tokens[0]), int(tokens[1]) + 1) + else: + single_version = int(raw_versions) + return range(single_version, single_version + 1) + + +class FieldList: + """ + List of fields used by given entity (request or child structure) in given request version + (as fields get added or removed across versions). 
+ """ + + def __init__(self, version, fields): + self.version = version + self.fields = fields + + def used_fields(self): + """ + Return list of fields that are actually used in this version of structure. + """ + return filter(lambda x: x.used_in_version(self.version), self.fields) + + def constructor_signature(self): + """ + Return constructor signature. + Multiple versions of the same structure can have identical signatures (due to version bumps in + Kafka). + """ + parameter_spec = map(lambda x: x.parameter_declaration(self.version), self.used_fields()) + return ', '.join(parameter_spec) + + def constructor_init_list(self): + """ + Renders member initialization list in constructor. + Takes care of potential optional conversions (as field could be T in V1, but optional + in V2). + """ + init_list = [] + for field in self.fields: + if field.used_in_version(self.version): + if field.is_nullable(): + if field.is_nullable_in_version(self.version): + # Field is optional, and the parameter is optional in this version. + init_list_item = '%s_{%s}' % (field.name, field.name) + init_list.append(init_list_item) + else: + # Field is optional, and the parameter is T in this version. + init_list_item = '%s_{absl::make_optional(%s)}' % (field.name, field.name) + init_list.append(init_list_item) + else: + # Field is T, so parameter cannot be optional. + init_list_item = '%s_{%s}' % (field.name, field.name) + init_list.append(init_list_item) + else: + # Field is not used in this version, so we need to put in default value. + init_list_item = '%s_{%s}' % (field.name, field.default_value()) + init_list.append(init_list_item) + pass + return ', '.join(init_list) + + def field_count(self): + return len(self.used_fields()) + + def example_value(self): + return ', '.join(map(lambda x: x.example_value_for_test(self.version), self.used_fields())) + + +class FieldSpec: + """ + Represents a field present in a structure (request, or child structure thereof). 
+  Contains name, type, and versions when it is used (nullable or not).
+  """
+
+  def __init__(self, name, type, version_usage, version_usage_as_nullable):
+    import re
+    separated = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
+    self.name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', separated).lower()
+    self.type = type
+    self.version_usage = version_usage
+    self.version_usage_as_nullable = version_usage_as_nullable
+
+  def is_nullable(self):
+    return len(self.version_usage_as_nullable) > 0
+
+  def is_nullable_in_version(self, version):
+    """
+    Whether this field is nullable in given version.
+    Fields can be non-nullable in earlier versions.
+    See https://github.com/apache/kafka/tree/2.2.0-rc0/clients/src/main/resources/common/message#nullable-fields
+    """
+    return version in self.version_usage_as_nullable
+
+  def used_in_version(self, version):
+    return version in self.version_usage
+
+  def field_declaration(self):
+    if self.is_nullable():
+      return 'absl::optional<%s> %s' % (self.type.name, self.name)
+    else:
+      return '%s %s' % (self.type.name, self.name)
+
+  def parameter_declaration(self, version):
+    if self.is_nullable_in_version(version):
+      return 'absl::optional<%s> %s' % (self.type.name, self.name)
+    else:
+      return '%s %s' % (self.type.name, self.name)
+
+  def default_value(self):
+    if self.is_nullable():
+      return '{%s}' % self.type.default_value()
+    else:
+      return str(self.type.default_value())
+
+  def example_value_for_test(self, version):
+    if self.is_nullable():
+      return 'absl::make_optional<%s>(%s)' % (self.type.name,
+                                              self.type.example_value_for_test(version))
+    else:
+      return str(self.type.example_value_for_test(version))
+
+  def deserializer_name_in_version(self, version):
+    if self.is_nullable_in_version(version):
+      return 'Nullable%s' % self.type.deserializer_name_in_version(version)
+    else:
+      return self.type.deserializer_name_in_version(version)
+
+  def is_printable(self):
+    return self.type.is_printable()
+
+
+class TypeSpecification:
+
+  def 
deserializer_name_in_version(self, version): + """ + Renders the deserializer name of given type, in request with given version. + """ + raise NotImplementedError() + + def default_value(self): + """ + Returns a default value for given type. + """ + raise NotImplementedError() + + def example_value_for_test(self, version): + raise NotImplementedError() + + def is_printable(self): + raise NotImplementedError() + + +class Array(TypeSpecification): + """ + Represents array complex type. + To use instance of this type, it is necessary to declare structures required by self.underlying + (e.g. to use Array, we need to have `struct Foo {...}`). + """ + + def __init__(self, underlying): + self.underlying = underlying + self.declaration_chain = self.underlying.declaration_chain + + @property + def name(self): + return 'std::vector<%s>' % self.underlying.name + + def deserializer_name_in_version(self, version): + return 'ArrayDeserializer<%s, %s>' % (self.underlying.name, + self.underlying.deserializer_name_in_version(version)) + + def default_value(self): + return '{}' + + def example_value_for_test(self, version): + return 'std::vector<%s>{ %s }' % (self.underlying.name, + self.underlying.example_value_for_test(version)) + + def is_printable(self): + return self.underlying.is_printable() + + +class Primitive(TypeSpecification): + """ + Represents a Kafka primitive value. 
+ """ + + PRIMITIVE_TYPE_NAMES = ['bool', 'int8', 'int16', 'int32', 'int64', 'string', 'bytes'] + + KAFKA_TYPE_TO_ENVOY_TYPE = { + 'string': 'std::string', + 'bool': 'bool', + 'int8': 'int8_t', + 'int16': 'int16_t', + 'int32': 'int32_t', + 'int64': 'int64_t', + 'bytes': 'Bytes', + } + + KAFKA_TYPE_TO_DESERIALIZER = { + 'string': 'StringDeserializer', + 'bool': 'BooleanDeserializer', + 'int8': 'Int8Deserializer', + 'int16': 'Int16Deserializer', + 'int32': 'Int32Deserializer', + 'int64': 'Int64Deserializer', + 'bytes': 'BytesDeserializer', + } + + # See https://github.com/apache/kafka/tree/trunk/clients/src/main/resources/common/message#deserializing-messages + KAFKA_TYPE_TO_DEFAULT_VALUE = { + 'string': '""', + 'bool': 'false', + 'int8': '0', + 'int16': '0', + 'int32': '0', + 'int64': '0', + 'bytes': '{}', + } + + # Custom values that make test code more readable. + KAFKA_TYPE_TO_EXAMPLE_VALUE_FOR_TEST = { + 'string': '"string"', + 'bool': 'false', + 'int8': 'static_cast(8)', + 'int16': 'static_cast(16)', + 'int32': 'static_cast(32)', + 'int64': 'static_cast(64)', + 'bytes': 'Bytes({0, 1, 2, 3})', + } + + def __init__(self, name, custom_default_value): + self.original_name = name + self.name = Primitive.compute(name, Primitive.KAFKA_TYPE_TO_ENVOY_TYPE) + self.custom_default_value = custom_default_value + self.declaration_chain = [] + self.deserializer_name = Primitive.compute(name, Primitive.KAFKA_TYPE_TO_DESERIALIZER) + + @staticmethod + def compute(name, map): + if name in map: + return map[name] + else: + raise ValueError(name) + + def deserializer_name_in_version(self, version): + return self.deserializer_name + + def default_value(self): + if self.custom_default_value is not None: + return self.custom_default_value + else: + return Primitive.compute(self.original_name, Primitive.KAFKA_TYPE_TO_DEFAULT_VALUE) + + def example_value_for_test(self, version): + return Primitive.compute(self.original_name, Primitive.KAFKA_TYPE_TO_EXAMPLE_VALUE_FOR_TEST) + + def 
is_printable(self):
+    return self.name not in ['Bytes']
+
+
+class Complex(TypeSpecification):
+  """
+  Represents a complex type (multiple types aggregated into one).
+  This type gets mapped to a C++ struct.
+  """
+
+  def __init__(self, name, fields, versions):
+    self.name = name
+    self.fields = fields
+    self.versions = versions
+    self.declaration_chain = self.__compute_declaration_chain()
+    self.attributes = {}
+
+  def __compute_declaration_chain(self):
+    """
+    Computes all dependencies, which means all non-primitive types used by this type.
+    They need to be declared before this struct is declared.
+    """
+    result = []
+    for field in self.fields:
+      result.extend(field.type.declaration_chain)
+    result.append(self)
+    return result
+
+  def with_extra(self, key, value):
+    self.attributes[key] = value
+    return self
+
+  def get_extra(self, key):
+    return self.attributes[key]
+
+  def compute_constructors(self):
+    """
+    Field lists for different versions may not differ (as Kafka can bump version without any
+    changes). But constructors need to be unique, so we need to remove duplicates if the signatures
+    match.
+    """
+    signature_to_constructor = {}
+    for field_list in self.compute_field_lists():
+      signature = field_list.constructor_signature()
+      constructor = signature_to_constructor.get(signature)
+      if constructor is None:
+        entry = {}
+        entry['versions'] = [field_list.version]
+        entry['signature'] = signature
+        if (len(signature) > 0):
+          entry['full_declaration'] = '%s(%s): %s {};' % (self.name, signature,
+                                                          field_list.constructor_init_list())
+        else:
+          entry['full_declaration'] = '%s() {};' % self.name
+        signature_to_constructor[signature] = entry
+      else:
+        constructor['versions'].append(field_list.version)
+    return sorted(signature_to_constructor.values(), key=lambda x: x['versions'][0])
+
+  def compute_field_lists(self):
+    """
+    Return field lists representing each of structure versions.
+ """ + field_lists = [] + for version in self.versions: + field_list = FieldList(version, self.fields) + field_lists.append(field_list) + return field_lists + + def deserializer_name_in_version(self, version): + return '%sV%dDeserializer' % (self.name, version) + + def default_value(self): + raise NotImplementedError('unable to create default value of complex type') + + def example_value_for_test(self, version): + field_list = next(fl for fl in self.compute_field_lists() if fl.version == version) + example_values = map(lambda x: x.example_value_for_test(version), field_list.used_fields()) + return '%s(%s)' % (self.name, ', '.join(example_values)) + + def is_printable(self): + return True + + +class RenderingHelper: + """ + Helper for jinja templates. + """ + + @staticmethod + def get_template(template): + import jinja2 + import os + env = jinja2.Environment( + loader=jinja2.FileSystemLoader(searchpath=os.path.dirname(os.path.abspath(__file__)))) + return env.get_template(template) + + +if __name__ == "__main__": + main() diff --git a/source/extensions/filters/network/kafka/protocol_code_generator/kafka_request_resolver_cc.j2 b/source/extensions/filters/network/kafka/protocol_code_generator/kafka_request_resolver_cc.j2 new file mode 100644 index 0000000000000..d73f76955adca --- /dev/null +++ b/source/extensions/filters/network/kafka/protocol_code_generator/kafka_request_resolver_cc.j2 @@ -0,0 +1,37 @@ +{# + Template for 'kafka_request_resolver.cc'. + Defines default Kafka request resolver, that uses request parsers in (also generated) + 'requests.h'. +#} +#include "extensions/filters/network/kafka/external/requests.h" +#include "extensions/filters/network/kafka/kafka_request_parser.h" +#include "extensions/filters/network/kafka/parser.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { + +/** + * Creates a parser that corresponds to provided key and version. 
+ * If corresponding parser cannot be found (which means a newer version of Kafka protocol),
+ * a sentinel parser is returned.
+ * @param api_key Kafka request key
+ * @param api_version Kafka request's version
+ * @param context parse context
+ */
+RequestParserSharedPtr RequestParserResolver::createParser(int16_t api_key, int16_t api_version,
+                                                           RequestContextSharedPtr context) const {
+
+{% for request_type in request_types %}{% for field_list in request_type.compute_field_lists() %}
+  if ({{ request_type.get_extra('api_key') }} == api_key
+      && {{ field_list.version }} == api_version) {
+    return std::make_shared<{{ request_type.name }}V{{ field_list.version }}Parser>(context);
+  }{% endfor %}{% endfor %}
+  return std::make_shared<SentinelParser>(context);
+}
+
+} // namespace Kafka
+} // namespace NetworkFilters
+} // namespace Extensions
+} // namespace Envoy
diff --git a/source/extensions/filters/network/kafka/protocol_code_generator/request_codec_request_test_cc.j2 b/source/extensions/filters/network/kafka/protocol_code_generator/request_codec_request_test_cc.j2
new file mode 100644
index 0000000000000..c853563f8f8a9
--- /dev/null
+++ b/source/extensions/filters/network/kafka/protocol_code_generator/request_codec_request_test_cc.j2
@@ -0,0 +1,89 @@
+{#
+  Template for 'request_codec_request_test.cc'.
+
+  Provides integration tests using Kafka codec.
+  The tests do the following:
+  - create the message,
+  - serialize the message into buffer,
+  - pass the buffer to the codec,
+  - capture messages received in callback,
+  - verify that captured messages are identical to the ones sent.
+#}
+#include "extensions/filters/network/kafka/external/requests.h"
+#include "extensions/filters/network/kafka/request_codec.h"
+
+#include "test/extensions/filters/network/kafka/serialization_utilities.h"
+#include "test/mocks/server/mocks.h"
+
+#include "gtest/gtest.h"
+
+namespace Envoy {
+namespace Extensions {
+namespace NetworkFilters {
+namespace Kafka {
+namespace RequestCodecRequestTest {
+
+class RequestCodecRequestTest : public testing::Test {
+protected:
+  template <typename T> void putInBuffer(T arg);
+
+  Buffer::OwnedImpl buffer_;
+};
+
+{% for request_type in request_types %}
+
+// Integration test for {{ request_type.name }} messages.
+
+TEST_F(RequestCodecRequestTest, shouldHandle{{ request_type.name }}Messages) {
+  // given
+  using RequestUnderTest = Request<{{ request_type.name }}>;
+
+  std::vector<RequestUnderTest> sent;
+  int32_t correlation = 0;
+
+  {% for field_list in request_type.compute_field_lists() %}
+  for (int i = 0; i < 100; ++i ) {
+    const RequestHeader header =
+        { {{ request_type.get_extra('api_key') }}, {{ field_list.version }}, correlation++, "id" };
+    const {{ request_type.name }} data = { {{ field_list.example_value() }} };
+    const RequestUnderTest request = {header, data};
+    putInBuffer(request);
+    sent.push_back(request);
+  }
+  {% endfor %}
+
+  const InitialParserFactory& initial_parser_factory = InitialParserFactory::getDefaultInstance();
+  const RequestParserResolver& request_parser_resolver =
+      RequestParserResolver::getDefaultInstance();
+  const CapturingRequestCallbackSharedPtr request_callback =
+      std::make_shared<CapturingRequestCallback>();
+
+  RequestDecoder testee{initial_parser_factory, request_parser_resolver, {request_callback}};
+
+  // when
+  testee.onData(buffer_);
+
+  // then
+  const std::vector<AbstractRequestSharedPtr>& received = request_callback->getCaptured();
+  ASSERT_EQ(received.size(), sent.size());
+
+  for (size_t i = 0; i < received.size(); ++i) {
+    const std::shared_ptr<RequestUnderTest> request =
+        std::dynamic_pointer_cast<RequestUnderTest>(received[i]);
+    ASSERT_NE(request, nullptr);
+    ASSERT_EQ(*request, 
sent[i]); + } +} +{% endfor %} + +template +void RequestCodecRequestTest::putInBuffer(const T arg) { + RequestEncoder encoder{buffer_}; + encoder.encode(arg); +} + +} // namespace RequestCodecRequestTest +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/protocol_code_generator/request_parser.j2 b/source/extensions/filters/network/kafka/protocol_code_generator/request_parser.j2 new file mode 100644 index 0000000000000..db14f9e2a55cf --- /dev/null +++ b/source/extensions/filters/network/kafka/protocol_code_generator/request_parser.j2 @@ -0,0 +1,20 @@ +{# + Template for top-level structure representing a request in Kafka protocol (e.g. ProduceRequest). + Rendered templates for each request in Kafka protocol will be put into 'requests.h' file. + + This template handles binding the top-level structure deserializer + (e.g. ProduceRequestV0Deserializer) with RequestDataParser. These parsers are then used by + RequestParserResolver instance depending on received Kafka api key & api version + (see 'kafka_request_resolver_cc.j2'). +#} + +{% for version in complex_type.versions %}class {{ complex_type.name }}V{{ version }}Parser: + public RequestDataParser< + {{ complex_type.name }}, {{ complex_type.name }}V{{ version }}Deserializer> +{ +public: + {{ complex_type.name }}V{{ version }}Parser(RequestContextSharedPtr ctx) : + RequestDataParser{ctx} {}; +}; + +{% endfor %} \ No newline at end of file diff --git a/source/extensions/filters/network/kafka/protocol_code_generator/requests_h.j2 b/source/extensions/filters/network/kafka/protocol_code_generator/requests_h.j2 new file mode 100644 index 0000000000000..ff85d19410d07 --- /dev/null +++ b/source/extensions/filters/network/kafka/protocol_code_generator/requests_h.j2 @@ -0,0 +1,35 @@ +{# + Main template for 'requests.h' file. + Gets filled in (by 'contents') with Kafka request structures, deserializers, and parsers. 
+ + For each request we have the following: + - 1 top-level structure corresponding to the request (e.g. `struct FetchRequest`), + - N deserializers for top-level structure, one for each request version, + - N parsers binding each deserializer with parser, + - 0+ child structures (e.g. `struct FetchRequestTopic`, `FetchRequestPartition`) that are used by + the request's top-level structure, + - deserializers for each child structure. + + So for example, for FetchRequest we have: + - struct FetchRequest, + - FetchRequestV0Deserializer, FetchRequestV1Deserializer, FetchRequestV2Deserializer, etc., + - FetchRequestV0Parser, FetchRequestV1Parser, FetchRequestV2Parser, etc., + - struct FetchRequestTopic, + - FetchRequestTopicV0Deserializer, FetchRequestTopicV1Deserializer, etc. + (because topic data is present in every FetchRequest version), + - struct FetchRequestPartition, + - FetchRequestPartitionV0Deserializer, FetchRequestPartitionV1Deserializer, etc. + (because partition data is present in every FetchRequestTopic version). +#} +#pragma once +#include "extensions/filters/network/kafka/kafka_request.h" +#include "extensions/filters/network/kafka/kafka_request_parser.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { + +{{ contents }} + +}}}} diff --git a/source/extensions/filters/network/kafka/protocol_code_generator/requests_test_cc.j2 b/source/extensions/filters/network/kafka/protocol_code_generator/requests_test_cc.j2 new file mode 100644 index 0000000000000..d7ec7ae98ca4f --- /dev/null +++ b/source/extensions/filters/network/kafka/protocol_code_generator/requests_test_cc.j2 @@ -0,0 +1,79 @@ +{# + Template for request serialization/deserialization tests. + For every request, we want to check if it can be serialized and deserialized properly. 
+#} + +#include "extensions/filters/network/kafka/external/requests.h" +#include "extensions/filters/network/kafka/request_codec.h" + +#include "test/mocks/server/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace RequestTest { + +class RequestTest : public testing::Test { +public: + Buffer::OwnedImpl buffer_; + + template std::shared_ptr serializeAndDeserialize(T request); +}; + +class MockMessageListener : public RequestCallback { +public: + MOCK_METHOD1(onMessage, void(AbstractRequestSharedPtr)); + MOCK_METHOD1(onFailedParse, void(RequestParseFailureSharedPtr)); +}; + +/** + * Helper method. + * Takes an instance of a request, serializes it, then deserializes it. + * This method gets executed for every request * version pair. + */ +template std::shared_ptr RequestTest::serializeAndDeserialize(T request) { + RequestEncoder encoder{buffer_}; + encoder.encode(request); + + std::shared_ptr mock_listener = std::make_shared(); + RequestDecoder testee{RequestParserResolver::getDefaultInstance(), {mock_listener}}; + + AbstractRequestSharedPtr receivedMessage; + EXPECT_CALL(*mock_listener, onMessage(testing::_)) + .WillOnce(testing::SaveArg<0>(&receivedMessage)); + + testee.onData(buffer_); + + return std::dynamic_pointer_cast(receivedMessage); +}; + +{# + Concrete tests for each request_type and version (field_list). + Each request is naively constructed using some default values + (put "string" as std::string, 32 as uint32_t, etc.). 
+#} +{% for request_type in request_types %}{% for field_list in request_type.compute_field_lists() %} +TEST_F(RequestTest, shouldParse{{ request_type.name }}V{{ field_list.version }}) { + // given + {{ request_type.name }} data = { {{ field_list.example_value() }} }; + Request<{{ request_type.name }}> request = { { + {{ request_type.get_extra('api_key') }}, {{ field_list.version }}, 0, absl::nullopt }, data }; + + // when + auto received = serializeAndDeserialize(request); + + // then + ASSERT_NE(received, nullptr); + ASSERT_EQ(*received, request); +} +{% endfor %}{% endfor %} + +} // namespace RequestTest +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/request_codec.cc b/source/extensions/filters/network/kafka/request_codec.cc new file mode 100644 index 0000000000000..bf74e81e79d05 --- /dev/null +++ b/source/extensions/filters/network/kafka/request_codec.cc @@ -0,0 +1,91 @@ +#include "extensions/filters/network/kafka/request_codec.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/stack_array.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { + +class RequestStartParserFactory : public InitialParserFactory { + RequestParserSharedPtr create(const RequestParserResolver& parser_resolver) const override { + return std::make_shared(parser_resolver); + } +}; + +const InitialParserFactory& InitialParserFactory::getDefaultInstance() { + CONSTRUCT_ON_FIRST_USE(RequestStartParserFactory); +} + +void RequestDecoder::onData(Buffer::Instance& data) { + // Convert buffer to slices and pass them to `doParse`. 
+  uint64_t num_slices = data.getRawSlices(nullptr, 0);
+  STACK_ARRAY(slices, Buffer::RawSlice, num_slices);
+  data.getRawSlices(slices.begin(), num_slices);
+  for (const Buffer::RawSlice& slice : slices) {
+    doParse(slice);
+  }
+}
+
+/**
+ * Main parse loop:
+ * - forward data to current parser,
+ * - receive parser response:
+ * -- if still waiting, do nothing (we wait for more data),
+ * -- if a parser is given, replace current parser with the new one, and feed it the rest of the
+ * data
+ * -- if a message is given:
+ * --- notify callbacks,
+ * --- replace current parser with new start parser, as we are going to start parsing the next
+ * message.
+ */
+void RequestDecoder::doParse(const Buffer::RawSlice& slice) {
+  const char* bytes = reinterpret_cast<const char*>(slice.mem_);
+  absl::string_view data = {bytes, slice.len_};
+
+  while (!data.empty()) {
+
+    // Feed the data to the parser.
+    RequestParseResponse result = current_parser_->parse(data);
+    // This loop guarantees that parsers consuming 0 bytes also get processed in this invocation.
+    while (result.hasData()) {
+      if (!result.next_parser_) {
+
+        // Next parser is not present, so we have finished parsing a message.
+        // Depending on whether the parse was successful, invoke the correct callback.
+        if (result.message_) {
+          for (auto& callback : callbacks_) {
+            callback->onMessage(result.message_);
+          }
+        } else {
+          for (auto& callback : callbacks_) {
+            callback->onFailedParse(result.failure_data_);
+          }
+        }
+
+        // As we finished parsing this request, re-initialize the parser.
+        current_parser_ = factory_.create(parser_resolver_);
+      } else {
+
+        // The next parser that's supposed to consume the rest of payload was given.
+        current_parser_ = result.next_parser_;
+      }
+
+      // Keep parsing the data.
+      result = current_parser_->parse(data);
+    }
+  }
+}
+
+void RequestEncoder::encode(const AbstractRequest& message) {
+  const uint32_t size = htobe32(message.computeSize());
+  output_.add(&size, sizeof(size)); // Encode data length.
+ message.encode(output_); // Encode data. +} + +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/request_codec.h b/source/extensions/filters/network/kafka/request_codec.h new file mode 100644 index 0000000000000..c8a6b69f87973 --- /dev/null +++ b/source/extensions/filters/network/kafka/request_codec.h @@ -0,0 +1,127 @@ +#pragma once + +#include "envoy/buffer/buffer.h" +#include "envoy/common/pure.h" + +#include "extensions/filters/network/kafka/codec.h" +#include "extensions/filters/network/kafka/kafka_request.h" +#include "extensions/filters/network/kafka/kafka_request_parser.h" +#include "extensions/filters/network/kafka/parser.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { + +/** + * Callback invoked when request is successfully decoded. + */ +class RequestCallback { +public: + virtual ~RequestCallback() = default; + + /** + * Callback method invoked when request is successfully decoded. + * @param request request that has been decoded. + */ + virtual void onMessage(AbstractRequestSharedPtr request) PURE; + + /** + * Callback method invoked when request could not be decoded. + * Invoked after all request's bytes have been consumed. + */ + virtual void onFailedParse(RequestParseFailureSharedPtr failure_data) PURE; +}; + +typedef std::shared_ptr RequestCallbackSharedPtr; + +/** + * Provides initial parser for messages (class extracted to allow injecting test factories). + */ +class InitialParserFactory { +public: + virtual ~InitialParserFactory() = default; + + /** + * Creates default instance that returns RequestStartParser instances. + */ + static const InitialParserFactory& getDefaultInstance(); + + /** + * Creates parser with given context. + */ + virtual RequestParserSharedPtr create(const RequestParserResolver& parser_resolver) const PURE; +}; + +/** + * Decoder that decodes Kafka requests. 
+ * When a request is decoded, the callbacks are notified, in order. + * + * This decoder uses chain of parsers to parse fragments of a request. + * Each parser along the line returns the fully parsed message or the next parser. + * Stores parse state (as large message's payload can be provided through multiple `onData` calls). + */ +class RequestDecoder : public MessageDecoder { +public: + /** + * Creates a decoder that can decode requests specified by RequestParserResolver, notifying + * callbacks on successful decoding. + * @param parserResolver supported parser resolver. + * @param callbacks callbacks to be invoked (in order). + */ + RequestDecoder(const RequestParserResolver& parserResolver, + const std::vector callbacks) + : RequestDecoder(InitialParserFactory::getDefaultInstance(), parserResolver, callbacks){}; + + /** + * Visible for testing. + * Allows injecting initial parser factory. + */ + RequestDecoder(const InitialParserFactory& factory, const RequestParserResolver& parserResolver, + const std::vector callbacks) + : factory_{factory}, parser_resolver_{parserResolver}, callbacks_{callbacks}, + current_parser_{factory_.create(parser_resolver_)} {}; + + /** + * Consumes all data present in a buffer. + * If a request can be successfully parsed, then callbacks get notified with parsed request. + * Updates decoder state. + * Impl note: similar to redis codec, which also keeps state. + */ + void onData(Buffer::Instance& data) override; + +private: + void doParse(const Buffer::RawSlice& slice); + + const InitialParserFactory& factory_; + + const RequestParserResolver& parser_resolver_; + + const std::vector callbacks_; + + RequestParserSharedPtr current_parser_; +}; + +/** + * Encodes requests into underlying buffer. + */ +class RequestEncoder : public MessageEncoder { +public: + /** + * Wraps buffer with encoder. + */ + RequestEncoder(Buffer::Instance& output) : output_(output) {} + + /** + * Encodes request into wrapped buffer. 
+ */ + void encode(const AbstractRequest& message) override; + +private: + Buffer::Instance& output_; +}; + +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/serialization.cc b/source/extensions/filters/network/kafka/serialization.cc new file mode 100644 index 0000000000000..3282199a2a1d4 --- /dev/null +++ b/source/extensions/filters/network/kafka/serialization.cc @@ -0,0 +1,110 @@ +#include "extensions/filters/network/kafka/serialization.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { + +constexpr static int16_t NULL_STRING_LENGTH = -1; +constexpr static int32_t NULL_BYTES_LENGTH = -1; + +/** + * Helper method for deserializers that get the length of data, and then copy the given bytes into a + * local buffer. Templated as there are length and byte type differences. Impl note: This method + * modifies (sets up) most of Deserializer's fields. + * @param data bytes to deserialize. + * @param length_deserializer payload length deserializer. + * @param length_consumed_marker marker telling whether length has been extracted from + * length_deserializer, and underlying buffer has been initialized. + * @param required remaining bytes to consume. + * @param data_buffer buffer with capacity for 'required' bytes. + * @param ready marker telling whether this deserialized has finished processing. + * @param null_value_length value marking null values. + * @param allow_null_value whether null value if allowed. + * @return number of bytes consumed. 
+ */ +template +uint32_t feedBytesIntoBuffers(absl::string_view& data, DeserializerType& length_deserializer, + bool& length_consumed_marker, LengthType& required, + std::vector& data_buffer, bool& ready, + const LengthType null_value_length, const bool allow_null_value) { + + const uint32_t length_consumed = length_deserializer.feed(data); + if (!length_deserializer.ready()) { + // Break early: we still need to fill in length buffer. + return length_consumed; + } + + if (!length_consumed_marker) { + // Length buffer is ready, but we have not yet processed the result. + // We need to extract the real data length and initialize buffer for it. + required = length_deserializer.get(); + + if (required >= 0) { + data_buffer = std::vector(required); + } + + if (required == null_value_length) { + if (allow_null_value) { + // We have received 'null' value in deserializer that allows it (e.g. NullableBytes), no + // more processing is necessary. + ready = true; + } else { + // Invalid payload: null length for non-null object. + throw EnvoyException(fmt::format("invalid length: {}", required)); + } + } + + if (required < null_value_length) { + throw EnvoyException(fmt::format("invalid length: {}", required)); + } + + length_consumed_marker = true; + } + + if (ready) { + // Break early: we might not need to consume any bytes for nullable values OR in case of repeat + // invocation on already-ready buffer. + return length_consumed; + } + + const uint32_t data_consumed = std::min(required, data.size()); + const uint32_t written = data_buffer.size() - required; + if (data_consumed > 0) { + memcpy(data_buffer.data() + written, data.data(), data_consumed); + required -= data_consumed; + data = {data.data() + data_consumed, data.size() - data_consumed}; + } + + // We have consumed all the bytes, mark the deserializer as ready. 
+ if (required == 0) { + ready = true; + } + + return length_consumed + data_consumed; +} + +uint32_t StringDeserializer::feed(absl::string_view& data) { + return feedBytesIntoBuffers( + data, length_buf_, length_consumed_, required_, data_buf_, ready_, NULL_STRING_LENGTH, false); +} + +uint32_t NullableStringDeserializer::feed(absl::string_view& data) { + return feedBytesIntoBuffers( + data, length_buf_, length_consumed_, required_, data_buf_, ready_, NULL_STRING_LENGTH, true); +} + +uint32_t BytesDeserializer::feed(absl::string_view& data) { + return feedBytesIntoBuffers( + data, length_buf_, length_consumed_, required_, data_buf_, ready_, NULL_BYTES_LENGTH, false); +} + +uint32_t NullableBytesDeserializer::feed(absl::string_view& data) { + return feedBytesIntoBuffers( + data, length_buf_, length_consumed_, required_, data_buf_, ready_, NULL_BYTES_LENGTH, true); +} + +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/serialization.h b/source/extensions/filters/network/kafka/serialization.h new file mode 100644 index 0000000000000..430cbe29e7067 --- /dev/null +++ b/source/extensions/filters/network/kafka/serialization.h @@ -0,0 +1,719 @@ +#pragma once + +#include +#include +#include +#include + +#include "envoy/buffer/buffer.h" +#include "envoy/common/exception.h" +#include "envoy/common/pure.h" + +#include "common/common/byte_order.h" +#include "common/common/fmt.h" + +#include "extensions/filters/network/kafka/kafka_types.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { + +/** + * Deserializer is a stateful entity that constructs a result of type T from bytes provided. + * It can be feed()-ed data until it is ready, filling the internal store. + * When ready(), it is safe to call get() to transform the internally stored bytes into result. 
+ * Further feed()-ing should have no effect on a buffer (should return 0 and not move + * provided pointer). + * @param T type of deserialized data. + */ +template class Deserializer { +public: + virtual ~Deserializer() = default; + + /** + * Submit data to be processed, will consume as much data as it is necessary. + * If any bytes are consumed, then the provided string view is updated by stepping over consumed + * bytes. Invoking this method when deserializer is ready has no effect (consumes 0 bytes). + * @param data bytes to be processed, will be updated if any have been consumed. + * @return number of bytes consumed (equal to change in 'data'). + */ + virtual uint32_t feed(absl::string_view& data) PURE; + + /** + * Whether deserializer has consumed enough data to return result. + */ + virtual bool ready() const PURE; + + /** + * Returns the entity that is represented by bytes stored in this deserializer. + * Should be only called when deserializer is ready. + */ + virtual T get() const PURE; +}; + +/** + * Generic integer deserializer (uses array of sizeof(T) bytes). + * After all bytes are filled in, the value is converted from network byte-order and returned. + */ +template class IntDeserializer : public Deserializer { +public: + IntDeserializer() : written_{0}, ready_(false){}; + + uint32_t feed(absl::string_view& data) override { + const uint32_t available = std::min(sizeof(buf_) - written_, data.size()); + memcpy(buf_ + written_, data.data(), available); + written_ += available; + + if (written_ == sizeof(buf_)) { + ready_ = true; + } + + data = {data.data() + available, data.size() - available}; + + return available; + } + + bool ready() const override { return ready_; } + +protected: + char buf_[sizeof(T) / sizeof(char)]; + uint32_t written_; + bool ready_{false}; +}; + +/** + * Integer deserializer for int8_t. 
+ */ +class Int8Deserializer : public IntDeserializer { +public: + int8_t get() const override { + int8_t result; + memcpy(&result, buf_, sizeof(result)); + return result; + } +}; + +/** + * Integer deserializer for int16_t. + */ +class Int16Deserializer : public IntDeserializer { +public: + int16_t get() const override { + int16_t result; + memcpy(&result, buf_, sizeof(result)); + return be16toh(result); + } +}; + +/** + * Integer deserializer for int32_t. + */ +class Int32Deserializer : public IntDeserializer { +public: + int32_t get() const override { + int32_t result; + memcpy(&result, buf_, sizeof(result)); + return be32toh(result); + } +}; + +/** + * Integer deserializer for uint32_t. + */ +class UInt32Deserializer : public IntDeserializer { +public: + uint32_t get() const override { + uint32_t result; + memcpy(&result, buf_, sizeof(result)); + return be32toh(result); + } +}; + +/** + * Integer deserializer for int64_t. + */ +class Int64Deserializer : public IntDeserializer { +public: + int64_t get() const override { + int64_t result; + memcpy(&result, buf_, sizeof(result)); + return be64toh(result); + } +}; + +/** + * Deserializer for boolean values. + * Uses a single int8 deserializer, and checks whether the result equals 0. + * When reading a boolean value, any non-zero value is considered true. + * Impl note: could have been a subclass of IntDeserializer with a different get function, + * but it makes it harder to understand. + */ +class BooleanDeserializer : public Deserializer { +public: + BooleanDeserializer(){}; + + uint32_t feed(absl::string_view& data) override { return buffer_.feed(data); } + + bool ready() const override { return buffer_.ready(); } + + bool get() const override { return 0 != buffer_.get(); } + +private: + Int8Deserializer buffer_; +}; + +/** + * Deserializer of string value. + * First reads length (INT16) and then allocates the buffer of given length. + * + * From Kafka documentation: + * First the length N is given as an INT16. 
+ * Then N bytes follow which are the UTF-8 encoding of the character sequence. + * Length must not be negative. + */ +class StringDeserializer : public Deserializer { +public: + /** + * Can throw EnvoyException if given string length is not valid. + */ + uint32_t feed(absl::string_view& data) override; + + bool ready() const override { return ready_; } + + std::string get() const override { return std::string(data_buf_.begin(), data_buf_.end()); } + +private: + Int16Deserializer length_buf_; + bool length_consumed_{false}; + + int16_t required_; + std::vector data_buf_; + + bool ready_{false}; +}; + +/** + * Deserializer of nullable string value. + * First reads length (INT16) and then allocates the buffer of given length. + * If length was -1, buffer allocation is omitted and deserializer is immediately ready (returning + * null value). + * + * From Kafka documentation: + * For non-null strings, first the length N is given as an INT16. + * Then N bytes follow which are the UTF-8 encoding of the character sequence. + * A null value is encoded with length of -1 and there are no following bytes. + */ +class NullableStringDeserializer : public Deserializer { +public: + /** + * Can throw EnvoyException if given string length is not valid. + */ + uint32_t feed(absl::string_view& data) override; + + bool ready() const override { return ready_; } + + NullableString get() const override { + return required_ >= 0 ? absl::make_optional(std::string(data_buf_.begin(), data_buf_.end())) + : absl::nullopt; + } + +private: + Int16Deserializer length_buf_; + bool length_consumed_{false}; + + int16_t required_; + std::vector data_buf_; + + bool ready_{false}; +}; + +/** + * Deserializer of bytes value. + * First reads length (INT32) and then allocates the buffer of given length. + * + * From Kafka documentation: + * First the length N is given as an INT32. Then N bytes follow. 
+ */ +class BytesDeserializer : public Deserializer { +public: + /** + * Can throw EnvoyException if given bytes length is not valid. + */ + uint32_t feed(absl::string_view& data) override; + + bool ready() const override { return ready_; } + + Bytes get() const override { return data_buf_; } + +private: + Int32Deserializer length_buf_; + bool length_consumed_{false}; + int32_t required_; + + std::vector data_buf_; + bool ready_{false}; +}; + +/** + * Deserializer of nullable bytes value. + * First reads length (INT32) and then allocates the buffer of given length. + * If length was -1, buffer allocation is omitted and deserializer is immediately ready (returning + * null value). + * + * From Kafka documentation: + * For non-null values, first the length N is given as an INT32. Then N bytes follow. + * A null value is encoded with length of -1 and there are no following bytes. + */ +class NullableBytesDeserializer : public Deserializer { +public: + /** + * Can throw EnvoyException if given bytes length is not valid. + */ + uint32_t feed(absl::string_view& data) override; + + bool ready() const override { return ready_; } + + NullableBytes get() const override { + return required_ >= 0 ? absl::make_optional(data_buf_) : absl::nullopt; + } + +private: + Int32Deserializer length_buf_; + bool length_consumed_{false}; + int32_t required_; + + std::vector data_buf_; + bool ready_{false}; +}; + +/** + * Deserializer for array of objects of the same type. + * + * First reads the length of the array, then initializes N underlying deserializers of type + * DeserializerType. After the last of N deserializers is ready, the results of each of them are + * gathered and put in a vector. + * @param ResponseType result type returned by deserializer of type DeserializerType. + * @param DeserializerType underlying deserializer type. + * + * From Kafka documentation: + * Represents a sequence of objects of a given type T. Type T can be either a primitive type (e.g. 
+ * STRING) or a structure. First, the length N is given as an int32_t. Then N instances of type T + * follow. A null array is represented with a length of -1. + */ +template +class ArrayDeserializer : public Deserializer> { +public: + /** + * Can throw EnvoyException if array length is invalid or if underlying deserializer can throw. + */ + uint32_t feed(absl::string_view& data) override { + + const uint32_t length_consumed = length_buf_.feed(data); + if (!length_buf_.ready()) { + // Break early: we still need to fill in length buffer. + return length_consumed; + } + + if (!length_consumed_) { + required_ = length_buf_.get(); + if (required_ >= 0) { + children_ = std::vector(required_); + } else { + throw EnvoyException(fmt::format("invalid ARRAY length: {}", required_)); + } + length_consumed_ = true; + } + + if (ready_) { + return length_consumed; + } + + uint32_t child_consumed{0}; + for (DeserializerType& child : children_) { + child_consumed += child.feed(data); + } + + bool children_ready_ = true; + for (DeserializerType& child : children_) { + children_ready_ &= child.ready(); + } + ready_ = children_ready_; + + return length_consumed + child_consumed; + } + + bool ready() const override { return ready_; } + + std::vector get() const override { + std::vector result{}; + result.reserve(children_.size()); + for (const DeserializerType& child : children_) { + const ResponseType child_result = child.get(); + result.push_back(child_result); + } + return result; + } + +private: + Int32Deserializer length_buf_; + bool length_consumed_{false}; + int32_t required_; + std::vector children_; + bool children_setup_{false}; + bool ready_{false}; +}; + +/** + * Deserializer for nullable array of objects of the same type. + * + * First reads the length of the array, then initializes N underlying deserializers of type + * DeserializerType. After the last of N deserializers is ready, the results of each of them are + * gathered and put in a vector. 
+ * @param ResponseType result type returned by deserializer of type DeserializerType. + * @param DeserializerType underlying deserializer type. + * + * From Kafka documentation: + * Represents a sequence of objects of a given type T. Type T can be either a primitive type (e.g. + * STRING) or a structure. First, the length N is given as an int32_t. Then N instances of type T + * follow. A null array is represented with a length of -1. + */ +template +class NullableArrayDeserializer : public Deserializer> { +public: + /** + * Can throw EnvoyException if array length is invalid or if underlying deserializer can throw. + */ + uint32_t feed(absl::string_view& data) override { + + const uint32_t length_consumed = length_buf_.feed(data); + if (!length_buf_.ready()) { + // Break early: we still need to fill in length buffer. + return length_consumed; + } + + if (!length_consumed_) { + required_ = length_buf_.get(); + + if (required_ >= 0) { + children_ = std::vector(required_); + } + if (required_ == NULL_ARRAY_LENGTH) { + ready_ = true; + } + if (required_ < NULL_ARRAY_LENGTH) { + throw EnvoyException(fmt::format("invalid NULLABLE_ARRAY length: {}", required_)); + } + + length_consumed_ = true; + } + + if (ready_) { + return length_consumed; + } + + uint32_t child_consumed{0}; + for (DeserializerType& child : children_) { + child_consumed += child.feed(data); + } + + bool children_ready_ = true; + for (DeserializerType& child : children_) { + children_ready_ &= child.ready(); + } + ready_ = children_ready_; + + return length_consumed + child_consumed; + } + + bool ready() const override { return ready_; } + + NullableArray get() const override { + if (NULL_ARRAY_LENGTH != required_) { + std::vector result{}; + result.reserve(children_.size()); + for (const DeserializerType& child : children_) { + const ResponseType child_result = child.get(); + result.push_back(child_result); + } + return result; + } else { + return absl::nullopt; + } + } + +private: + constexpr static 
int32_t NULL_ARRAY_LENGTH{-1}; + + Int32Deserializer length_buf_; + bool length_consumed_{false}; + int32_t required_; + std::vector children_; + bool children_setup_{false}; + bool ready_{false}; +}; + +/** + * Encodes provided argument in Kafka format. + * In case of primitive types, this is done explicitly as per specification. + * In case of composite types, this is done by calling 'encode' on provided argument. + * + * This object also carries extra information that is used while traversing the request + * structure-tree during encoding (currently api_version, as different request versions serialize + * differently). + */ +// TODO(adamkotwasinski) that class might be split into Request/ResponseEncodingContext in future +class EncodingContext { +public: + EncodingContext(int16_t api_version) : api_version_{api_version} {}; + + /** + * Compute size of given reference, if it were to be encoded. + * @return serialized size of argument. + */ + template uint32_t computeSize(const T& arg) const; + + /** + * Compute size of given array, if it were to be encoded. + * @return serialized size of argument. + */ + template uint32_t computeSize(const std::vector& arg) const; + + /** + * Compute size of given nullable array, if it were to be encoded. + * @return serialized size of argument. + */ + template uint32_t computeSize(const NullableArray& arg) const; + + /** + * Encode given reference in a buffer. + * @return bytes written + */ + template uint32_t encode(const T& arg, Buffer::Instance& dst); + + /** + * Encode given array in a buffer. + * @return bytes written + */ + template uint32_t encode(const std::vector& arg, Buffer::Instance& dst); + + /** + * Encode given nullable array in a buffer. 
+ * @return bytes written + */ + template uint32_t encode(const NullableArray& arg, Buffer::Instance& dst); + + int16_t apiVersion() const { return api_version_; } + +private: + const int16_t api_version_; +}; + +/** + * For non-primitive types, call `computeSize` on them, to delegate the work to the entity itself. + * The entity may use the information in context to decide which fields are included etc. + */ +template inline uint32_t EncodingContext::computeSize(const T& arg) const { + return arg.computeSize(*this); +} + +/** + * For primitive types, Kafka size == sizeof(x). + */ +#define COMPUTE_SIZE_OF_NUMERIC_TYPE(TYPE) \ + template <> constexpr uint32_t EncodingContext::computeSize(const TYPE&) const { \ + return sizeof(TYPE); \ + } + +COMPUTE_SIZE_OF_NUMERIC_TYPE(bool) +COMPUTE_SIZE_OF_NUMERIC_TYPE(int8_t) +COMPUTE_SIZE_OF_NUMERIC_TYPE(int16_t) +COMPUTE_SIZE_OF_NUMERIC_TYPE(int32_t) +COMPUTE_SIZE_OF_NUMERIC_TYPE(uint32_t) +COMPUTE_SIZE_OF_NUMERIC_TYPE(int64_t) + +/** + * Template overload for string. + * Kafka String's size is INT16 for header + N bytes. + */ +template <> inline uint32_t EncodingContext::computeSize(const std::string& arg) const { + return sizeof(int16_t) + arg.size(); +} + +/** + * Template overload for nullable string. + * Kafka NullableString's size is INT16 for header + N bytes (N >= 0). + */ +template <> inline uint32_t EncodingContext::computeSize(const NullableString& arg) const { + return sizeof(int16_t) + (arg ? arg->size() : 0); +} + +/** + * Template overload for byte array. + * Kafka byte array size is INT32 for header + N bytes. + */ +template <> inline uint32_t EncodingContext::computeSize(const Bytes& arg) const { + return sizeof(int32_t) + arg.size(); +} + +/** + * Template overload for nullable byte array. + * Kafka nullable byte array size is INT32 for header + N bytes (N >= 0). + */ +template <> inline uint32_t EncodingContext::computeSize(const NullableBytes& arg) const { + return sizeof(int32_t) + (arg ? 
arg->size() : 0); +} + +/** + * Template overload for Array of T. + * The size of array is size of header and all of its elements. + */ +template +inline uint32_t EncodingContext::computeSize(const std::vector& arg) const { + uint32_t result = sizeof(int32_t); + for (const T& el : arg) { + result += computeSize(el); + } + return result; +} + +/** + * Template overload for NullableArray of T. + * The size of array is size of header and all of its elements. + */ +template +inline uint32_t EncodingContext::computeSize(const NullableArray& arg) const { + return arg ? computeSize(*arg) : sizeof(int32_t); +} + +/** + * For non-primitive types, call `encode` on them, to delegate the serialization to the entity + * itself. + */ +template inline uint32_t EncodingContext::encode(const T& arg, Buffer::Instance& dst) { + return arg.encode(dst, *this); +} + +/** + * Template overload for int8_t. + * Encode a single byte. + */ +template <> inline uint32_t EncodingContext::encode(const int8_t& arg, Buffer::Instance& dst) { + dst.add(&arg, sizeof(int8_t)); + return sizeof(int8_t); +} + +/** + * Template overload for int16_t, int32_t, uint32_t, int64_t. + * Encode a N-byte integer, converting to network byte-order. + */ +#define ENCODE_NUMERIC_TYPE(TYPE, CONVERTER) \ + template <> inline uint32_t EncodingContext::encode(const TYPE& arg, Buffer::Instance& dst) { \ + const TYPE val = CONVERTER(arg); \ + dst.add(&val, sizeof(TYPE)); \ + return sizeof(TYPE); \ + } + +ENCODE_NUMERIC_TYPE(int16_t, htobe16); +ENCODE_NUMERIC_TYPE(int32_t, htobe32); +ENCODE_NUMERIC_TYPE(uint32_t, htobe32); +ENCODE_NUMERIC_TYPE(int64_t, htobe64); + +/** + * Template overload for bool. + * Encode boolean as a single byte. + */ +template <> inline uint32_t EncodingContext::encode(const bool& arg, Buffer::Instance& dst) { + int8_t val = arg; + dst.add(&val, sizeof(int8_t)); + return sizeof(int8_t); +} + +/** + * Template overload for std::string. + * Encode string as INT16 length + N bytes. 
+ */ +template <> inline uint32_t EncodingContext::encode(const std::string& arg, Buffer::Instance& dst) { + int16_t string_length = arg.length(); + uint32_t header_length = encode(string_length, dst); + dst.add(arg.c_str(), string_length); + return header_length + string_length; +} + +/** + * Template overload for NullableString. + * Encode nullable string as INT16 length + N bytes (length = -1 for null). + */ +template <> +inline uint32_t EncodingContext::encode(const NullableString& arg, Buffer::Instance& dst) { + if (arg.has_value()) { + return encode(*arg, dst); + } else { + const int16_t len = -1; + return encode(len, dst); + } +} + +/** + * Template overload for Bytes. + * Encode byte array as INT32 length + N bytes. + */ +template <> inline uint32_t EncodingContext::encode(const Bytes& arg, Buffer::Instance& dst) { + const int32_t data_length = arg.size(); + const uint32_t header_length = encode(data_length, dst); + dst.add(arg.data(), arg.size()); + return header_length + data_length; +} + +/** + * Template overload for NullableBytes. + * Encode nullable byte array as INT32 length + N bytes (length = -1 for null value). + */ +template <> +inline uint32_t EncodingContext::encode(const NullableBytes& arg, Buffer::Instance& dst) { + if (arg.has_value()) { + return encode(*arg, dst); + } else { + const int32_t len = -1; + return encode(len, dst); + } +} + +/** + * Encode nullable object array to T as INT32 length + N elements. + * Each element of type T then serializes itself on its own. + */ +template +uint32_t EncodingContext::encode(const std::vector& arg, Buffer::Instance& dst) { + const NullableArray wrapped = {arg}; + return encode(wrapped, dst); +} + +/** + * Encode nullable object array to T as INT32 length + N elements (length = -1 for null value). + * Each element of type T then serializes itself on its own. 
+ */ +template +uint32_t EncodingContext::encode(const NullableArray& arg, Buffer::Instance& dst) { + if (arg.has_value()) { + const int32_t len = arg->size(); + const uint32_t header_length = encode(len, dst); + uint32_t written{0}; + for (const T& el : *arg) { + // For each of array elements, resolve the correct method again. + // Elements could be primitives or complex types, so calling encode() on object won't work. + written += encode(el, dst); + } + return header_length + written; + } else { + const int32_t len = -1; + return encode(len, dst); + } +} + +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/serialization_code_generator/serialization_composite_generator.py b/source/extensions/filters/network/kafka/serialization_code_generator/serialization_composite_generator.py new file mode 100755 index 0000000000000..100bf7593c71e --- /dev/null +++ b/source/extensions/filters/network/kafka/serialization_code_generator/serialization_composite_generator.py @@ -0,0 +1,78 @@ +#!/usr/bin/python + + +def main(): + """ + Serialization composite generator script + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + Generates main&test source code files for composite deserializers. + The files are generated, as they are extremely repetitive (composite deserializer for 0..9 + sub-deserializers). + + Usage: + serialization_composite_generator.py COMMAND LOCATION_OF_OUTPUT_FILE + where: + COMMAND : 'generate-source', to generate source files, + 'generate-test', to generate test files. + LOCATION_OF_OUTPUT_FILE : if generate-source: location of 'serialization_composite.h', + if generate-test: location of 'serialization_composite_test.cc'. + + When generating source code, it creates: + - serialization_composite.h - header with declarations of CompositeDeserializerWith???Delegates + classes. + When generating test code, it creates: + - serialization_composite_test.cc - tests for these classes. 
+ + Templates used are: + - to create 'serialization_composite.h': serialization_composite_h.j2, + - to create 'serialization_composite_test.cc': serialization_composite_test_cc.j2. + """ + + import sys + import os + + command = sys.argv[1] + if 'generate-source' == command: + serialization_composite_h_file = os.path.abspath(sys.argv[2]) + elif 'generate-test' == command: + serialization_composite_test_cc_file = os.path.abspath(sys.argv[2]) + else: + raise ValueError('invalid command: ' + command) + + import re + import json + + # Number of fields deserialized by each deserializer class. + field_counts = range(1, 10) + + # Generate main source code. + if 'generate-source' == command: + template = RenderingHelper.get_template('serialization_composite_h.j2') + contents = template.render(counts=field_counts) + with open(serialization_composite_h_file, 'w') as fd: + fd.write(contents) + + # Generate test code. + if 'generate-test' == command: + template = RenderingHelper.get_template('serialization_composite_test_cc.j2') + contents = template.render(counts=field_counts) + with open(serialization_composite_test_cc_file, 'w') as fd: + fd.write(contents) + + +class RenderingHelper: + """ + Helper for jinja templates. + """ + + @staticmethod + def get_template(template): + import jinja2 + import os + env = jinja2.Environment( + loader=jinja2.FileSystemLoader(searchpath=os.path.dirname(os.path.abspath(__file__)))) + return env.get_template(template) + + +if __name__ == "__main__": + main() diff --git a/source/extensions/filters/network/kafka/serialization_code_generator/serialization_composite_h.j2 b/source/extensions/filters/network/kafka/serialization_code_generator/serialization_composite_h.j2 new file mode 100644 index 0000000000000..83a7be7a25f35 --- /dev/null +++ b/source/extensions/filters/network/kafka/serialization_code_generator/serialization_composite_h.j2 @@ -0,0 +1,100 @@ +{# + Creates 'serialization_composite.h'. 
+ + Template for composite serializers (the CompositeDeserializerWith_N_Delegates classes). + Covers the corner case of 0 delegates, and then uses templating to create declarations for 1..N + variants. +#} +#pragma once + +#include +#include +#include +#include + +#include "envoy/buffer/buffer.h" +#include "envoy/common/exception.h" +#include "envoy/common/pure.h" + +#include "common/common/byte_order.h" +#include "common/common/fmt.h" + +#include "extensions/filters/network/kafka/kafka_types.h" +#include "extensions/filters/network/kafka/serialization.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { + +/** + * This header contains only composite deserializers. + * The basic design is composite deserializer creating delegates DeserializerType1..N. + * Result of type ResponseType is constructed by getting results of each of delegates. + * These deserializers can throw, if any of the delegate deserializers can. + */ + +/** + * Composite deserializer that uses 0 deserializer(s) (corner case). + * Does not consume any bytes, and is always ready to return the result. + * Creates a result value using the no-arg ResponseType constructor. + * @param ResponseType type of deserialized data. + */ +template +class CompositeDeserializerWith0Delegates : public Deserializer { +public: + CompositeDeserializerWith0Delegates(){}; + uint32_t feed(absl::string_view&) override { return 0; } + bool ready() const override { return true; } + ResponseType get() const override { return {}; } +}; + +{% for field_count in counts %} +/** + * Composite deserializer that uses {{ field_count }} deserializer(s). + * Passes data to each of the underlying deserializers (deserializers that are already ready do not + * consume data, so it's safe). + * The composite deserializer is ready when the last deserializer is ready (what means that all + * deserializers before it are ready too). 
+ * Constructs the result of type ResponseType using { delegate1_.get(), delegate2_.get() ... }. + * + * @param ResponseType type of deserialized data{% for field in range(1, field_count + 1) %}. + * @param DeserializerType{{ field }} deserializer {{ field }}. +{% endfor %} */ +template < + typename ResponseType{% for field in range(1, field_count + 1) %}, + typename DeserializerType{{ field }}{% endfor %} +> +class CompositeDeserializerWith{{ field_count }}Delegates : public Deserializer { +public: + CompositeDeserializerWith{{ field_count }}Delegates(){}; + + uint32_t feed(absl::string_view& data) override { + uint32_t consumed = 0; + {% for field in range(1, field_count + 1) %} + consumed += delegate{{ field }}_.feed(data); + {% endfor %} + return consumed; + } + + bool ready() const override { return delegate{{ field_count }}_.ready(); } + + ResponseType get() const override { + return { + {% for field in range(1, field_count + 1) %}delegate{{ field }}_.get(), + {% endfor %}}; + } + +protected: + {% for field in range(1, field_count + 1) %} + DeserializerType{{ field }} delegate{{ field }}_; + {% endfor %} +}; +{% endfor %} + +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/serialization_code_generator/serialization_composite_test_cc.j2 b/source/extensions/filters/network/kafka/serialization_code_generator/serialization_composite_test_cc.j2 new file mode 100644 index 0000000000000..1b52340a6cdab --- /dev/null +++ b/source/extensions/filters/network/kafka/serialization_code_generator/serialization_composite_test_cc.j2 @@ -0,0 +1,90 @@ +{# + Creates 'serialization_composite_test.cc'. + + Template for composite serializer tests (the CompositeDeserializerWith_N_Delegates classes). + Covers the corner case of 0 delegates, and then uses templating to create tests for 1..N cases. 
+#} + +#include "extensions/filters/network/kafka/external/serialization_composite.h" + +#include "test/extensions/filters/network/kafka/serialization_utilities.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { + +/** + * Tests in this class are supposed to check whether serialization operations on composite + * deserializers are correct. + */ + +// Tests for composite deserializer with 0 fields (corner case). + +struct CompositeResultWith0Fields { + uint32_t encode(Buffer::Instance&, EncodingContext&) const { return 0; } + bool operator==(const CompositeResultWith0Fields&) const { return true; } +}; + +typedef CompositeDeserializerWith0Delegates TestCompositeDeserializer0; + +// Composite with 0 delegates is special case: it's always ready. +TEST(CompositeDeserializerWith0Delegates, EmptyBufferShouldBeReady) { + // given + const TestCompositeDeserializer0 testee{}; + // when, then + ASSERT_EQ(testee.ready(), true); +} + +TEST(CompositeDeserializerWith0Delegates, ShouldDeserialize) { + const CompositeResultWith0Fields expected{}; + serializeThenDeserializeAndCheckEquality(expected); +} + +// Tests for composite deserializer with N+ fields. 
+ +{% for field_count in counts %} +struct CompositeResultWith{{ field_count }}Fields { + {% for field in range(1, field_count + 1) %} + const std::string field{{ field }}_; + {% endfor %} + + uint32_t encode(Buffer::Instance& dst, EncodingContext& encoder) const { + uint32_t written{0}; + {% for field in range(1, field_count + 1) %} + written += encoder.encode(field{{ field }}_, dst); + {% endfor %} + return written; + } + + bool operator==(const CompositeResultWith{{ field_count }}Fields& rhs) const { + return true + {% for field in range(1, field_count + 1) %} && field{{ field }}_ == rhs.field{{ field }}_ + {% endfor %}; + } +}; + +typedef CompositeDeserializerWith{{ field_count }}Delegates< + CompositeResultWith{{ field_count }}Fields + {% for field in range(1, field_count + 1) %}, StringDeserializer{% endfor %} +> TestCompositeDeserializer{{ field_count }}; + +TEST(CompositeDeserializerWith{{ field_count }}Delegates, EmptyBufferShouldNotBeReady) { + // given + const TestCompositeDeserializer{{ field_count }} testee{}; + // when, then + ASSERT_EQ(testee.ready(), false); +} + +TEST(CompositeDeserializerWith{{ field_count }}Delegates, ShouldDeserialize) { + const CompositeResultWith{{ field_count }}Fields expected{ + {% for field in range(1, field_count + 1) %}"s{{ field }}", {% endfor %} + }; + serializeThenDeserializeAndCheckEquality(expected); +} +{% endfor %} + +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/mongo_proxy/BUILD b/source/extensions/filters/network/mongo_proxy/BUILD index 6891198b77b25..36a94de85abcb 100644 --- a/source/extensions/filters/network/mongo_proxy/BUILD +++ b/source/extensions/filters/network/mongo_proxy/BUILD @@ -75,6 +75,7 @@ envoy_cc_library( "//source/common/network:filter_lib", "//source/common/protobuf:utility_lib", "//source/common/singleton:const_singleton", + "//source/extensions/filters/common/fault:fault_config_lib", 
"//source/extensions/filters/network:well_known_names", "@envoy_api//envoy/config/filter/network/mongo_proxy/v2:mongo_proxy_cc", ], diff --git a/source/extensions/filters/network/mongo_proxy/config.cc b/source/extensions/filters/network/mongo_proxy/config.cc index 368ac9e9c5e8f..a8989947e75a6 100644 --- a/source/extensions/filters/network/mongo_proxy/config.cc +++ b/source/extensions/filters/network/mongo_proxy/config.cc @@ -24,14 +24,12 @@ Network::FilterFactoryCb MongoProxyFilterConfigFactory::createFilterFactoryFromP AccessLogSharedPtr access_log; if (!proto_config.access_log().empty()) { access_log.reset(new AccessLog(proto_config.access_log(), context.accessLogManager(), - context.dispatcher().timeSystem())); + context.dispatcher().timeSource())); } - FaultConfigSharedPtr fault_config; + Filters::Common::Fault::FaultDelayConfigSharedPtr fault_config; if (proto_config.has_delay()) { - auto delay = proto_config.delay(); - ASSERT(delay.has_fixed_delay()); - fault_config = std::make_shared(proto_config.delay()); + fault_config = std::make_shared(proto_config.delay()); } const bool emit_dynamic_metadata = proto_config.emit_dynamic_metadata(); @@ -39,8 +37,7 @@ Network::FilterFactoryCb MongoProxyFilterConfigFactory::createFilterFactoryFromP emit_dynamic_metadata](Network::FilterManager& filter_manager) -> void { filter_manager.addFilter(std::make_shared( stat_prefix, context.scope(), context.runtime(), access_log, fault_config, - context.drainDecision(), context.random(), context.dispatcher().timeSystem(), - emit_dynamic_metadata)); + context.drainDecision(), context.dispatcher().timeSource(), emit_dynamic_metadata)); }; } diff --git a/source/extensions/filters/network/mongo_proxy/proxy.cc b/source/extensions/filters/network/mongo_proxy/proxy.cc index 066f0e2e78508..59621b337928b 100644 --- a/source/extensions/filters/network/mongo_proxy/proxy.cc +++ b/source/extensions/filters/network/mongo_proxy/proxy.cc @@ -56,13 +56,12 @@ void AccessLog::logMessage(const 
Message& message, bool full, ProxyFilter::ProxyFilter(const std::string& stat_prefix, Stats::Scope& scope, Runtime::Loader& runtime, AccessLogSharedPtr access_log, - const FaultConfigSharedPtr& fault_config, - const Network::DrainDecision& drain_decision, - Runtime::RandomGenerator& generator, Event::TimeSystem& time_system, + const Filters::Common::Fault::FaultDelayConfigSharedPtr& fault_config, + const Network::DrainDecision& drain_decision, TimeSource& time_source, bool emit_dynamic_metadata) : stat_prefix_(stat_prefix), scope_(scope), stats_(generateStats(stat_prefix, scope)), - runtime_(runtime), drain_decision_(drain_decision), generator_(generator), - access_log_(access_log), fault_config_(fault_config), time_system_(time_system), + runtime_(runtime), drain_decision_(drain_decision), access_log_(access_log), + fault_config_(fault_config), time_source_(time_source), emit_dynamic_metadata_(emit_dynamic_metadata) { if (!runtime_.snapshot().featureEnabled(MongoRuntimeConfig::get().ConnectionLoggingEnabled, 100)) { @@ -283,7 +282,7 @@ void ProxyFilter::chargeReplyStats(ActiveQuery& active_query, const std::string& scope_.histogram(fmt::format("{}.reply_size", prefix)).recordValue(reply_documents_byte_size); scope_.histogram(fmt::format("{}.reply_time_ms", prefix)) .recordValue(std::chrono::duration_cast( - time_system_.monotonicTime() - active_query.start_time_) + time_source_.monotonicTime() - active_query.start_time_) .count()); } @@ -365,26 +364,30 @@ DecoderPtr ProdProxyFilter::createDecoder(DecoderCallbacks& callbacks) { return DecoderPtr{new DecoderImpl(callbacks)}; } -absl::optional ProxyFilter::delayDuration() { - absl::optional result; +absl::optional ProxyFilter::delayDuration() { + absl::optional result; if (!fault_config_) { return result; } if (!runtime_.snapshot().featureEnabled(MongoRuntimeConfig::get().FixedDelayPercent, - fault_config_->delayPercentage().numerator(), - generator_.random(), - 
ProtobufPercentHelper::fractionalPercentDenominatorToInt( - fault_config_->delayPercentage().denominator()))) { + fault_config_->percentage())) { return result; } - const uint64_t duration = runtime_.snapshot().getInteger( - MongoRuntimeConfig::get().FixedDelayDurationMs, fault_config_->delayDuration()); + // See if the delay provider has a default delay, if not there is no delay. + auto config_duration = fault_config_->duration(nullptr); + if (!config_duration.has_value()) { + return result; + } + + const std::chrono::milliseconds duration = + std::chrono::milliseconds(runtime_.snapshot().getInteger( + MongoRuntimeConfig::get().FixedDelayDurationMs, config_duration.value().count())); // Delay only if the duration is > 0ms. - if (duration > 0) { + if (duration.count() > 0) { result = duration; } @@ -405,12 +408,12 @@ void ProxyFilter::tryInjectDelay() { return; } - const absl::optional delay_ms = delayDuration(); + const absl::optional delay = delayDuration(); - if (delay_ms) { + if (delay) { delay_timer_ = read_callbacks_->connection().dispatcher().createTimer( [this]() -> void { delayInjectionTimerCallback(); }); - delay_timer_->enableTimer(std::chrono::milliseconds(delay_ms.value())); + delay_timer_->enableTimer(delay.value()); stats_.delays_injected_.inc(); } } diff --git a/source/extensions/filters/network/mongo_proxy/proxy.h b/source/extensions/filters/network/mongo_proxy/proxy.h index 17ff06639031e..6c9a308800f42 100644 --- a/source/extensions/filters/network/mongo_proxy/proxy.h +++ b/source/extensions/filters/network/mongo_proxy/proxy.h @@ -23,6 +23,7 @@ #include "common/protobuf/utility.h" #include "common/singleton/const_singleton.h" +#include "extensions/filters/common/fault/fault_config.h" #include "extensions/filters/network/mongo_proxy/codec.h" #include "extensions/filters/network/mongo_proxy/utility.h" @@ -93,29 +94,11 @@ class AccessLog { private: TimeSource& time_source_; - Filesystem::FileSharedPtr file_; + Envoy::AccessLog::AccessLogFileSharedPtr 
file_; }; typedef std::shared_ptr AccessLogSharedPtr; -/** - * Mongo fault configuration. - */ -class FaultConfig { -public: - FaultConfig(const envoy::config::filter::fault::v2::FaultDelay& fault_config) - : delay_percentage_(fault_config.percentage()), - duration_ms_(PROTOBUF_GET_MS_REQUIRED(fault_config, fixed_delay)) {} - envoy::type::FractionalPercent delayPercentage() const { return delay_percentage_; } - uint64_t delayDuration() const { return duration_ms_; } - -private: - envoy::type::FractionalPercent delay_percentage_; - const uint64_t duration_ms_; -}; - -typedef std::shared_ptr FaultConfigSharedPtr; - /** * A sniffing filter for mongo traffic. The current implementation makes a copy of read/written * data, decodes it, and generates stats. @@ -126,9 +109,10 @@ class ProxyFilter : public Network::Filter, Logger::Loggable { public: ProxyFilter(const std::string& stat_prefix, Stats::Scope& scope, Runtime::Loader& runtime, - AccessLogSharedPtr access_log, const FaultConfigSharedPtr& fault_config, - const Network::DrainDecision& drain_decision, Runtime::RandomGenerator& generator, - Event::TimeSystem& time_system, bool emit_dynamic_metadata); + AccessLogSharedPtr access_log, + const Filters::Common::Fault::FaultDelayConfigSharedPtr& fault_config, + const Network::DrainDecision& drain_decision, TimeSource& time_system, + bool emit_dynamic_metadata); ~ProxyFilter(); virtual DecoderPtr createDecoder(DecoderCallbacks& callbacks) PURE; @@ -163,7 +147,7 @@ class ProxyFilter : public Network::Filter, private: struct ActiveQuery { ActiveQuery(ProxyFilter& parent, const QueryMessage& query) - : parent_(parent), query_info_(query), start_time_(parent_.time_system_.monotonicTime()) { + : parent_(parent), query_info_(query), start_time_(parent_.time_source_.monotonicTime()) { parent_.stats_.op_query_active_.inc(); } @@ -188,7 +172,7 @@ class ProxyFilter : public Network::Filter, void doDecode(Buffer::Instance& buffer); void logMessage(Message& message, bool full); void 
onDrainClose(); - absl::optional delayDuration(); + absl::optional delayDuration(); void delayInjectionTimerCallback(); void tryInjectDelay(); @@ -198,17 +182,16 @@ class ProxyFilter : public Network::Filter, MongoProxyStats stats_; Runtime::Loader& runtime_; const Network::DrainDecision& drain_decision_; - Runtime::RandomGenerator& generator_; Buffer::OwnedImpl read_buffer_; Buffer::OwnedImpl write_buffer_; bool sniffing_{true}; std::list active_query_list_; AccessLogSharedPtr access_log_; Network::ReadFilterCallbacks* read_callbacks_{}; - const FaultConfigSharedPtr fault_config_; + const Filters::Common::Fault::FaultDelayConfigSharedPtr fault_config_; Event::TimerPtr delay_timer_; Event::TimerPtr drain_close_timer_; - Event::TimeSystem& time_system_; + TimeSource& time_source_; const bool emit_dynamic_metadata_; }; diff --git a/source/extensions/filters/network/ratelimit/BUILD b/source/extensions/filters/network/ratelimit/BUILD index 3d5b0a099e13f..5def3649b91f3 100644 --- a/source/extensions/filters/network/ratelimit/BUILD +++ b/source/extensions/filters/network/ratelimit/BUILD @@ -23,7 +23,6 @@ envoy_cc_library( "//include/envoy/stats:stats_macros", "//source/common/tracing:http_tracer_lib", "//source/extensions/filters/common/ratelimit:ratelimit_client_interface", - "//source/extensions/filters/common/ratelimit:ratelimit_registration_lib", "@envoy_api//envoy/config/filter/network/rate_limit/v2:rate_limit_cc", ], ) diff --git a/source/extensions/filters/network/ratelimit/config.cc b/source/extensions/filters/network/ratelimit/config.cc index d6ad81f9463d6..80d8e8ae1ac88 100644 --- a/source/extensions/filters/network/ratelimit/config.cc +++ b/source/extensions/filters/network/ratelimit/config.cc @@ -10,7 +10,6 @@ #include "common/protobuf/utility.h" #include "extensions/filters/common/ratelimit/ratelimit_impl.h" -#include "extensions/filters/common/ratelimit/ratelimit_registration.h" #include "extensions/filters/network/ratelimit/ratelimit.h" namespace Envoy { 
@@ -29,21 +28,14 @@ Network::FilterFactoryCb RateLimitConfigFactory::createFilterFactoryFromProtoTyp ConfigSharedPtr filter_config(new Config(proto_config, context.scope(), context.runtime())); const std::chrono::milliseconds timeout = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(proto_config, timeout, 20)); - Filters::Common::RateLimit::ClientFactoryPtr client_factory = - Filters::Common::RateLimit::rateLimitClientFactory(context); - // If ratelimit service config is provided in both bootstrap and filter, we should validate that - // they are same. - Filters::Common::RateLimit::validateRateLimitConfig< - const envoy::config::filter::network::rate_limit::v2::RateLimit&>(proto_config, - client_factory); - return [client_factory, proto_config, &context, timeout, + return [proto_config, &context, timeout, filter_config](Network::FilterManager& filter_manager) -> void { filter_manager.addReadFilter(std::make_shared( filter_config, Filters::Common::RateLimit::rateLimitClient( - client_factory, context, proto_config.rate_limit_service().grpc_service(), timeout))); + context, proto_config.rate_limit_service().grpc_service(), timeout))); }; } diff --git a/source/extensions/filters/network/rbac/rbac_filter.cc b/source/extensions/filters/network/rbac/rbac_filter.cc index 1267e9cac3872..0c5008c88b1d8 100644 --- a/source/extensions/filters/network/rbac/rbac_filter.cc +++ b/source/extensions/filters/network/rbac/rbac_filter.cc @@ -5,6 +5,8 @@ #include "extensions/filters/network/well_known_names.h" +#include "absl/strings/str_join.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -26,7 +28,8 @@ Network::FilterStatus RoleBasedAccessControlFilter::onData(Buffer::Instance&, bo callbacks_->connection().remoteAddress()->asString(), callbacks_->connection().localAddress()->asString(), callbacks_->connection().ssl() - ? "uriSanPeerCertificate: " + callbacks_->connection().ssl()->uriSanPeerCertificate() + + ? 
"uriSanPeerCertificate: " + + absl::StrJoin(callbacks_->connection().ssl()->uriSanPeerCertificate(), ",") + ", subjectPeerCertificate: " + callbacks_->connection().ssl()->subjectPeerCertificate() : "none", diff --git a/source/extensions/filters/network/redis_proxy/BUILD b/source/extensions/filters/network/redis_proxy/BUILD index 2d3767310c6f2..9825a435144e7 100644 --- a/source/extensions/filters/network/redis_proxy/BUILD +++ b/source/extensions/filters/network/redis_proxy/BUILD @@ -12,37 +12,30 @@ load( envoy_package() -envoy_cc_library( - name = "codec_interface", - hdrs = ["codec.h"], - deps = ["//include/envoy/buffer:buffer_interface"], -) - envoy_cc_library( name = "command_splitter_interface", hdrs = ["command_splitter.h"], - deps = [":codec_interface"], + deps = [ + "//source/extensions/filters/network/common/redis:codec_interface", + ], ) envoy_cc_library( name = "conn_pool_interface", hdrs = ["conn_pool.h"], deps = [ - ":codec_interface", "//include/envoy/upstream:cluster_manager_interface", + "//source/extensions/filters/network/common/redis:client_interface", + "//source/extensions/filters/network/common/redis:codec_interface", ], ) envoy_cc_library( - name = "codec_lib", - srcs = ["codec_impl.cc"], - hdrs = ["codec_impl.h"], + name = "router_interface", + hdrs = ["router.h"], deps = [ - ":codec_interface", - "//source/common/common:assert_lib", - "//source/common/common:minimal_logger_lib", - "//source/common/common:stack_array", - "//source/common/common:utility_lib", + ":conn_pool_interface", + "@envoy_api//envoy/config/filter/network/redis_proxy/v2:redis_proxy_cc", ], ) @@ -52,14 +45,15 @@ envoy_cc_library( hdrs = ["command_splitter_impl.h"], deps = [ ":command_splitter_interface", - ":conn_pool_interface", - ":supported_commands_lib", + ":router_interface", "//include/envoy/stats:stats_macros", "//include/envoy/stats:timespan", "//source/common/common:assert_lib", "//source/common/common:minimal_logger_lib", 
"//source/common/common:to_lower_table_lib", "//source/common/common:utility_lib", + "//source/extensions/filters/network/common/redis:client_lib", + "//source/extensions/filters/network/common/redis:supported_commands_lib", ], ) @@ -68,16 +62,17 @@ envoy_cc_library( srcs = ["conn_pool_impl.cc"], hdrs = ["conn_pool_impl.h"], deps = [ - ":codec_lib", ":conn_pool_interface", - "//include/envoy/router:router_interface", "//include/envoy/thread_local:thread_local_interface", "//include/envoy/upstream:cluster_manager_interface", "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", + "//source/common/network:address_lib", "//source/common/network:filter_lib", "//source/common/protobuf:utility_lib", "//source/common/upstream:load_balancer_lib", + "//source/common/upstream:upstream_lib", + "//source/extensions/filters/network/common/redis:client_lib", "@envoy_api//envoy/config/filter/network/redis_proxy/v2:redis_proxy_cc", ], ) @@ -87,7 +82,6 @@ envoy_cc_library( srcs = ["proxy_filter.cc"], hdrs = ["proxy_filter.h"], deps = [ - ":codec_interface", ":command_splitter_interface", "//include/envoy/network:drain_decision_interface", "//include/envoy/network:filter_interface", @@ -95,18 +89,11 @@ envoy_cc_library( "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", "//source/common/config:utility_lib", + "//source/extensions/filters/network/common/redis:codec_interface", "@envoy_api//envoy/config/filter/network/redis_proxy/v2:redis_proxy_cc", ], ) -envoy_cc_library( - name = "supported_commands_lib", - hdrs = ["supported_commands.h"], - deps = [ - "//source/common/common:macros", - ], -) - envoy_cc_library( name = "config", srcs = ["config.cc"], @@ -116,9 +103,23 @@ envoy_cc_library( "//source/common/config:filter_json_lib", "//source/extensions/filters/network:well_known_names", "//source/extensions/filters/network/common:factory_base_lib", - "//source/extensions/filters/network/redis_proxy:codec_lib", + 
"//source/extensions/filters/network/common/redis:codec_lib", "//source/extensions/filters/network/redis_proxy:command_splitter_lib", - "//source/extensions/filters/network/redis_proxy:conn_pool_lib", "//source/extensions/filters/network/redis_proxy:proxy_filter_lib", + "//source/extensions/filters/network/redis_proxy:router_lib", + ], +) + +envoy_cc_library( + name = "router_lib", + srcs = ["router_impl.cc"], + hdrs = ["router_impl.h"], + deps = [ + ":router_interface", + "//include/envoy/thread_local:thread_local_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//source/common/common:to_lower_table_lib", + "//source/extensions/filters/network/redis_proxy:conn_pool_lib", + "@envoy_api//envoy/config/filter/network/redis_proxy/v2:redis_proxy_cc", ], ) diff --git a/source/extensions/filters/network/redis_proxy/command_splitter.h b/source/extensions/filters/network/redis_proxy/command_splitter.h index 7c1a78289aaa6..678a9e9807907 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter.h +++ b/source/extensions/filters/network/redis_proxy/command_splitter.h @@ -4,7 +4,7 @@ #include "envoy/common/pure.h" -#include "extensions/filters/network/redis_proxy/codec.h" +#include "extensions/filters/network/common/redis/codec.h" namespace Envoy { namespace Extensions { @@ -38,7 +38,7 @@ class SplitCallbacks { * Called when the response is ready. * @param value supplies the response which is now owned by the callee. */ - virtual void onResponse(RespValuePtr&& value) PURE; + virtual void onResponse(Common::Redis::RespValuePtr&& value) PURE; }; /** @@ -50,14 +50,15 @@ class Instance { virtual ~Instance() {} /** - * Make a split redis request. - * @param request supplies the split request to make. + * Make a split redis request capable of being retried/redirected. + * @param request supplies the split request to make (ownership transferred to call). * @param callbacks supplies the split request completion callbacks. 
* @return SplitRequestPtr a handle to the active request or nullptr if the request has already * been satisfied (via onResponse() being called). The splitter ALWAYS calls * onResponse() for a given request. */ - virtual SplitRequestPtr makeRequest(const RespValue& request, SplitCallbacks& callbacks) PURE; + virtual SplitRequestPtr makeRequest(Common::Redis::RespValuePtr&& request, + SplitCallbacks& callbacks) PURE; }; } // namespace CommandSplitter diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc index 52d6f6beeebb8..e41440ec7da28 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc @@ -10,7 +10,7 @@ #include "common/common/assert.h" #include "common/common/fmt.h" -#include "extensions/filters/network/redis_proxy/supported_commands.h" +#include "extensions/filters/network/common/redis/supported_commands.h" namespace Envoy { namespace Extensions { @@ -18,15 +18,72 @@ namespace NetworkFilters { namespace RedisProxy { namespace CommandSplitter { -RespValuePtr Utility::makeError(const std::string& error) { - RespValuePtr response(new RespValue()); - response->type(RespType::Error); +Common::Redis::RespValuePtr Utility::makeError(const std::string& error) { + Common::Redis::RespValuePtr response(new Common::Redis::RespValue()); + response->type(Common::Redis::RespType::Error); response->asString() = error; return response; } +namespace { + +// null_pool_callbacks is used for requests that must be filtered and not redirected such as +// "asking". +DoNothingPoolCallbacks null_pool_callbacks; + +// Create an asking command request. 
+const Common::Redis::RespValue& askingRequest() { + static Common::Redis::RespValue request; + static bool initialized = false; + + if (!initialized) { + Common::Redis::RespValue asking_cmd; + asking_cmd.type(Common::Redis::RespType::BulkString); + asking_cmd.asString() = "asking"; + request.type(Common::Redis::RespType::Array); + request.asArray().push_back(asking_cmd); + initialized = true; + } + return request; +} + +/** + * Validate the received moved/ask redirection error and the original redis request. + * @param[in] original_request supplies the incoming request associated with the command splitter + * request. + * @param[in] error_response supplies the moved/ask redirection response from the upstream Redis + * server. + * @param[out] error_substrings the non-whitespace substrings of error_response. + * @param[out] ask_redirection true if error_response is an ASK redirection error, false otherwise. + * @return bool true if the original_request or error_response are not valid, false otherwise. + */ +bool redirectionArgsInvalid(const Common::Redis::RespValue* original_request, + const Common::Redis::RespValue& error_response, + std::vector& error_substrings, + bool& ask_redirection) { + if ((original_request == nullptr) || (error_response.type() != Common::Redis::RespType::Error)) { + return true; + } + error_substrings = StringUtil::splitToken(error_response.asString(), " ", false); + if (error_substrings.size() != 3) { + return true; + } + if (error_substrings[0] == "ASK") { + ask_redirection = true; + } else if (error_substrings[0] == "MOVED") { + ask_redirection = false; + } else { + // The first substring must be MOVED or ASK. + return true; + } + // Other validation done later to avoid duplicate processing. 
+ return false; +} + +} // namespace + void SplitRequestBase::onWrongNumberOfArguments(SplitCallbacks& callbacks, - const RespValue& request) { + const Common::Redis::RespValue& request) { callbacks.onResponse(Utility::makeError( fmt::format("wrong number of arguments for '{}' command", request.asArray()[0].asString()))); } @@ -37,12 +94,12 @@ void SplitRequestBase::updateStats(const bool success) { } else { command_stats_.error_.inc(); } - command_latency_ms_->complete(); + command_latency_->complete(); } SingleServerRequest::~SingleServerRequest() { ASSERT(!handle_); } -void SingleServerRequest::onResponse(RespValuePtr&& response) { +void SingleServerRequest::onResponse(Common::Redis::RespValuePtr&& response) { handle_ = nullptr; updateStats(true); callbacks_.onResponse(std::move(response)); @@ -54,49 +111,87 @@ void SingleServerRequest::onFailure() { callbacks_.onResponse(Utility::makeError(Response::get().UpstreamFailure)); } +bool SingleServerRequest::onRedirection(const Common::Redis::RespValue& value) { + std::vector err; + bool ask_redirection = false; + if (redirectionArgsInvalid(incoming_request_.get(), value, err, ask_redirection) || !conn_pool_) { + return false; + } + + // MOVED and ASK redirection errors have the following substrings: MOVED or ASK (err[0]), hash key + // slot (err[1]), and IP address and TCP port separated by a colon (err[2]). + const std::string host_address = std::string(err[2]); + + // Prepend request with an asking command if redirected via an ASK error. The returned handle is + // not important since there is no point in being able to cancel the request. The use of + // null_pool_callbacks ensures the transparent filtering of the Redis server's response to the + // "asking" command; this is fine since the server either responds with an OK or an error message + // if cluster support is not enabled (in which case we should not get an ASK redirection error). 
+ if (ask_redirection && + !conn_pool_->makeRequestToHost(host_address, askingRequest(), null_pool_callbacks)) { + return false; + } + handle_ = conn_pool_->makeRequestToHost(host_address, *incoming_request_, *this); + return (handle_ != nullptr); +} + void SingleServerRequest::cancel() { handle_->cancel(); handle_ = nullptr; } -SplitRequestPtr SimpleRequest::create(ConnPool::Instance& conn_pool, - const RespValue& incoming_request, SplitCallbacks& callbacks, - CommandStats& command_stats, TimeSource& time_source) { +SplitRequestPtr SimpleRequest::create(Router& router, + Common::Redis::RespValuePtr&& incoming_request, + SplitCallbacks& callbacks, CommandStats& command_stats, + TimeSource& time_source, bool latency_in_micros) { std::unique_ptr request_ptr{ - new SimpleRequest(callbacks, command_stats, time_source)}; + new SimpleRequest(callbacks, command_stats, time_source, latency_in_micros)}; + + auto conn_pool = router.upstreamPool(incoming_request->asArray()[1].asString()); + if (conn_pool) { + request_ptr->conn_pool_ = conn_pool; + request_ptr->handle_ = conn_pool->makeRequest(incoming_request->asArray()[1].asString(), + *incoming_request, *request_ptr); + } - request_ptr->handle_ = conn_pool.makeRequest(incoming_request.asArray()[1].asString(), - incoming_request, *request_ptr); if (!request_ptr->handle_) { - request_ptr->callbacks_.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); + callbacks.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); return nullptr; } - return std::move(request_ptr); + request_ptr->incoming_request_ = std::move(incoming_request); + return request_ptr; } -SplitRequestPtr EvalRequest::create(ConnPool::Instance& conn_pool, - const RespValue& incoming_request, SplitCallbacks& callbacks, - CommandStats& command_stats, TimeSource& time_source) { - +SplitRequestPtr EvalRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request, + SplitCallbacks& callbacks, CommandStats& command_stats, + 
TimeSource& time_source, bool latency_in_micros) { // EVAL looks like: EVAL script numkeys key [key ...] arg [arg ...] // Ensure there are at least three args to the command or it cannot be hashed. - if (incoming_request.asArray().size() < 4) { - onWrongNumberOfArguments(callbacks, incoming_request); + if (incoming_request->asArray().size() < 4) { + onWrongNumberOfArguments(callbacks, *incoming_request); command_stats.error_.inc(); return nullptr; } - std::unique_ptr request_ptr{new EvalRequest(callbacks, command_stats, time_source)}; - request_ptr->handle_ = conn_pool.makeRequest(incoming_request.asArray()[3].asString(), - incoming_request, *request_ptr); + std::unique_ptr request_ptr{ + new EvalRequest(callbacks, command_stats, time_source, latency_in_micros)}; + + auto conn_pool = router.upstreamPool(incoming_request->asArray()[3].asString()); + if (conn_pool) { + request_ptr->conn_pool_ = conn_pool; + request_ptr->handle_ = conn_pool->makeRequest(incoming_request->asArray()[3].asString(), + *incoming_request, *request_ptr); + } + if (!request_ptr->handle_) { command_stats.error_.inc(); - request_ptr->callbacks_.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); + callbacks.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); return nullptr; } - return std::move(request_ptr); + request_ptr->incoming_request_ = std::move(incoming_request); + return request_ptr; } FragmentedRequest::~FragmentedRequest() { @@ -120,65 +215,105 @@ void FragmentedRequest::onChildFailure(uint32_t index) { onChildResponse(Utility::makeError(Response::get().UpstreamFailure), index); } -SplitRequestPtr MGETRequest::create(ConnPool::Instance& conn_pool, - const RespValue& incoming_request, SplitCallbacks& callbacks, - CommandStats& command_stats, TimeSource& time_source) { - std::unique_ptr request_ptr{new MGETRequest(callbacks, command_stats, time_source)}; +SplitRequestPtr MGETRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request, + 
SplitCallbacks& callbacks, CommandStats& command_stats, + TimeSource& time_source, bool latency_in_micros) { + std::unique_ptr request_ptr{ + new MGETRequest(callbacks, command_stats, time_source, latency_in_micros)}; - request_ptr->num_pending_responses_ = incoming_request.asArray().size() - 1; + request_ptr->num_pending_responses_ = incoming_request->asArray().size() - 1; request_ptr->pending_requests_.reserve(request_ptr->num_pending_responses_); - request_ptr->pending_response_ = std::make_unique(); - request_ptr->pending_response_->type(RespType::Array); - std::vector responses(request_ptr->num_pending_responses_); + request_ptr->pending_response_ = std::make_unique(); + request_ptr->pending_response_->type(Common::Redis::RespType::Array); + std::vector responses(request_ptr->num_pending_responses_); request_ptr->pending_response_->asArray().swap(responses); - std::vector values(2); - values[0].type(RespType::BulkString); + std::vector values(2); + values[0].type(Common::Redis::RespType::BulkString); values[0].asString() = "get"; - values[1].type(RespType::BulkString); - RespValue single_mget; - single_mget.type(RespType::Array); + values[1].type(Common::Redis::RespType::BulkString); + Common::Redis::RespValue single_mget; + single_mget.type(Common::Redis::RespType::Array); single_mget.asArray().swap(values); - for (uint64_t i = 1; i < incoming_request.asArray().size(); i++) { + for (uint64_t i = 1; i < incoming_request->asArray().size(); i++) { request_ptr->pending_requests_.emplace_back(*request_ptr, i - 1); PendingRequest& pending_request = request_ptr->pending_requests_.back(); - single_mget.asArray()[1].asString() = incoming_request.asArray()[i].asString(); + single_mget.asArray()[1].asString() = incoming_request->asArray()[i].asString(); ENVOY_LOG(debug, "redis: parallel get: '{}'", single_mget.toString()); - pending_request.handle_ = conn_pool.makeRequest(incoming_request.asArray()[i].asString(), - single_mget, pending_request); + auto conn_pool = 
router.upstreamPool(incoming_request->asArray()[i].asString()); + if (conn_pool) { + pending_request.conn_pool_ = conn_pool; + pending_request.handle_ = conn_pool->makeRequest(incoming_request->asArray()[i].asString(), + single_mget, pending_request); + } + if (!pending_request.handle_) { pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); } } - return request_ptr->num_pending_responses_ > 0 ? std::move(request_ptr) : nullptr; + if (request_ptr->num_pending_responses_ > 0) { + request_ptr->incoming_request_ = std::move(incoming_request); + return request_ptr; + } + + return nullptr; } -void MGETRequest::onChildResponse(RespValuePtr&& value, uint32_t index) { +bool FragmentedRequest::onChildRedirection(const Common::Redis::RespValue& value, uint32_t index, + const ConnPool::InstanceSharedPtr& conn_pool) { + std::vector err; + bool ask_redirection = false; + if (redirectionArgsInvalid(incoming_request_.get(), value, err, ask_redirection) || !conn_pool) { + return false; + } + + // MOVED and ASK redirection errors have the following substrings: MOVED or ASK (err[0]), hash key + // slot (err[1]), and IP address and TCP port separated by a colon (err[2]). + std::string host_address = std::string(err[2]); + Common::Redis::RespValue request; + recreate(request, index); + + // Prepend request with an asking command if redirected via an ASK error. The returned handle is + // not important since there is no point in being able to cancel the request. The use of + // null_pool_callbacks ensures the transparent filtering of the Redis server's response to the + // "asking" command; this is fine since the server either responds with an OK or an error message + // if cluster support is not enabled (in which case we should not get an ASK redirection error). 
+ if (ask_redirection && + !conn_pool->makeRequestToHost(host_address, askingRequest(), null_pool_callbacks)) { + return false; + } + + this->pending_requests_[index].handle_ = + conn_pool->makeRequestToHost(host_address, request, this->pending_requests_[index]); + return (this->pending_requests_[index].handle_ != nullptr); +} + +void MGETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) { pending_requests_[index].handle_ = nullptr; pending_response_->asArray()[index].type(value->type()); switch (value->type()) { - case RespType::Array: - case RespType::Integer: - case RespType::SimpleString: { - pending_response_->asArray()[index].type(RespType::Error); + case Common::Redis::RespType::Array: + case Common::Redis::RespType::Integer: + case Common::Redis::RespType::SimpleString: { + pending_response_->asArray()[index].type(Common::Redis::RespType::Error); pending_response_->asArray()[index].asString() = Response::get().UpstreamProtocolError; error_count_++; break; } - case RespType::Error: { + case Common::Redis::RespType::Error: { error_count_++; FALLTHRU; } - case RespType::BulkString: { + case Common::Redis::RespType::BulkString: { pending_response_->asArray()[index].asString().swap(value->asString()); break; } - case RespType::Null: + case Common::Redis::RespType::Null: break; } @@ -190,55 +325,81 @@ void MGETRequest::onChildResponse(RespValuePtr&& value, uint32_t index) { } } -SplitRequestPtr MSETRequest::create(ConnPool::Instance& conn_pool, - const RespValue& incoming_request, SplitCallbacks& callbacks, - CommandStats& command_stats, TimeSource& time_source) { - if ((incoming_request.asArray().size() - 1) % 2 != 0) { - onWrongNumberOfArguments(callbacks, incoming_request); +void MGETRequest::recreate(Common::Redis::RespValue& request, uint32_t index) { + static const uint32_t GET_COMMAND_SUBSTRINGS = 2; + uint32_t num_values = GET_COMMAND_SUBSTRINGS; + std::vector values(num_values); + + for (uint32_t i = 0; i < num_values; i++) { 
+ values[i].type(Common::Redis::RespType::BulkString); + } + values[--num_values].asString() = incoming_request_->asArray()[index + 1].asString(); + values[--num_values].asString() = "get"; + + request.type(Common::Redis::RespType::Array); + request.asArray().swap(values); +} + +SplitRequestPtr MSETRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request, + SplitCallbacks& callbacks, CommandStats& command_stats, + TimeSource& time_source, bool latency_in_micros) { + if ((incoming_request->asArray().size() - 1) % 2 != 0) { + onWrongNumberOfArguments(callbacks, *incoming_request); command_stats.error_.inc(); return nullptr; } - std::unique_ptr request_ptr{new MSETRequest(callbacks, command_stats, time_source)}; + std::unique_ptr request_ptr{ + new MSETRequest(callbacks, command_stats, time_source, latency_in_micros)}; - request_ptr->num_pending_responses_ = (incoming_request.asArray().size() - 1) / 2; + request_ptr->num_pending_responses_ = (incoming_request->asArray().size() - 1) / 2; request_ptr->pending_requests_.reserve(request_ptr->num_pending_responses_); - request_ptr->pending_response_ = std::make_unique(); - request_ptr->pending_response_->type(RespType::SimpleString); + request_ptr->pending_response_ = std::make_unique(); + request_ptr->pending_response_->type(Common::Redis::RespType::SimpleString); - std::vector values(3); - values[0].type(RespType::BulkString); + std::vector values(3); + values[0].type(Common::Redis::RespType::BulkString); values[0].asString() = "set"; - values[1].type(RespType::BulkString); - values[2].type(RespType::BulkString); - RespValue single_mset; - single_mset.type(RespType::Array); + values[1].type(Common::Redis::RespType::BulkString); + values[2].type(Common::Redis::RespType::BulkString); + Common::Redis::RespValue single_mset; + single_mset.type(Common::Redis::RespType::Array); single_mset.asArray().swap(values); uint64_t fragment_index = 0; - for (uint64_t i = 1; i < incoming_request.asArray().size(); i 
+= 2) { + for (uint64_t i = 1; i < incoming_request->asArray().size(); i += 2) { request_ptr->pending_requests_.emplace_back(*request_ptr, fragment_index++); PendingRequest& pending_request = request_ptr->pending_requests_.back(); - single_mset.asArray()[1].asString() = incoming_request.asArray()[i].asString(); - single_mset.asArray()[2].asString() = incoming_request.asArray()[i + 1].asString(); + single_mset.asArray()[1].asString() = incoming_request->asArray()[i].asString(); + single_mset.asArray()[2].asString() = incoming_request->asArray()[i + 1].asString(); ENVOY_LOG(debug, "redis: parallel set: '{}'", single_mset.toString()); - pending_request.handle_ = conn_pool.makeRequest(incoming_request.asArray()[i].asString(), - single_mset, pending_request); + auto conn_pool = router.upstreamPool(incoming_request->asArray()[i].asString()); + if (conn_pool) { + pending_request.conn_pool_ = conn_pool; + pending_request.handle_ = conn_pool->makeRequest(incoming_request->asArray()[i].asString(), + single_mset, pending_request); + } + if (!pending_request.handle_) { pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); } } - return request_ptr->num_pending_responses_ > 0 ? 
std::move(request_ptr) : nullptr; + if (request_ptr->num_pending_responses_ > 0) { + request_ptr->incoming_request_ = std::move(incoming_request); + return request_ptr; + } + + return nullptr; } -void MSETRequest::onChildResponse(RespValuePtr&& value, uint32_t index) { +void MSETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) { pending_requests_[index].handle_ = nullptr; switch (value->type()) { - case RespType::SimpleString: { + case Common::Redis::RespType::SimpleString: { if (value->asString() == Response::get().OK) { break; } @@ -263,50 +424,77 @@ void MSETRequest::onChildResponse(RespValuePtr&& value, uint32_t index) { } } -SplitRequestPtr SplitKeysSumResultRequest::create(ConnPool::Instance& conn_pool, - const RespValue& incoming_request, +void MSETRequest::recreate(Common::Redis::RespValue& request, uint32_t index) { + static const uint32_t SET_COMMAND_SUBSTRINGS = 3; + uint32_t num_values = SET_COMMAND_SUBSTRINGS; + std::vector values(num_values); + + for (uint32_t i = 0; i < num_values; i++) { + values[i].type(Common::Redis::RespType::BulkString); + } + values[--num_values].asString() = incoming_request_->asArray()[(index * 2) + 2].asString(); + values[--num_values].asString() = incoming_request_->asArray()[(index * 2) + 1].asString(); + values[--num_values].asString() = "set"; + + request.type(Common::Redis::RespType::Array); + request.asArray().swap(values); +} + +SplitRequestPtr SplitKeysSumResultRequest::create(Router& router, + Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source) { + TimeSource& time_source, bool latency_in_micros) { std::unique_ptr request_ptr{ - new SplitKeysSumResultRequest(callbacks, command_stats, time_source)}; + new SplitKeysSumResultRequest(callbacks, command_stats, time_source, latency_in_micros)}; - request_ptr->num_pending_responses_ = incoming_request.asArray().size() - 1; + request_ptr->num_pending_responses_ = 
incoming_request->asArray().size() - 1; request_ptr->pending_requests_.reserve(request_ptr->num_pending_responses_); - request_ptr->pending_response_ = std::make_unique(); - request_ptr->pending_response_->type(RespType::Integer); + request_ptr->pending_response_ = std::make_unique(); + request_ptr->pending_response_->type(Common::Redis::RespType::Integer); - std::vector values(2); - values[0].type(RespType::BulkString); - values[0].asString() = incoming_request.asArray()[0].asString(); - values[1].type(RespType::BulkString); - RespValue single_fragment; - single_fragment.type(RespType::Array); + std::vector values(2); + values[0].type(Common::Redis::RespType::BulkString); + values[0].asString() = incoming_request->asArray()[0].asString(); + values[1].type(Common::Redis::RespType::BulkString); + Common::Redis::RespValue single_fragment; + single_fragment.type(Common::Redis::RespType::Array); single_fragment.asArray().swap(values); - for (uint64_t i = 1; i < incoming_request.asArray().size(); i++) { + for (uint64_t i = 1; i < incoming_request->asArray().size(); i++) { request_ptr->pending_requests_.emplace_back(*request_ptr, i - 1); PendingRequest& pending_request = request_ptr->pending_requests_.back(); - single_fragment.asArray()[1].asString() = incoming_request.asArray()[i].asString(); - ENVOY_LOG(debug, "redis: parallel {}: '{}'", incoming_request.asArray()[0].asString(), + single_fragment.asArray()[1].asString() = incoming_request->asArray()[i].asString(); + ENVOY_LOG(debug, "redis: parallel {}: '{}'", incoming_request->asArray()[0].asString(), single_fragment.toString()); - pending_request.handle_ = conn_pool.makeRequest(incoming_request.asArray()[i].asString(), - single_fragment, pending_request); + auto conn_pool = router.upstreamPool(incoming_request->asArray()[i].asString()); + if (conn_pool) { + pending_request.conn_pool_ = conn_pool; + pending_request.handle_ = conn_pool->makeRequest(incoming_request->asArray()[i].asString(), + single_fragment, 
pending_request); + } + if (!pending_request.handle_) { pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); } } - return request_ptr->num_pending_responses_ > 0 ? std::move(request_ptr) : nullptr; + if (request_ptr->num_pending_responses_ > 0) { + request_ptr->incoming_request_ = std::move(incoming_request); + return request_ptr; + } + + return nullptr; } -void SplitKeysSumResultRequest::onChildResponse(RespValuePtr&& value, uint32_t index) { +void SplitKeysSumResultRequest::onChildResponse(Common::Redis::RespValuePtr&& value, + uint32_t index) { pending_requests_[index].handle_ = nullptr; switch (value->type()) { - case RespType::Integer: { + case Common::Redis::RespType::Integer: { total_ += value->asInteger(); break; } @@ -329,55 +517,72 @@ void SplitKeysSumResultRequest::onChildResponse(RespValuePtr&& value, uint32_t i } } -InstanceImpl::InstanceImpl(ConnPool::InstancePtr&& conn_pool, Stats::Scope& scope, - const std::string& stat_prefix, TimeSource& time_source) - : conn_pool_(std::move(conn_pool)), simple_command_handler_(*conn_pool_), - eval_command_handler_(*conn_pool_), mget_handler_(*conn_pool_), mset_handler_(*conn_pool_), - split_keys_sum_result_handler_(*conn_pool_), +void SplitKeysSumResultRequest::recreate(Common::Redis::RespValue& request, uint32_t index) { + static const uint32_t BASE_COMMAND_SUBSTRINGS = 2; + uint32_t num_values = BASE_COMMAND_SUBSTRINGS; + std::vector values(num_values); + + for (uint32_t i = 0; i < num_values; i++) { + values[i].type(Common::Redis::RespType::BulkString); + } + values[--num_values].asString() = incoming_request_->asArray()[index + 1].asString(); + values[--num_values].asString() = incoming_request_->asArray()[0].asString(); + + request.type(Common::Redis::RespType::Array); + request.asArray().swap(values); +} + +InstanceImpl::InstanceImpl(RouterPtr&& router, Stats::Scope& scope, const std::string& stat_prefix, + TimeSource& time_source, bool latency_in_micros) + : 
router_(std::move(router)), simple_command_handler_(*router_), + eval_command_handler_(*router_), mget_handler_(*router_), mset_handler_(*router_), + split_keys_sum_result_handler_(*router_), stats_{ALL_COMMAND_SPLITTER_STATS(POOL_COUNTER_PREFIX(scope, stat_prefix + "splitter."))}, - time_source_(time_source) { - for (const std::string& command : SupportedCommands::simpleCommands()) { + latency_in_micros_(latency_in_micros), time_source_(time_source) { + for (const std::string& command : Common::Redis::SupportedCommands::simpleCommands()) { addHandler(scope, stat_prefix, command, simple_command_handler_); } - for (const std::string& command : SupportedCommands::evalCommands()) { + for (const std::string& command : Common::Redis::SupportedCommands::evalCommands()) { addHandler(scope, stat_prefix, command, eval_command_handler_); } - for (const std::string& command : SupportedCommands::hashMultipleSumResultCommands()) { + for (const std::string& command : + Common::Redis::SupportedCommands::hashMultipleSumResultCommands()) { addHandler(scope, stat_prefix, command, split_keys_sum_result_handler_); } - addHandler(scope, stat_prefix, SupportedCommands::mget(), mget_handler_); - addHandler(scope, stat_prefix, SupportedCommands::mset(), mset_handler_); + addHandler(scope, stat_prefix, Common::Redis::SupportedCommands::mget(), mget_handler_); + addHandler(scope, stat_prefix, Common::Redis::SupportedCommands::mset(), mset_handler_); } -SplitRequestPtr InstanceImpl::makeRequest(const RespValue& request, SplitCallbacks& callbacks) { - if (request.type() != RespType::Array) { +SplitRequestPtr InstanceImpl::makeRequest(Common::Redis::RespValuePtr&& request, + SplitCallbacks& callbacks) { + if (request->type() != Common::Redis::RespType::Array) { onInvalidRequest(callbacks); return nullptr; } - std::string to_lower_string(request.asArray()[0].asString()); + std::string to_lower_string(request->asArray()[0].asString()); to_lower_table_.toLowerCase(to_lower_string); - if 
(to_lower_string == SupportedCommands::ping()) { + if (to_lower_string == Common::Redis::SupportedCommands::ping()) { // Respond to PING locally. - RespValuePtr pong(new RespValue()); - pong->type(RespType::SimpleString); + Common::Redis::RespValuePtr pong(new Common::Redis::RespValue()); + pong->type(Common::Redis::RespType::SimpleString); pong->asString() = "PONG"; callbacks.onResponse(std::move(pong)); return nullptr; } - if (request.asArray().size() < 2) { + if (request->asArray().size() < 2) { // Commands other than PING all have at least two arguments. onInvalidRequest(callbacks); return nullptr; } - for (const RespValue& value : request.asArray()) { - if (value.type() != RespType::BulkString) { + for (const Common::Redis::RespValue& value : request->asArray()) { + if (value.type() != Common::Redis::RespType::BulkString) { onInvalidRequest(callbacks); return nullptr; } @@ -387,13 +592,13 @@ SplitRequestPtr InstanceImpl::makeRequest(const RespValue& request, SplitCallbac if (handler == nullptr) { stats_.unsupported_command_.inc(); callbacks.onResponse(Utility::makeError( - fmt::format("unsupported command '{}'", request.asArray()[0].asString()))); + fmt::format("unsupported command '{}'", request->asArray()[0].asString()))); return nullptr; } - ENVOY_LOG(debug, "redis: splitting '{}'", request.toString()); + ENVOY_LOG(debug, "redis: splitting '{}'", request->toString()); handler->command_stats_.total_.inc(); SplitRequestPtr request_ptr = handler->handler_.get().startRequest( - request, callbacks, handler->command_stats_, time_source_); + std::move(request), callbacks, handler->command_stats_, time_source_, latency_in_micros_); return request_ptr; } diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h index 009871bb63e41..5ca017ca8fdb9 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h +++ 
b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h @@ -14,8 +14,10 @@ #include "common/common/utility.h" #include "common/singleton/const_singleton.h" +#include "extensions/filters/network/common/redis/client_impl.h" #include "extensions/filters/network/redis_proxy/command_splitter.h" #include "extensions/filters/network/redis_proxy/conn_pool.h" +#include "extensions/filters/network/redis_proxy/router.h" namespace Envoy { namespace Extensions { @@ -35,7 +37,7 @@ typedef ConstSingleton Response; class Utility { public: - static RespValuePtr makeError(const std::string& error); + static Common::Redis::RespValuePtr makeError(const std::string& error); }; /** @@ -46,65 +48,76 @@ class Utility { COUNTER(total) \ COUNTER(success) \ COUNTER(error) \ - HISTOGRAM(latency) \ + HISTOGRAM(latency) // clang-format on /** * Struct definition for all command stats. @see stats_macros.h */ struct CommandStats { - ALL_COMMAND_STATS(GENERATE_COUNTER_STRUCT,GENERATE_HISTOGRAM_STRUCT) + ALL_COMMAND_STATS(GENERATE_COUNTER_STRUCT, GENERATE_HISTOGRAM_STRUCT) }; class CommandHandler { public: virtual ~CommandHandler() {} - virtual SplitRequestPtr startRequest(const RespValue& request, SplitCallbacks& callbacks, - CommandStats& command_stats, TimeSource& time_source) PURE; + virtual SplitRequestPtr startRequest(Common::Redis::RespValuePtr&& request, + SplitCallbacks& callbacks, CommandStats& command_stats, + TimeSource& time_source, bool latency_in_micros) PURE; }; class CommandHandlerBase { protected: - CommandHandlerBase(ConnPool::Instance& conn_pool) : conn_pool_(conn_pool) {} + CommandHandlerBase(Router& router) : router_(router) {} - ConnPool::Instance& conn_pool_; + Router& router_; }; class SplitRequestBase : public SplitRequest { protected: - static void onWrongNumberOfArguments(SplitCallbacks& callbacks, const RespValue& request); + static void onWrongNumberOfArguments(SplitCallbacks& callbacks, + const Common::Redis::RespValue& request); void updateStats(const 
bool success); - SplitRequestBase(CommandStats& command_stats, TimeSource& time_source) + SplitRequestBase(CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) : command_stats_(command_stats) { - command_latency_ms_ = std::make_unique(command_stats_.latency_, time_source); + if (latency_in_micros) { + command_latency_ = std::make_unique>( + command_stats_.latency_, time_source); + } else { + command_latency_ = std::make_unique>( + command_stats_.latency_, time_source); + } } CommandStats& command_stats_; - Stats::TimespanPtr command_latency_ms_; + Stats::CompletableTimespanPtr command_latency_; }; /** * SingleServerRequest is a base class for commands that hash to a single backend. */ -class SingleServerRequest : public SplitRequestBase, public ConnPool::PoolCallbacks { +class SingleServerRequest : public SplitRequestBase, public Common::Redis::Client::PoolCallbacks { public: ~SingleServerRequest(); - // RedisProxy::ConnPool::PoolCallbacks - void onResponse(RespValuePtr&& response) override; + // Common::Redis::Client::PoolCallbacks + void onResponse(Common::Redis::RespValuePtr&& response) override; void onFailure() override; + bool onRedirection(const Common::Redis::RespValue& value) override; // RedisProxy::CommandSplitter::SplitRequest void cancel() override; protected: SingleServerRequest(SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source) - : SplitRequestBase(command_stats, time_source), callbacks_(callbacks) {} + TimeSource& time_source, bool latency_in_micros) + : SplitRequestBase(command_stats, time_source, latency_in_micros), callbacks_(callbacks) {} SplitCallbacks& callbacks_; - ConnPool::PoolRequest* handle_{}; + ConnPool::InstanceSharedPtr conn_pool_; + Common::Redis::Client::PoolRequest* handle_{}; + Common::Redis::RespValuePtr incoming_request_; }; /** @@ -112,13 +125,14 @@ class SingleServerRequest : public SplitRequestBase, public ConnPool::PoolCallba */ class SimpleRequest : public 
SingleServerRequest { public: - static SplitRequestPtr create(ConnPool::Instance& conn_pool, const RespValue& incoming_request, + static SplitRequestPtr create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source); + TimeSource& time_source, bool latency_in_micros); private: - SimpleRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source) - : SingleServerRequest(callbacks, command_stats, time_source) {} + SimpleRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, + bool latency_in_micros) + : SingleServerRequest(callbacks, command_stats, time_source, latency_in_micros) {} }; /** @@ -126,13 +140,14 @@ class SimpleRequest : public SingleServerRequest { */ class EvalRequest : public SingleServerRequest { public: - static SplitRequestPtr create(ConnPool::Instance& conn_pool, const RespValue& incoming_request, + static SplitRequestPtr create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source); + TimeSource& time_source, bool latency_in_micros); private: - EvalRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source) - : SingleServerRequest(callbacks, command_stats, time_source) {} + EvalRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, + bool latency_in_micros) + : SingleServerRequest(callbacks, command_stats, time_source, latency_in_micros) {} }; /** @@ -148,28 +163,39 @@ class FragmentedRequest : public SplitRequestBase { void cancel() override; protected: - FragmentedRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source) - : SplitRequestBase(command_stats, time_source), callbacks_(callbacks) {} + FragmentedRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, + bool 
latency_in_micros) + : SplitRequestBase(command_stats, time_source, latency_in_micros), callbacks_(callbacks) {} - struct PendingRequest : public ConnPool::PoolCallbacks { + struct PendingRequest : public Common::Redis::Client::PoolCallbacks { PendingRequest(FragmentedRequest& parent, uint32_t index) : parent_(parent), index_(index) {} - // RedisProxy::ConnPool::PoolCallbacks - void onResponse(RespValuePtr&& value) override { + // Common::Redis::Client::PoolCallbacks + void onResponse(Common::Redis::RespValuePtr&& value) override { parent_.onChildResponse(std::move(value), index_); } void onFailure() override { parent_.onChildFailure(index_); } + bool onRedirection(const Common::Redis::RespValue& value) override { + return parent_.onChildRedirection(value, index_, conn_pool_); + } + FragmentedRequest& parent_; const uint32_t index_; - ConnPool::PoolRequest* handle_{}; + Common::Redis::Client::PoolRequest* handle_{}; + ConnPool::InstanceSharedPtr conn_pool_; }; - virtual void onChildResponse(RespValuePtr&& value, uint32_t index) PURE; + virtual void onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) PURE; void onChildFailure(uint32_t index); + bool onChildRedirection(const Common::Redis::RespValue& value, uint32_t index, + const ConnPool::InstanceSharedPtr& conn_pool); + virtual void recreate(Common::Redis::RespValue& request, uint32_t index) PURE; SplitCallbacks& callbacks_; - RespValuePtr pending_response_; + + Common::Redis::RespValuePtr incoming_request_; + Common::Redis::RespValuePtr pending_response_; std::vector pending_requests_; uint32_t num_pending_responses_; uint32_t error_count_{0}; @@ -181,16 +207,18 @@ class FragmentedRequest : public SplitRequestBase { */ class MGETRequest : public FragmentedRequest, Logger::Loggable { public: - static SplitRequestPtr create(ConnPool::Instance& conn_pool, const RespValue& incoming_request, + static SplitRequestPtr create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& 
callbacks, CommandStats& command_stats, - TimeSource& time_source); + TimeSource& time_source, bool latency_in_micros); private: - MGETRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source) - : FragmentedRequest(callbacks, command_stats, time_source) {} + MGETRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, + bool latency_in_micros) + : FragmentedRequest(callbacks, command_stats, time_source, latency_in_micros) {} // RedisProxy::CommandSplitter::FragmentedRequest - void onChildResponse(RespValuePtr&& value, uint32_t index) override; + void onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) override; + void recreate(Common::Redis::RespValue& request, uint32_t index) override; }; /** @@ -201,17 +229,18 @@ class MGETRequest : public FragmentedRequest, Logger::Loggable { public: - static SplitRequestPtr create(ConnPool::Instance& conn_pool, const RespValue& incoming_request, + static SplitRequestPtr create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source); + TimeSource& time_source, bool latency_in_micros); private: SplitKeysSumResultRequest(SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source) - : FragmentedRequest(callbacks, command_stats, time_source) {} + TimeSource& time_source, bool latency_in_micros) + : FragmentedRequest(callbacks, command_stats, time_source, latency_in_micros) {} // RedisProxy::CommandSplitter::FragmentedRequest - void onChildResponse(RespValuePtr&& value, uint32_t index) override; + void onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) override; + void recreate(Common::Redis::RespValue& request, uint32_t index) override; int64_t total_{0}; }; @@ -223,16 +252,18 @@ class SplitKeysSumResultRequest : public FragmentedRequest, Logger::Loggable { public: - static SplitRequestPtr create(ConnPool::Instance& 
conn_pool, const RespValue& incoming_request, + static SplitRequestPtr create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source); + TimeSource& time_source, bool latency_in_micros); private: - MSETRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source) - : FragmentedRequest(callbacks, command_stats, time_source) {} + MSETRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, + bool latency_in_micros) + : FragmentedRequest(callbacks, command_stats, time_source, latency_in_micros) {} // RedisProxy::CommandSplitter::FragmentedRequest - void onChildResponse(RespValuePtr&& value, uint32_t index) override; + void onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) override; + void recreate(Common::Redis::RespValue& request, uint32_t index) override; }; /** @@ -242,10 +273,12 @@ class MSETRequest : public FragmentedRequest, Logger::Loggable class CommandHandlerFactory : public CommandHandler, CommandHandlerBase { public: - CommandHandlerFactory(ConnPool::Instance& conn_pool) : CommandHandlerBase(conn_pool) {} - SplitRequestPtr startRequest(const RespValue& request, SplitCallbacks& callbacks, - CommandStats& command_stats, TimeSource& time_source) { - return RequestClass::create(conn_pool_, request, callbacks, command_stats, time_source); + CommandHandlerFactory(Router& router) : CommandHandlerBase(router) {} + SplitRequestPtr startRequest(Common::Redis::RespValuePtr&& request, SplitCallbacks& callbacks, + CommandStats& command_stats, TimeSource& time_source, + bool latency_in_micros) { + return RequestClass::create(router_, std::move(request), callbacks, command_stats, time_source, + latency_in_micros); } }; @@ -267,11 +300,12 @@ struct InstanceStats { class InstanceImpl : public Instance, Logger::Loggable { public: - InstanceImpl(ConnPool::InstancePtr&& conn_pool, Stats::Scope& scope, - const 
std::string& stat_prefix, TimeSource& time_source); + InstanceImpl(RouterPtr&& router, Stats::Scope& scope, const std::string& stat_prefix, + TimeSource& time_source, bool latency_in_micros); // RedisProxy::CommandSplitter::Instance - SplitRequestPtr makeRequest(const RespValue& request, SplitCallbacks& callbacks) override; + SplitRequestPtr makeRequest(Common::Redis::RespValuePtr&& request, + SplitCallbacks& callbacks) override; private: struct HandlerData { @@ -285,7 +319,7 @@ class InstanceImpl : public Instance, Logger::Loggable { CommandHandler& handler); void onInvalidRequest(SplitCallbacks& callbacks); - ConnPool::InstancePtr conn_pool_; + RouterPtr router_; CommandHandlerFactory simple_command_handler_; CommandHandlerFactory eval_command_handler_; CommandHandlerFactory mget_handler_; @@ -294,9 +328,22 @@ class InstanceImpl : public Instance, Logger::Loggable { TrieLookupTable handler_lookup_table_; InstanceStats stats_; const ToLowerTable to_lower_table_; + const bool latency_in_micros_; TimeSource& time_source_; }; +/** + * DoNothingPoolCallbacks is used for internally generated commands whose response is + * transparently filtered, and redirection never occurs (e.g., "asking", etc.). 
+ */ +class DoNothingPoolCallbacks : public Common::Redis::Client::PoolCallbacks { +public: + // Common::Redis::Client::PoolCallbacks + void onResponse(Common::Redis::RespValuePtr&&) override {} + void onFailure() override {} + bool onRedirection(const Common::Redis::RespValue&) override { return false; } +}; + } // namespace CommandSplitter } // namespace RedisProxy } // namespace NetworkFilters diff --git a/source/extensions/filters/network/redis_proxy/config.cc b/source/extensions/filters/network/redis_proxy/config.cc index fba73107f1bd9..9838c2cc5ebf4 100644 --- a/source/extensions/filters/network/redis_proxy/config.cc +++ b/source/extensions/filters/network/redis_proxy/config.cc @@ -8,10 +8,11 @@ #include "common/config/filter_json.h" -#include "extensions/filters/network/redis_proxy/codec_impl.h" +#include "extensions/filters/network/common/redis/client_impl.h" +#include "extensions/filters/network/common/redis/codec_impl.h" #include "extensions/filters/network/redis_proxy/command_splitter_impl.h" -#include "extensions/filters/network/redis_proxy/conn_pool_impl.h" #include "extensions/filters/network/redis_proxy/proxy_filter.h" +#include "extensions/filters/network/redis_proxy/router_impl.h" namespace Envoy { namespace Extensions { @@ -23,20 +24,48 @@ Network::FilterFactoryCb RedisProxyFilterConfigFactory::createFilterFactoryFromP Server::Configuration::FactoryContext& context) { ASSERT(!proto_config.stat_prefix().empty()); - ASSERT(!proto_config.cluster().empty()); ASSERT(proto_config.has_settings()); ProxyFilterConfigSharedPtr filter_config(std::make_shared( proto_config, context.scope(), context.drainDecision(), context.runtime())); - ConnPool::InstancePtr conn_pool(new ConnPool::InstanceImpl( - filter_config->cluster_name_, context.clusterManager(), - ConnPool::ClientFactoryImpl::instance_, context.threadLocal(), proto_config.settings())); - std::shared_ptr splitter(new CommandSplitter::InstanceImpl( - std::move(conn_pool), context.scope(), 
filter_config->stat_prefix_, context.timeSource())); + + envoy::config::filter::network::redis_proxy::v2::RedisProxy::PrefixRoutes prefix_routes( + proto_config.prefix_routes()); + + // set the catch-all route from the deprecated cluster and settings parameters. + if (prefix_routes.catch_all_cluster().empty() && prefix_routes.routes_size() == 0) { + if (proto_config.cluster().empty()) { + throw EnvoyException("cannot configure a redis-proxy without any upstream"); + } + + prefix_routes.set_catch_all_cluster(proto_config.cluster()); + } + + std::set unique_clusters; + for (auto& route : prefix_routes.routes()) { + unique_clusters.emplace(route.cluster()); + } + unique_clusters.emplace(prefix_routes.catch_all_cluster()); + + Upstreams upstreams; + for (auto& cluster : unique_clusters) { + upstreams.emplace(cluster, std::make_shared( + cluster, context.clusterManager(), + Common::Redis::Client::ClientFactoryImpl::instance_, + context.threadLocal(), proto_config.settings())); + } + + auto router = std::make_unique(prefix_routes, std::move(upstreams)); + + std::shared_ptr splitter = + std::make_shared( + std::move(router), context.scope(), filter_config->stat_prefix_, context.timeSource(), + proto_config.latency_in_micros()); return [splitter, filter_config](Network::FilterManager& filter_manager) -> void { - DecoderFactoryImpl factory; + Common::Redis::DecoderFactoryImpl factory; filter_manager.addReadFilter(std::make_shared( - factory, EncoderPtr{new EncoderImpl()}, *splitter, filter_config)); + factory, Common::Redis::EncoderPtr{new Common::Redis::EncoderImpl()}, *splitter, + filter_config)); }; } diff --git a/source/extensions/filters/network/redis_proxy/conn_pool.h b/source/extensions/filters/network/redis_proxy/conn_pool.h index c37a46dc185c6..a926f568f062a 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool.h +++ b/source/extensions/filters/network/redis_proxy/conn_pool.h @@ -6,7 +6,8 @@ #include "envoy/upstream/cluster_manager.h" -#include 
"extensions/filters/network/redis_proxy/codec.h" +#include "extensions/filters/network/common/redis/client.h" +#include "extensions/filters/network/common/redis/codec.h" namespace Envoy { namespace Extensions { @@ -14,112 +15,6 @@ namespace NetworkFilters { namespace RedisProxy { namespace ConnPool { -/** - * A handle to an outbound request. - */ -class PoolRequest { -public: - virtual ~PoolRequest() {} - - /** - * Cancel the request. No further request callbacks will be called. - */ - virtual void cancel() PURE; -}; - -/** - * Outbound request callbacks. - */ -class PoolCallbacks { -public: - virtual ~PoolCallbacks() {} - - /** - * Called when a pipelined response is received. - * @param value supplies the response which is now owned by the callee. - */ - virtual void onResponse(RespValuePtr&& value) PURE; - - /** - * Called when a network/protocol error occurs and there is no response. - */ - virtual void onFailure() PURE; -}; - -/** - * A single redis client connection. - */ -class Client : public Event::DeferredDeletable { -public: - virtual ~Client() {} - - /** - * Adds network connection callbacks to the underlying network connection. - */ - virtual void addConnectionCallbacks(Network::ConnectionCallbacks& callbacks) PURE; - - /** - * Closes the underlying network connection. - */ - virtual void close() PURE; - - /** - * Make a pipelined request to the remote redis server. - * @param request supplies the RESP request to make. - * @param callbacks supplies the request callbacks. - * @return PoolRequest* a handle to the active request or nullptr if the request could not be made - * for some reason. - */ - virtual PoolRequest* makeRequest(const RespValue& request, PoolCallbacks& callbacks) PURE; -}; - -typedef std::unique_ptr ClientPtr; - -/** - * Configuration for a redis connection pool. - */ -class Config { -public: - virtual ~Config() {} - - /** - * @return std::chrono::milliseconds the timeout for an individual redis operation. 
Currently, - * all operations use the same timeout. - */ - virtual std::chrono::milliseconds opTimeout() const PURE; - - /** - * @return bool disable outlier events even if the cluster has it enabled. This is used by the - * healthchecker's connection pool to avoid double counting active healthcheck operations as - * passive healthcheck operations. - */ - virtual bool disableOutlierEvents() const PURE; - - /** - * @return when enabled, a hash tagging function will be used to guarantee that keys with the - * same hash tag will be forwarded to the same upstream. - */ - virtual bool enableHashtagging() const PURE; -}; - -/** - * A factory for individual redis client connections. - */ -class ClientFactory { -public: - virtual ~ClientFactory() {} - - /** - * Create a client given an upstream host. - * @param host supplies the upstream host. - * @param dispatcher supplies the owning thread's dispatcher. - * @param config supplies the connection pool configuration. - * @return ClientPtr a new connection pool client. - */ - virtual ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, - const Config& config) PURE; -}; - /** * A redis connection pool. Wraps M connections to N upstream hosts, consistent hashing, * pipelining, failure handling, etc. @@ -136,11 +31,26 @@ class Instance { * @return PoolRequest* a handle to the active request or nullptr if the request could not be made * for some reason. */ - virtual PoolRequest* makeRequest(const std::string& hash_key, const RespValue& request, - PoolCallbacks& callbacks) PURE; + virtual Common::Redis::Client::PoolRequest* + makeRequest(const std::string& hash_key, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks) PURE; + + /** + * Makes a redis request based on IP address and TCP port of the upstream host (e.g., moved/ask + * cluster redirection). + * @param host_address supplies the IP address and TCP port of the upstream host to receive the + * request. 
+ * @param request supplies the Redis request to make. + * @param callbacks supplies the request completion callbacks. + * @return PoolRequest* a handle to the active request or nullptr if the request could not be made + * for some reason. + */ + virtual Common::Redis::Client::PoolRequest* + makeRequestToHost(const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks) PURE; }; -typedef std::unique_ptr InstancePtr; +typedef std::shared_ptr InstanceSharedPtr; } // namespace ConnPool } // namespace RedisProxy diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc b/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc index 596233af4f55c..018d1135231af 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc +++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc @@ -13,185 +13,9 @@ namespace NetworkFilters { namespace RedisProxy { namespace ConnPool { -ConfigImpl::ConfigImpl( - const envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings& config) - : op_timeout_(PROTOBUF_GET_MS_REQUIRED(config, op_timeout)), - enable_hashtagging_(config.enable_hashtagging()) {} - -ClientPtr ClientImpl::create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, - EncoderPtr&& encoder, DecoderFactory& decoder_factory, - const Config& config) { - - std::unique_ptr client( - new ClientImpl(host, dispatcher, std::move(encoder), decoder_factory, config)); - client->connection_ = host->createConnection(dispatcher, nullptr, nullptr).connection_; - client->connection_->addConnectionCallbacks(*client); - client->connection_->addReadFilter(Network::ReadFilterSharedPtr{new UpstreamReadFilter(*client)}); - client->connection_->connect(); - client->connection_->noDelay(true); - return std::move(client); -} - -ClientImpl::ClientImpl(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, - EncoderPtr&& encoder, 
DecoderFactory& decoder_factory, const Config& config) - : host_(host), encoder_(std::move(encoder)), decoder_(decoder_factory.create(*this)), - config_(config), - connect_or_op_timer_(dispatcher.createTimer([this]() -> void { onConnectOrOpTimeout(); })) { - host->cluster().stats().upstream_cx_total_.inc(); - host->stats().cx_total_.inc(); - host->cluster().stats().upstream_cx_active_.inc(); - host->stats().cx_active_.inc(); - connect_or_op_timer_->enableTimer(host->cluster().connectTimeout()); -} - -ClientImpl::~ClientImpl() { - ASSERT(pending_requests_.empty()); - ASSERT(connection_->state() == Network::Connection::State::Closed); - host_->cluster().stats().upstream_cx_active_.dec(); - host_->stats().cx_active_.dec(); -} - -void ClientImpl::close() { connection_->close(Network::ConnectionCloseType::NoFlush); } - -PoolRequest* ClientImpl::makeRequest(const RespValue& request, PoolCallbacks& callbacks) { - ASSERT(connection_->state() == Network::Connection::State::Open); - - pending_requests_.emplace_back(*this, callbacks); - encoder_->encode(request, encoder_buffer_); - connection_->write(encoder_buffer_, false); - - // Only boost the op timeout if: - // - We are not already connected. Otherwise, we are governed by the connect timeout and the timer - // will be reset when/if connection occurs. This allows a relatively long connection spin up - // time for example if TLS is being used. - // - This is the first request on the pipeline. Otherwise the timeout would effectively start on - // the last operation. 
- if (connected_ && pending_requests_.size() == 1) { - connect_or_op_timer_->enableTimer(config_.opTimeout()); - } - - return &pending_requests_.back(); -} - -void ClientImpl::onConnectOrOpTimeout() { - putOutlierEvent(Upstream::Outlier::Result::TIMEOUT); - if (connected_) { - host_->cluster().stats().upstream_rq_timeout_.inc(); - host_->stats().rq_timeout_.inc(); - } else { - host_->cluster().stats().upstream_cx_connect_timeout_.inc(); - host_->stats().cx_connect_fail_.inc(); - } - - connection_->close(Network::ConnectionCloseType::NoFlush); -} - -void ClientImpl::onData(Buffer::Instance& data) { - try { - decoder_->decode(data); - } catch (ProtocolError&) { - putOutlierEvent(Upstream::Outlier::Result::REQUEST_FAILED); - host_->cluster().stats().upstream_cx_protocol_error_.inc(); - host_->stats().rq_error_.inc(); - connection_->close(Network::ConnectionCloseType::NoFlush); - } -} - -void ClientImpl::putOutlierEvent(Upstream::Outlier::Result result) { - if (!config_.disableOutlierEvents()) { - host_->outlierDetector().putResult(result); - } -} - -void ClientImpl::onEvent(Network::ConnectionEvent event) { - if (event == Network::ConnectionEvent::RemoteClose || - event == Network::ConnectionEvent::LocalClose) { - if (!pending_requests_.empty()) { - host_->cluster().stats().upstream_cx_destroy_with_active_rq_.inc(); - if (event == Network::ConnectionEvent::RemoteClose) { - putOutlierEvent(Upstream::Outlier::Result::SERVER_FAILURE); - host_->cluster().stats().upstream_cx_destroy_remote_with_active_rq_.inc(); - } - if (event == Network::ConnectionEvent::LocalClose) { - host_->cluster().stats().upstream_cx_destroy_local_with_active_rq_.inc(); - } - } - - while (!pending_requests_.empty()) { - PendingRequest& request = pending_requests_.front(); - if (!request.canceled_) { - request.callbacks_.onFailure(); - } else { - host_->cluster().stats().upstream_rq_cancelled_.inc(); - } - pending_requests_.pop_front(); - } - - connect_or_op_timer_->disableTimer(); - } else if 
(event == Network::ConnectionEvent::Connected) { - connected_ = true; - ASSERT(!pending_requests_.empty()); - connect_or_op_timer_->enableTimer(config_.opTimeout()); - } - - if (event == Network::ConnectionEvent::RemoteClose && !connected_) { - host_->cluster().stats().upstream_cx_connect_fail_.inc(); - host_->stats().cx_connect_fail_.inc(); - } -} - -void ClientImpl::onRespValue(RespValuePtr&& value) { - ASSERT(!pending_requests_.empty()); - PendingRequest& request = pending_requests_.front(); - if (!request.canceled_) { - request.callbacks_.onResponse(std::move(value)); - } else { - host_->cluster().stats().upstream_rq_cancelled_.inc(); - } - pending_requests_.pop_front(); - - // If there are no remaining ops in the pipeline we need to disable the timer. - // Otherwise we boost the timer since we are receiving responses and there are more to flush out. - if (pending_requests_.empty()) { - connect_or_op_timer_->disableTimer(); - } else { - connect_or_op_timer_->enableTimer(config_.opTimeout()); - } - - putOutlierEvent(Upstream::Outlier::Result::SUCCESS); -} - -ClientImpl::PendingRequest::PendingRequest(ClientImpl& parent, PoolCallbacks& callbacks) - : parent_(parent), callbacks_(callbacks) { - parent.host_->cluster().stats().upstream_rq_total_.inc(); - parent.host_->stats().rq_total_.inc(); - parent.host_->cluster().stats().upstream_rq_active_.inc(); - parent.host_->stats().rq_active_.inc(); -} - -ClientImpl::PendingRequest::~PendingRequest() { - parent_.host_->cluster().stats().upstream_rq_active_.dec(); - parent_.host_->stats().rq_active_.dec(); -} - -void ClientImpl::PendingRequest::cancel() { - // If we get a cancellation, we just mark the pending request as cancelled, and then we drop - // the response as it comes through. There is no reason to blow away the connection when the - // remote is already responding as fast as possible. 
- canceled_ = true; -} - -ClientFactoryImpl ClientFactoryImpl::instance_; - -ClientPtr ClientFactoryImpl::create(Upstream::HostConstSharedPtr host, - Event::Dispatcher& dispatcher, const Config& config) { - return ClientImpl::create(host, dispatcher, EncoderPtr{new EncoderImpl()}, decoder_factory_, - config); -} - InstanceImpl::InstanceImpl( - const std::string& cluster_name, Upstream::ClusterManager& cm, ClientFactory& client_factory, - ThreadLocal::SlotAllocator& tls, + const std::string& cluster_name, Upstream::ClusterManager& cm, + Common::Redis::Client::ClientFactory& client_factory, ThreadLocal::SlotAllocator& tls, const envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings& config) : cm_(cm), client_factory_(client_factory), tls_(tls.allocateSlot()), config_(config) { tls_->set([this, cluster_name]( @@ -200,9 +24,17 @@ InstanceImpl::InstanceImpl( }); } -PoolRequest* InstanceImpl::makeRequest(const std::string& key, const RespValue& value, - PoolCallbacks& callbacks) { - return tls_->getTyped().makeRequest(key, value, callbacks); +Common::Redis::Client::PoolRequest* +InstanceImpl::makeRequest(const std::string& key, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks) { + return tls_->getTyped().makeRequest(key, request, callbacks); +} + +Common::Redis::Client::PoolRequest* +InstanceImpl::makeRequestToHost(const std::string& host_address, + const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks) { + return tls_->getTyped().makeRequestToHost(host_address, request, callbacks); } InstanceImpl::ThreadLocalPool::ThreadLocalPool(InstanceImpl& parent, Event::Dispatcher& dispatcher, @@ -244,6 +76,13 @@ void InstanceImpl::ThreadLocalPool::onClusterAddOrUpdateNonVirtual( const std::vector& hosts_removed) -> void { onHostsRemoved(hosts_removed); }); + + ASSERT(host_address_map_.empty()); + for (uint32_t i = 0; i < cluster_->prioritySet().hostSetsPerPriority().size(); i++) 
{ + for (auto& host : cluster_->prioritySet().hostSetsPerPriority()[i]->hosts()) { + host_address_map_[host->address()->asString()] = host; + } + } } void InstanceImpl::ThreadLocalPool::onClusterRemoval(const std::string& cluster_name) { @@ -259,6 +98,7 @@ void InstanceImpl::ThreadLocalPool::onClusterRemoval(const std::string& cluster_ cluster_ = nullptr; host_set_member_update_cb_handle_ = nullptr; + host_address_map_.clear(); } void InstanceImpl::ThreadLocalPool::onHostsRemoved( @@ -270,12 +110,14 @@ void InstanceImpl::ThreadLocalPool::onHostsRemoved( // we just close the connection. This will fail any pending requests. it->second->redis_client_->close(); } + host_address_map_.erase(host->address()->asString()); } } -PoolRequest* InstanceImpl::ThreadLocalPool::makeRequest(const std::string& key, - const RespValue& request, - PoolCallbacks& callbacks) { +Common::Redis::Client::PoolRequest* +InstanceImpl::ThreadLocalPool::makeRequest(const std::string& key, + const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks) { if (cluster_ == nullptr) { ASSERT(client_map_.empty()); ASSERT(host_set_member_update_cb_handle_ == nullptr); @@ -296,6 +138,88 @@ PoolRequest* InstanceImpl::ThreadLocalPool::makeRequest(const std::string& key, client->redis_client_->addConnectionCallbacks(*client); } + // Keep host_address_map_ in sync with client_map_. 
+ auto host_cached_by_address = host_address_map_.find(host->address()->asString()); + if (host_cached_by_address == host_address_map_.end()) { + host_address_map_[host->address()->asString()] = host; + } + + return client->redis_client_->makeRequest(request, callbacks); +} + +Common::Redis::Client::PoolRequest* +InstanceImpl::ThreadLocalPool::makeRequestToHost(const std::string& host_address, + const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks) { + if (cluster_ == nullptr) { + ASSERT(client_map_.empty()); + ASSERT(host_set_member_update_cb_handle_ == nullptr); + return nullptr; + } + + auto colon_pos = host_address.rfind(":"); + if ((colon_pos == std::string::npos) || (colon_pos == (host_address.size() - 1))) { + return nullptr; + } + + const std::string ip_address = host_address.substr(0, colon_pos); + const bool ipv6 = (ip_address.find(":") != std::string::npos); + std::string host_address_map_key; + Network::Address::InstanceConstSharedPtr address_ptr; + + if (!ipv6) { + host_address_map_key = host_address; + } else { + const auto ip_port = absl::string_view(host_address).substr(colon_pos + 1); + uint64_t ip_port_number; + if (!absl::SimpleAtoi(ip_port, &ip_port_number) || (ip_port_number > 65535)) { + return nullptr; + } + try { + address_ptr = std::make_shared(ip_address, ip_port_number); + } catch (const EnvoyException&) { + return nullptr; + } + host_address_map_key = address_ptr->asString(); + } + + auto it = host_address_map_.find(host_address_map_key); + if (it == host_address_map_.end()) { + // This host is not known to the cluster manager. Create a new host and insert it into the map. + // TODO(msukalski): Add logic to track the number of these "unknown" host connections, + // cap the number of these connections, and implement time-out and cleaning logic, etc. + + if (!ipv6) { + // Only create an IPv4 address instance if we need a new Upstream::HostImpl. 
+ const auto ip_port = absl::string_view(host_address).substr(colon_pos + 1); + uint64_t ip_port_number; + if (!absl::SimpleAtoi(ip_port, &ip_port_number) || (ip_port_number > 65535)) { + return nullptr; + } + try { + address_ptr = std::make_shared(ip_address, ip_port_number); + } catch (const EnvoyException&) { + return nullptr; + } + } + Upstream::HostSharedPtr new_host{new Upstream::HostImpl( + cluster_->info(), "", address_ptr, envoy::api::v2::core::Metadata::default_instance(), 1, + envoy::api::v2::core::Locality(), + envoy::api::v2::endpoint::Endpoint::HealthCheckConfig::default_instance(), 0, + envoy::api::v2::core::HealthStatus::UNKNOWN)}; + host_address_map_[host_address_map_key] = new_host; + it = host_address_map_.find(host_address_map_key); + } + + ThreadLocalActiveClientPtr& client = client_map_[it->second]; + if (!client) { + client = std::make_unique(*this); + client->host_ = it->second; + client->redis_client_ = + parent_.client_factory_.create(it->second, dispatcher_, parent_.config_); + client->redis_client_->addConnectionCallbacks(*client); + } + return client->redis_client_->makeRequest(request, callbacks); } diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h index ba89f3098a8d1..8ed565ac50b92 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h +++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h @@ -14,11 +14,15 @@ #include "common/buffer/buffer_impl.h" #include "common/common/hash.h" +#include "common/network/address_impl.h" #include "common/network/filter_impl.h" #include "common/protobuf/utility.h" +#include "common/singleton/const_singleton.h" #include "common/upstream/load_balancer_impl.h" +#include "common/upstream/upstream_impl.h" -#include "extensions/filters/network/redis_proxy/codec_impl.h" +#include "extensions/filters/network/common/redis/client_impl.h" +#include 
"extensions/filters/network/common/redis/codec_impl.h" #include "extensions/filters/network/redis_proxy/conn_pool.h" namespace Envoy { @@ -30,107 +34,19 @@ namespace ConnPool { // TODO(mattklein123): Circuit breaking // TODO(rshriram): Fault injection -class ConfigImpl : public Config { -public: - ConfigImpl( - const envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings& config); - - bool disableOutlierEvents() const override { return false; } - std::chrono::milliseconds opTimeout() const override { return op_timeout_; } - bool enableHashtagging() const override { return enable_hashtagging_; } - -private: - const std::chrono::milliseconds op_timeout_; - const bool enable_hashtagging_; -}; - -class ClientImpl : public Client, public DecoderCallbacks, public Network::ConnectionCallbacks { -public: - static ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, - EncoderPtr&& encoder, DecoderFactory& decoder_factory, - const Config& config); - - ~ClientImpl(); - - // RedisProxy::ConnPool::Client - void addConnectionCallbacks(Network::ConnectionCallbacks& callbacks) override { - connection_->addConnectionCallbacks(callbacks); - } - void close() override; - PoolRequest* makeRequest(const RespValue& request, PoolCallbacks& callbacks) override; - -private: - struct UpstreamReadFilter : public Network::ReadFilterBaseImpl { - UpstreamReadFilter(ClientImpl& parent) : parent_(parent) {} - - // Network::ReadFilter - Network::FilterStatus onData(Buffer::Instance& data, bool) override { - parent_.onData(data); - return Network::FilterStatus::Continue; - } - - ClientImpl& parent_; - }; - - struct PendingRequest : public PoolRequest { - PendingRequest(ClientImpl& parent, PoolCallbacks& callbacks); - ~PendingRequest(); - - // RedisProxy::ConnPool::PoolRequest - void cancel() override; - - ClientImpl& parent_; - PoolCallbacks& callbacks_; - bool canceled_{}; - }; - - ClientImpl(Upstream::HostConstSharedPtr host, Event::Dispatcher& 
dispatcher, EncoderPtr&& encoder, - DecoderFactory& decoder_factory, const Config& config); - void onConnectOrOpTimeout(); - void onData(Buffer::Instance& data); - void putOutlierEvent(Upstream::Outlier::Result result); - - // RedisProxy::DecoderCallbacks - void onRespValue(RespValuePtr&& value) override; - - // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent event) override; - void onAboveWriteBufferHighWatermark() override {} - void onBelowWriteBufferLowWatermark() override {} - - Upstream::HostConstSharedPtr host_; - Network::ClientConnectionPtr connection_; - EncoderPtr encoder_; - Buffer::OwnedImpl encoder_buffer_; - DecoderPtr decoder_; - const Config& config_; - std::list pending_requests_; - Event::TimerPtr connect_or_op_timer_; - bool connected_{}; -}; - -class ClientFactoryImpl : public ClientFactory { -public: - // RedisProxy::ConnPool::ClientFactoryImpl - ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, - const Config& config) override; - - static ClientFactoryImpl instance_; - -private: - DecoderFactoryImpl decoder_factory_; -}; - class InstanceImpl : public Instance { public: InstanceImpl( - const std::string& cluster_name, Upstream::ClusterManager& cm, ClientFactory& client_factory, - ThreadLocal::SlotAllocator& tls, + const std::string& cluster_name, Upstream::ClusterManager& cm, + Common::Redis::Client::ClientFactory& client_factory, ThreadLocal::SlotAllocator& tls, const envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings& config); - // RedisProxy::ConnPool::Instance - PoolRequest* makeRequest(const std::string& key, const RespValue& request, - PoolCallbacks& callbacks) override; + Common::Redis::Client::PoolRequest* + makeRequest(const std::string& key, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks) override; + Common::Redis::Client::PoolRequest* + makeRequestToHost(const std::string& host_address, const 
Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks) override; private: struct ThreadLocalPool; @@ -145,7 +61,7 @@ class InstanceImpl : public Instance { ThreadLocalPool& parent_; Upstream::HostConstSharedPtr host_; - ClientPtr redis_client_; + Common::Redis::Client::ClientPtr redis_client_; }; typedef std::unique_ptr ThreadLocalActiveClientPtr; @@ -154,8 +70,12 @@ class InstanceImpl : public Instance { public Upstream::ClusterUpdateCallbacks { ThreadLocalPool(InstanceImpl& parent, Event::Dispatcher& dispatcher, std::string cluster_name); ~ThreadLocalPool(); - PoolRequest* makeRequest(const std::string& key, const RespValue& request, - PoolCallbacks& callbacks); + Common::Redis::Client::PoolRequest* + makeRequest(const std::string& key, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks); + Common::Redis::Client::PoolRequest* + makeRequestToHost(const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks); void onClusterAddOrUpdateNonVirtual(Upstream::ThreadLocalCluster& cluster); void onHostsRemoved(const std::vector& hosts_removed); @@ -172,6 +92,7 @@ class InstanceImpl : public Instance { Upstream::ThreadLocalCluster* cluster_{}; std::unordered_map client_map_; Envoy::Common::CallbackHandle* host_set_member_update_cb_handle_{}; + std::unordered_map host_address_map_; }; struct LbContextImpl : public Upstream::LoadBalancerContextBase { @@ -186,9 +107,9 @@ class InstanceImpl : public Instance { }; Upstream::ClusterManager& cm_; - ClientFactory& client_factory_; + Common::Redis::Client::ClientFactory& client_factory_; ThreadLocal::SlotPtr tls_; - ConfigImpl config_; + Common::Redis::Client::ConfigImpl config_; }; } // namespace ConnPool diff --git a/source/extensions/filters/network/redis_proxy/proxy_filter.cc b/source/extensions/filters/network/redis_proxy/proxy_filter.cc index b676332136ae3..acc5ccca0e211 100644 --- 
a/source/extensions/filters/network/redis_proxy/proxy_filter.cc +++ b/source/extensions/filters/network/redis_proxy/proxy_filter.cc @@ -17,7 +17,7 @@ namespace RedisProxy { ProxyFilterConfig::ProxyFilterConfig( const envoy::config::filter::network::redis_proxy::v2::RedisProxy& config, Stats::Scope& scope, const Network::DrainDecision& drain_decision, Runtime::Loader& runtime) - : drain_decision_(drain_decision), runtime_(runtime), cluster_name_(config.cluster()), + : drain_decision_(drain_decision), runtime_(runtime), stat_prefix_(fmt::format("redis.{}.", config.stat_prefix())), stats_(generateStats(stat_prefix_, scope)) {} @@ -26,8 +26,9 @@ ProxyStats ProxyFilterConfig::generateStats(const std::string& prefix, Stats::Sc ALL_REDIS_PROXY_STATS(POOL_COUNTER_PREFIX(scope, prefix), POOL_GAUGE_PREFIX(scope, prefix))}; } -ProxyFilter::ProxyFilter(DecoderFactory& factory, EncoderPtr&& encoder, - CommandSplitter::Instance& splitter, ProxyFilterConfigSharedPtr config) +ProxyFilter::ProxyFilter(Common::Redis::DecoderFactory& factory, + Common::Redis::EncoderPtr&& encoder, CommandSplitter::Instance& splitter, + ProxyFilterConfigSharedPtr config) : decoder_(factory.create(*this)), encoder_(std::move(encoder)), splitter_(splitter), config_(config) { config_->stats_.downstream_cx_total_.inc(); @@ -49,10 +50,10 @@ void ProxyFilter::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& ca nullptr, nullptr}); } -void ProxyFilter::onRespValue(RespValuePtr&& value) { +void ProxyFilter::onRespValue(Common::Redis::RespValuePtr&& value) { pending_requests_.emplace_back(*this); PendingRequest& request = pending_requests_.back(); - CommandSplitter::SplitRequestPtr split = splitter_.makeRequest(*value, request); + CommandSplitter::SplitRequestPtr split = splitter_.makeRequest(std::move(value), request); if (split) { // The splitter can immediately respond and destroy the pending request. Only store the handle // if the request is still alive. 
@@ -72,7 +73,7 @@ void ProxyFilter::onEvent(Network::ConnectionEvent event) { } } -void ProxyFilter::onResponse(PendingRequest& request, RespValuePtr&& value) { +void ProxyFilter::onResponse(PendingRequest& request, Common::Redis::RespValuePtr&& value) { ASSERT(!pending_requests_.empty()); request.pending_response_ = std::move(value); request.request_handle_ = nullptr; @@ -100,10 +101,10 @@ Network::FilterStatus ProxyFilter::onData(Buffer::Instance& data, bool) { try { decoder_->decode(data); return Network::FilterStatus::Continue; - } catch (ProtocolError&) { + } catch (Common::Redis::ProtocolError&) { config_->stats_.downstream_cx_protocol_error_.inc(); - RespValue error; - error.type(RespType::Error); + Common::Redis::RespValue error; + error.type(Common::Redis::RespType::Error); error.asString() = "downstream protocol error"; encoder_->encode(error, encoder_buffer_); callbacks_->connection().write(encoder_buffer_, false); diff --git a/source/extensions/filters/network/redis_proxy/proxy_filter.h b/source/extensions/filters/network/redis_proxy/proxy_filter.h index c0b825a85b91f..ae2141a322d94 100644 --- a/source/extensions/filters/network/redis_proxy/proxy_filter.h +++ b/source/extensions/filters/network/redis_proxy/proxy_filter.h @@ -13,7 +13,7 @@ #include "common/buffer/buffer_impl.h" -#include "extensions/filters/network/redis_proxy/codec.h" +#include "extensions/filters/network/common/redis/codec.h" #include "extensions/filters/network/redis_proxy/command_splitter.h" namespace Envoy { @@ -56,7 +56,6 @@ class ProxyFilterConfig { const Network::DrainDecision& drain_decision_; Runtime::Loader& runtime_; - const std::string cluster_name_; const std::string stat_prefix_; const std::string redis_drain_close_runtime_key_{"redis.drain_close_enabled"}; ProxyStats stats_; @@ -72,11 +71,11 @@ typedef std::shared_ptr ProxyFilterConfigSharedPtr; * multiplex them onto a consistently hashed connection pool of backend servers. 
*/ class ProxyFilter : public Network::ReadFilter, - public DecoderCallbacks, + public Common::Redis::DecoderCallbacks, public Network::ConnectionCallbacks { public: - ProxyFilter(DecoderFactory& factory, EncoderPtr&& encoder, CommandSplitter::Instance& splitter, - ProxyFilterConfigSharedPtr config); + ProxyFilter(Common::Redis::DecoderFactory& factory, Common::Redis::EncoderPtr&& encoder, + CommandSplitter::Instance& splitter, ProxyFilterConfigSharedPtr config); ~ProxyFilter(); // Network::ReadFilter @@ -89,8 +88,8 @@ class ProxyFilter : public Network::ReadFilter, void onAboveWriteBufferHighWatermark() override {} void onBelowWriteBufferLowWatermark() override {} - // RedisProxy::DecoderCallbacks - void onRespValue(RespValuePtr&& value) override; + // Common::Redis::DecoderCallbacks + void onRespValue(Common::Redis::RespValuePtr&& value) override; private: struct PendingRequest : public CommandSplitter::SplitCallbacks { @@ -98,17 +97,19 @@ class ProxyFilter : public Network::ReadFilter, ~PendingRequest(); // RedisProxy::CommandSplitter::SplitCallbacks - void onResponse(RespValuePtr&& value) override { parent_.onResponse(*this, std::move(value)); } + void onResponse(Common::Redis::RespValuePtr&& value) override { + parent_.onResponse(*this, std::move(value)); + } ProxyFilter& parent_; - RespValuePtr pending_response_; + Common::Redis::RespValuePtr pending_response_; CommandSplitter::SplitRequestPtr request_handle_; }; - void onResponse(PendingRequest& request, RespValuePtr&& value); + void onResponse(PendingRequest& request, Common::Redis::RespValuePtr&& value); - DecoderPtr decoder_; - EncoderPtr encoder_; + Common::Redis::DecoderPtr decoder_; + Common::Redis::EncoderPtr encoder_; CommandSplitter::Instance& splitter_; ProxyFilterConfigSharedPtr config_; Buffer::OwnedImpl encoder_buffer_; diff --git a/source/extensions/filters/network/redis_proxy/router.h b/source/extensions/filters/network/redis_proxy/router.h new file mode 100644 index 
0000000000000..5312e34cea4be --- /dev/null +++ b/source/extensions/filters/network/redis_proxy/router.h @@ -0,0 +1,37 @@ +#pragma once + +#include +#include + +#include "envoy/common/pure.h" +#include "envoy/config/filter/network/redis_proxy/v2/redis_proxy.pb.h" + +#include "extensions/filters/network/redis_proxy/conn_pool.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RedisProxy { + +/* + * Decorator of a connection pool in order to enable key based routing. + */ +class Router { +public: + virtual ~Router() = default; + + /** + * Returns a connection pool that matches a given route. When no match is found, the catch all + * pool is used. When remove prefix is set to true, the prefix will be removed from the key. + * @param key mutable reference to the key of the current command. + * @return a handle to the connection pool. + */ + virtual ConnPool::InstanceSharedPtr upstreamPool(std::string& key) PURE; +}; + +typedef std::unique_ptr RouterPtr; + +} // namespace RedisProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/redis_proxy/router_impl.cc b/source/extensions/filters/network/redis_proxy/router_impl.cc new file mode 100644 index 0000000000000..cd963e1ec778c --- /dev/null +++ b/source/extensions/filters/network/redis_proxy/router_impl.cc @@ -0,0 +1,61 @@ +#include "extensions/filters/network/redis_proxy/router_impl.h" + +#include "common/common/fmt.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RedisProxy { + +PrefixRoutes::PrefixRoutes( + const envoy::config::filter::network::redis_proxy::v2::RedisProxy::PrefixRoutes& config, + Upstreams&& upstreams) + : case_insensitive_(config.case_insensitive()), upstreams_(std::move(upstreams)), + catch_all_upstream_(config.catch_all_cluster().empty() + ? 
nullptr + : upstreams_.at(config.catch_all_cluster())) { + + for (auto const& route : config.routes()) { + std::string copy(route.prefix()); + + if (case_insensitive_) { + to_lower_table_.toLowerCase(copy); + } + + auto success = prefix_lookup_table_.add(copy.c_str(), + std::make_shared(Prefix{ + route.prefix(), + route.remove_prefix(), + upstreams_.at(route.cluster()), + }), + false); + if (!success) { + throw EnvoyException(fmt::format("prefix `{}` already exists.", route.prefix())); + } + } +} + +ConnPool::InstanceSharedPtr PrefixRoutes::upstreamPool(std::string& key) { + PrefixPtr value = nullptr; + if (case_insensitive_) { + std::string copy(key); + to_lower_table_.toLowerCase(copy); + value = prefix_lookup_table_.findLongestPrefix(copy.c_str()); + } else { + value = prefix_lookup_table_.findLongestPrefix(key.c_str()); + } + + if (value != nullptr) { + if (value->remove_prefix) { + key.erase(0, value->prefix.length()); + } + return value->upstream; + } + + return catch_all_upstream_; +} + +} // namespace RedisProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/redis_proxy/router_impl.h b/source/extensions/filters/network/redis_proxy/router_impl.h new file mode 100644 index 0000000000000..2744e88eff4cd --- /dev/null +++ b/source/extensions/filters/network/redis_proxy/router_impl.h @@ -0,0 +1,53 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "envoy/config/filter/network/redis_proxy/v2/redis_proxy.pb.h" +#include "envoy/thread_local/thread_local.h" +#include "envoy/upstream/cluster_manager.h" + +#include "common/common/to_lower_table.h" + +#include "extensions/filters/network/redis_proxy/conn_pool_impl.h" +#include "extensions/filters/network/redis_proxy/router.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RedisProxy { + +typedef std::map Upstreams; + +class PrefixRoutes : public Router { +public: + 
PrefixRoutes(const envoy::config::filter::network::redis_proxy::v2::RedisProxy::PrefixRoutes& + prefix_routes, + Upstreams&& upstreams); + + ConnPool::InstanceSharedPtr upstreamPool(std::string& key) override; + +private: + struct Prefix { + const std::string prefix; + const bool remove_prefix; + ConnPool::InstanceSharedPtr upstream; + }; + + typedef std::shared_ptr PrefixPtr; + + TrieLookupTable prefix_lookup_table_; + const ToLowerTable to_lower_table_; + const bool case_insensitive_; + Upstreams upstreams_; + ConnPool::InstanceSharedPtr catch_all_upstream_; +}; + +} // namespace RedisProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/tcp_proxy/config.cc b/source/extensions/filters/network/tcp_proxy/config.cc index 8cf146109ebe8..f1b93b910cd97 100644 --- a/source/extensions/filters/network/tcp_proxy/config.cc +++ b/source/extensions/filters/network/tcp_proxy/config.cc @@ -30,7 +30,7 @@ Network::FilterFactoryCb ConfigFactory::createFilterFactoryFromProtoTyped( std::make_shared(proto_config, context)); return [filter_config, &context](Network::FilterManager& filter_manager) -> void { filter_manager.addReadFilter(std::make_shared( - filter_config, context.clusterManager(), context.dispatcher().timeSystem())); + filter_config, context.clusterManager(), context.dispatcher().timeSource())); }; } diff --git a/source/extensions/filters/network/thrift_proxy/binary_protocol_impl.cc b/source/extensions/filters/network/thrift_proxy/binary_protocol_impl.cc index 68927619859d2..ea0907fa50731 100644 --- a/source/extensions/filters/network/thrift_proxy/binary_protocol_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/binary_protocol_impl.cc @@ -292,7 +292,7 @@ void BinaryProtocolImpl::writeFieldEnd(Buffer::Instance& buffer) { UNREFERENCED_ void BinaryProtocolImpl::writeMapBegin(Buffer::Instance& buffer, FieldType key_type, FieldType value_type, uint32_t size) { - if (size > 
std::numeric_limits::max()) { + if (size > static_cast(std::numeric_limits::max())) { throw EnvoyException(fmt::format("illegal binary protocol map size {}", size)); } @@ -305,7 +305,7 @@ void BinaryProtocolImpl::writeMapEnd(Buffer::Instance& buffer) { UNREFERENCED_PA void BinaryProtocolImpl::writeListBegin(Buffer::Instance& buffer, FieldType elem_type, uint32_t size) { - if (size > std::numeric_limits::max()) { + if (size > static_cast(std::numeric_limits::max())) { throw EnvoyException(fmt::format("illegal binary protocol list/set size {}", size)); } diff --git a/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.cc b/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.cc index 16f5300bf1771..7c2a6c0a851b1 100644 --- a/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.cc @@ -467,7 +467,7 @@ void CompactProtocolImpl::writeFieldEnd(Buffer::Instance& buffer) { void CompactProtocolImpl::writeMapBegin(Buffer::Instance& buffer, FieldType key_type, FieldType value_type, uint32_t size) { - if (size > std::numeric_limits::max()) { + if (size > static_cast(std::numeric_limits::max())) { throw EnvoyException(fmt::format("illegal compact protocol map size {}", size)); } @@ -486,7 +486,7 @@ void CompactProtocolImpl::writeMapEnd(Buffer::Instance& buffer) { UNREFERENCED_P void CompactProtocolImpl::writeListBegin(Buffer::Instance& buffer, FieldType elem_type, uint32_t size) { - if (size > std::numeric_limits::max()) { + if (size > static_cast(std::numeric_limits::max())) { throw EnvoyException(fmt::format("illegal compact protocol list/set size {}", size)); } diff --git a/source/extensions/filters/network/thrift_proxy/config.cc b/source/extensions/filters/network/thrift_proxy/config.cc index 1f988684e3bc0..50d8ab1359786 100644 --- a/source/extensions/filters/network/thrift_proxy/config.cc +++ 
b/source/extensions/filters/network/thrift_proxy/config.cc @@ -109,7 +109,7 @@ Network::FilterFactoryCb ThriftProxyFilterConfigFactory::createFilterFactoryFrom return [filter_config, &context](Network::FilterManager& filter_manager) -> void { filter_manager.addReadFilter(std::make_shared( - *filter_config, context.random(), context.dispatcher().timeSystem())); + *filter_config, context.random(), context.dispatcher().timeSource())); }; } @@ -156,7 +156,7 @@ ProtocolPtr ConfigImpl::createProtocol() { void ConfigImpl::processFilter( const envoy::config::filter::network::thrift_proxy::v2alpha1::ThriftFilter& proto_config) { - const ProtobufTypes::String& string_name = proto_config.name(); + const std::string& string_name = proto_config.name(); ENVOY_LOG(debug, " thrift filter #{}", filter_factories_.size()); ENVOY_LOG(debug, " name: {}", string_name); diff --git a/source/extensions/filters/network/thrift_proxy/conn_manager.cc b/source/extensions/filters/network/thrift_proxy/conn_manager.cc index d2eaed92e7243..0edb3d6cbb04c 100644 --- a/source/extensions/filters/network/thrift_proxy/conn_manager.cc +++ b/source/extensions/filters/network/thrift_proxy/conn_manager.cc @@ -13,11 +13,11 @@ namespace NetworkFilters { namespace ThriftProxy { ConnectionManager::ConnectionManager(Config& config, Runtime::RandomGenerator& random_generator, - Event::TimeSystem& time_system) + TimeSource& time_source) : config_(config), stats_(config_.stats()), transport_(config.createTransport()), protocol_(config.createProtocol()), decoder_(std::make_unique(*transport_, *protocol_, *this)), - random_generator_(random_generator), time_system_(time_system) {} + random_generator_(random_generator), time_source_(time_source) {} ConnectionManager::~ConnectionManager() {} @@ -87,8 +87,11 @@ void ConnectionManager::dispatch() { void ConnectionManager::sendLocalReply(MessageMetadata& metadata, const DirectResponse& response, bool end_stream) { - Buffer::OwnedImpl buffer; + if 
(read_callbacks_->connection().state() == Network::Connection::State::Closed) { + return; + } + Buffer::OwnedImpl buffer; const DirectResponse::ResponseType result = response.encode(metadata, *protocol_, buffer); Buffer::OwnedImpl response_buffer; @@ -204,6 +207,11 @@ FilterStatus ConnectionManager::ResponseDecoder::transportEnd() { ConnectionManager& cm = parent_.parent_; + if (cm.read_callbacks_->connection().state() == Network::Connection::State::Closed) { + complete_ = true; + throw EnvoyException("downstream connection is closed"); + } + Buffer::OwnedImpl buffer; // Use the factory to get the concrete transport from the decoder transport (as opposed to diff --git a/source/extensions/filters/network/thrift_proxy/conn_manager.h b/source/extensions/filters/network/thrift_proxy/conn_manager.h index 818baae085296..0cea4a6281537 100644 --- a/source/extensions/filters/network/thrift_proxy/conn_manager.h +++ b/source/extensions/filters/network/thrift_proxy/conn_manager.h @@ -60,7 +60,7 @@ class ConnectionManager : public Network::ReadFilter, Logger::Loggable { public: ConnectionManager(Config& config, Runtime::RandomGenerator& random_generator, - Event::TimeSystem& time_system); + TimeSource& time_system); ~ConnectionManager(); // Network::ReadFilter @@ -154,9 +154,9 @@ class ConnectionManager : public Network::ReadFilter, public ThriftFilters::FilterChainFactoryCallbacks { ActiveRpc(ConnectionManager& parent) : parent_(parent), request_timer_(new Stats::Timespan(parent_.stats_.request_time_ms_, - parent_.time_system_)), + parent_.time_source_)), stream_id_(parent_.random_generator_.random()), - stream_info_(parent_.time_system_), local_response_sent_{false}, pending_transport_end_{ + stream_info_(parent_.time_source_), local_response_sent_{false}, pending_transport_end_{ false} { parent_.stats_.request_active_.inc(); @@ -267,7 +267,7 @@ class ConnectionManager : public Network::ReadFilter, Runtime::RandomGenerator& random_generator_; bool stopped_{false}; bool 
half_closed_{false}; - Event::TimeSystem& time_system_; + TimeSource& time_source_; }; } // namespace ThriftProxy diff --git a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD index 367718b09f705..8796cbcc7400d 100644 --- a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD +++ b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD @@ -18,7 +18,6 @@ envoy_cc_library( "//source/common/tracing:http_tracer_lib", "//source/extensions/filters/common/ratelimit:ratelimit_client_interface", "//source/extensions/filters/common/ratelimit:ratelimit_lib", - "//source/extensions/filters/common/ratelimit:ratelimit_registration_lib", "//source/extensions/filters/network/thrift_proxy:app_exception_lib", "//source/extensions/filters/network/thrift_proxy/filters:filter_interface", "//source/extensions/filters/network/thrift_proxy/router:router_ratelimit_interface", diff --git a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/config.cc b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/config.cc index ead0840842ecc..98a4efcd9d6b4 100644 --- a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/config.cc +++ b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/config.cc @@ -9,7 +9,6 @@ #include "common/protobuf/utility.h" #include "extensions/filters/common/ratelimit/ratelimit_impl.h" -#include "extensions/filters/common/ratelimit/ratelimit_registration.h" #include "extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.h" namespace Envoy { @@ -28,20 +27,12 @@ RateLimitFilterConfig::createFilterFactoryFromProtoTyped( context.runtime(), context.clusterManager())); const std::chrono::milliseconds timeout = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(proto_config, timeout, 20)); - Filters::Common::RateLimit::ClientFactoryPtr client_factory = - 
Filters::Common::RateLimit::rateLimitClientFactory(context); - // If ratelimit service config is provided in both bootstrap and filter, we should validate that - // they are same. - Filters::Common::RateLimit::validateRateLimitConfig< - const envoy::config::filter::thrift::rate_limit::v2alpha1::RateLimit&>(proto_config, - client_factory); - - return [client_factory, proto_config, &context, timeout, + + return [proto_config, &context, timeout, config](ThriftProxy::ThriftFilters::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addDecoderFilter(std::make_shared( - config, - Filters::Common::RateLimit::rateLimitClient( - client_factory, context, proto_config.rate_limit_service().grpc_service(), timeout))); + config, Filters::Common::RateLimit::rateLimitClient( + context, proto_config.rate_limit_service().grpc_service(), timeout))); }; } diff --git a/source/extensions/filters/network/thrift_proxy/router/router_ratelimit_impl.cc b/source/extensions/filters/network/thrift_proxy/router/router_ratelimit_impl.cc index 398bbda698742..c1f3f99f7cb20 100644 --- a/source/extensions/filters/network/thrift_proxy/router/router_ratelimit_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/router/router_ratelimit_impl.cc @@ -44,7 +44,8 @@ bool RequestHeadersAction::populateDescriptor(const RouteEntry&, RateLimit::Desc return false; } - descriptor.entries_.push_back({descriptor_key_, header_value->value().c_str()}); + descriptor.entries_.push_back( + {descriptor_key_, std::string(header_value->value().getStringView())}); return true; } diff --git a/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc b/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc index 8d104250bff52..614e98de363ae 100644 --- a/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc @@ -393,13 +393,15 @@ class RequestHeader { RequestHeader& rh = 
*static_cast(cb); if (key == Headers::get().ClientId.get()) { - rh.client_id_ = ClientId(header.value().c_str()); + rh.client_id_ = ClientId(std::string(header.value().getStringView())); } else if (key == Headers::get().Dest.get()) { - rh.dest_ = header.value().c_str(); + rh.dest_ = std::string(header.value().getStringView()); } else if (key.find(":d:") == 0 && key.size() > 3) { - rh.delegations_.emplace_back(std::string(key.substr(3)), header.value().c_str()); + rh.delegations_.emplace_back(std::string(key.substr(3)), + std::string(header.value().getStringView())); } else if (key[0] != ':') { - rh.contexts_.emplace_back(std::string(key), header.value().c_str()); + rh.contexts_.emplace_back(std::string(key), + std::string(header.value().getStringView())); } return Http::HeaderMap::Iterate::Continue; }, @@ -577,8 +579,8 @@ class ResponseHeader { [](const Http::HeaderEntry& header, void* cb) -> Http::HeaderMap::Iterate { absl::string_view key = header.key().getStringView(); if (!key.empty() && key[0] != ':') { - static_cast*>(cb)->emplace_back(std::string(key), - header.value().c_str()); + static_cast*>(cb)->emplace_back( + std::string(key), std::string(header.value().getStringView())); } return Http::HeaderMap::Iterate::Continue; }, diff --git a/source/extensions/filters/network/well_known_names.h b/source/extensions/filters/network/well_known_names.h index f540aab0d36f7..a1d435f4e7b2a 100644 --- a/source/extensions/filters/network/well_known_names.h +++ b/source/extensions/filters/network/well_known_names.h @@ -38,6 +38,8 @@ class NetworkFilterNameValues { const std::string Rbac = "envoy.filters.network.rbac"; // SNI Cluster filter const std::string SniCluster = "envoy.filters.network.sni_cluster"; + // ZooKeeper proxy filter + const std::string ZooKeeperProxy = "envoy.filters.network.zookeeper_proxy"; // Converts names from v1 to v2 const Config::V1Converter v1_converter_; diff --git a/source/extensions/filters/network/zookeeper_proxy/BUILD 
b/source/extensions/filters/network/zookeeper_proxy/BUILD new file mode 100644 index 0000000000000..26d144167c519 --- /dev/null +++ b/source/extensions/filters/network/zookeeper_proxy/BUILD @@ -0,0 +1,48 @@ +licenses(["notice"]) # Apache 2 + +# ZooKeeper proxy L7 network filter. +# Public docs: docs/root/configuration/network_filters/zookeeper_proxy_filter.rst + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "proxy_lib", + srcs = [ + "decoder.cc", + "filter.cc", + "utils.cc", + ], + hdrs = [ + "decoder.h", + "filter.h", + "utils.h", + ], + deps = [ + "//include/envoy/network:filter_interface", + "//include/envoy/server:filter_config_interface", + "//include/envoy/stats:stats_interface", + "//include/envoy/stats:stats_macros", + "//source/common/buffer:buffer_lib", + "//source/common/common:enum_to_int", + "//source/common/network:filter_lib", + "//source/extensions/filters/network:well_known_names", + ], +) + +envoy_cc_library( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + deps = [ + ":proxy_lib", + "//source/extensions/filters/network:well_known_names", + "//source/extensions/filters/network/common:factory_base_lib", + "@envoy_api//envoy/config/filter/network/zookeeper_proxy/v1alpha1:zookeeper_proxy_cc", + ], +) diff --git a/source/extensions/filters/network/zookeeper_proxy/config.cc b/source/extensions/filters/network/zookeeper_proxy/config.cc new file mode 100644 index 0000000000000..b46bbde4fbf5b --- /dev/null +++ b/source/extensions/filters/network/zookeeper_proxy/config.cc @@ -0,0 +1,46 @@ +#include "extensions/filters/network/zookeeper_proxy/config.h" + +#include + +#include "envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.pb.validate.h" +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" + +#include "common/common/logger.h" + +#include "extensions/filters/network/zookeeper_proxy/filter.h" + +namespace 
Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ZooKeeperProxy { + +/** + * Config registration for the ZooKeeper proxy filter. @see NamedNetworkFilterConfigFactory. + */ +Network::FilterFactoryCb ZooKeeperConfigFactory::createFilterFactoryFromProtoTyped( + const envoy::config::filter::network::zookeeper_proxy::v1alpha1::ZooKeeperProxy& proto_config, + Server::Configuration::FactoryContext& context) { + + ASSERT(!proto_config.stat_prefix().empty()); + + const std::string stat_prefix = fmt::format("{}.zookeeper.", proto_config.stat_prefix()); + const uint32_t max_packet_bytes = + PROTOBUF_GET_WRAPPED_OR_DEFAULT(proto_config, max_packet_bytes, 1024 * 1024); + + ZooKeeperFilterConfigSharedPtr filter_config( + std::make_shared(stat_prefix, max_packet_bytes, context.scope())); + return [filter_config](Network::FilterManager& filter_manager) -> void { + filter_manager.addFilter(std::make_shared(filter_config)); + }; +} + +/** + * Static registration for the ZooKeeper proxy filter. @see RegisterFactory. 
+ */ +REGISTER_FACTORY(ZooKeeperConfigFactory, Server::Configuration::NamedNetworkFilterConfigFactory); + +} // namespace ZooKeeperProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/zookeeper_proxy/config.h b/source/extensions/filters/network/zookeeper_proxy/config.h new file mode 100644 index 0000000000000..1d813a15ef3c2 --- /dev/null +++ b/source/extensions/filters/network/zookeeper_proxy/config.h @@ -0,0 +1,33 @@ +#pragma once + +#include "envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.pb.h" +#include "envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.pb.validate.h" + +#include "extensions/filters/network/common/factory_base.h" +#include "extensions/filters/network/well_known_names.h" +#include "extensions/filters/network/zookeeper_proxy/filter.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ZooKeeperProxy { + +/** + * Config registration for the ZooKeeper proxy filter. 
+ */ +class ZooKeeperConfigFactory + : public Common::FactoryBase< + envoy::config::filter::network::zookeeper_proxy::v1alpha1::ZooKeeperProxy> { +public: + ZooKeeperConfigFactory() : FactoryBase(NetworkFilterNames::get().ZooKeeperProxy) {} + +private: + Network::FilterFactoryCb createFilterFactoryFromProtoTyped( + const envoy::config::filter::network::zookeeper_proxy::v1alpha1::ZooKeeperProxy& proto_config, + Server::Configuration::FactoryContext& context) override; +}; + +} // namespace ZooKeeperProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/zookeeper_proxy/decoder.cc b/source/extensions/filters/network/zookeeper_proxy/decoder.cc new file mode 100644 index 0000000000000..db2d5fb5b9195 --- /dev/null +++ b/source/extensions/filters/network/zookeeper_proxy/decoder.cc @@ -0,0 +1,417 @@ +#include "extensions/filters/network/zookeeper_proxy/decoder.h" + +#include + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ZooKeeperProxy { + +constexpr uint32_t BOOL_LENGTH = 1; +constexpr uint32_t INT_LENGTH = 4; +constexpr uint32_t LONG_LENGTH = 8; +constexpr uint32_t XID_LENGTH = 4; +constexpr uint32_t OPCODE_LENGTH = 4; +constexpr uint32_t ZXID_LENGTH = 8; +constexpr uint32_t TIMEOUT_LENGTH = 4; +constexpr uint32_t SESSION_LENGTH = 8; +constexpr uint32_t MULTI_HEADER_LENGTH = 9; + +const char* createFlagsToString(CreateFlags flags) { + switch (flags) { + case CreateFlags::PERSISTENT: + return "persistent"; + case CreateFlags::PERSISTENT_SEQUENTIAL: + return "persistent_sequential"; + case CreateFlags::EPHEMERAL: + return "ephemeral"; + case CreateFlags::EPHEMERAL_SEQUENTIAL: + return "ephemeral_sequential"; + case CreateFlags::CONTAINER: + return "container"; + case CreateFlags::PERSISTENT_WITH_TTL: + return "persistent_with_ttl"; + case CreateFlags::PERSISTENT_SEQUENTIAL_WITH_TTL: + return "persistent_sequential_with_ttl"; + } + + return "unknown"; +} + 
+void DecoderImpl::decode(Buffer::Instance& data, uint64_t& offset) { + ENVOY_LOG(trace, "zookeeper_proxy: decoding {} bytes at offset {}", data.length(), offset); + + // Reset the helper's cursor, to ensure the current message stays within the + // allowed max length, even when it's different than the declared length + // by the message. + // + // Note: we need to keep two cursors — offset and helper_'s internal one — because + // a buffer may contain multiple messages, so offset is global and helper_'s + // internal cursor is reset for each individual message. + helper_.reset(); + + // Check message length. + const int32_t len = helper_.peekInt32(data, offset); + ensureMinLength(len, INT_LENGTH + XID_LENGTH); + ensureMaxLength(len); + + // Control requests, with XIDs <= 0. + // + // These are meant to control the state of a session: + // connect, keep-alive, authenticate and set initial watches. + // + // Note: setWatches is a command historically used to set watches + // right after connecting, typically used when roaming from one + // ZooKeeper server to the next. Thus, the special xid. + // However, some client implementations might expose setWatches + // as a regular data request, so we support that as well. + const int32_t xid = helper_.peekInt32(data, offset); + switch (static_cast(xid)) { + case XidCodes::CONNECT_XID: + parseConnect(data, offset, len); + return; + case XidCodes::PING_XID: + offset += OPCODE_LENGTH; + callbacks_.onPing(); + return; + case XidCodes::AUTH_XID: + parseAuthRequest(data, offset, len); + return; + case XidCodes::SET_WATCHES_XID: + offset += OPCODE_LENGTH; + parseSetWatchesRequest(data, offset, len); + return; + default: + // WATCH_XID is generated by the server, so that and everything + // else can be ignored here. + break; + } + + // Data requests, with XIDs > 0. 
+ // + // These are meant to happen after a successful control request, except + // for two cases: auth requests can happen at any time and ping requests + // must happen every 1/3 of the negotiated session timeout, to keep + // the session alive. + const int32_t opcode = helper_.peekInt32(data, offset); + switch (static_cast(opcode)) { + case OpCodes::GETDATA: + parseGetDataRequest(data, offset, len); + break; + case OpCodes::CREATE: + case OpCodes::CREATE2: + case OpCodes::CREATECONTAINER: + case OpCodes::CREATETTL: + parseCreateRequest(data, offset, len, static_cast(opcode)); + break; + case OpCodes::SETDATA: + parseSetRequest(data, offset, len); + break; + case OpCodes::GETCHILDREN: + parseGetChildrenRequest(data, offset, len, false); + break; + case OpCodes::GETCHILDREN2: + parseGetChildrenRequest(data, offset, len, true); + break; + case OpCodes::DELETE: + parseDeleteRequest(data, offset, len); + break; + case OpCodes::EXISTS: + parseExistsRequest(data, offset, len); + break; + case OpCodes::GETACL: + parseGetAclRequest(data, offset, len); + break; + case OpCodes::SETACL: + parseSetAclRequest(data, offset, len); + break; + case OpCodes::SYNC: + callbacks_.onSyncRequest(pathOnlyRequest(data, offset, len)); + break; + case OpCodes::CHECK: + parseCheckRequest(data, offset, len); + break; + case OpCodes::MULTI: + parseMultiRequest(data, offset, len); + break; + case OpCodes::RECONFIG: + parseReconfigRequest(data, offset, len); + break; + case OpCodes::SETWATCHES: + parseSetWatchesRequest(data, offset, len); + break; + case OpCodes::CHECKWATCHES: + parseXWatchesRequest(data, offset, len, OpCodes::CHECKWATCHES); + break; + case OpCodes::REMOVEWATCHES: + parseXWatchesRequest(data, offset, len, OpCodes::REMOVEWATCHES); + break; + case OpCodes::GETEPHEMERALS: + callbacks_.onGetEphemeralsRequest(pathOnlyRequest(data, offset, len)); + break; + case OpCodes::GETALLCHILDRENNUMBER: + callbacks_.onGetAllChildrenNumberRequest(pathOnlyRequest(data, offset, len)); + break; + 
case OpCodes::CLOSE: + callbacks_.onCloseRequest(); + break; + default: + throw EnvoyException(fmt::format("Unknown opcode: {}", opcode)); + } +} + +void DecoderImpl::ensureMinLength(const int32_t len, const int32_t minlen) const { + if (len < minlen) { + throw EnvoyException("Packet is too small"); + } +} + +void DecoderImpl::ensureMaxLength(const int32_t len) const { + if (static_cast(len) > max_packet_bytes_) { + throw EnvoyException("Packet is too big"); + } +} + +void DecoderImpl::parseConnect(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + ZXID_LENGTH + TIMEOUT_LENGTH + SESSION_LENGTH + INT_LENGTH); + + // Skip zxid, timeout, and session id. + offset += ZXID_LENGTH + TIMEOUT_LENGTH + SESSION_LENGTH; + + // Skip password. + skipString(data, offset); + + // Read readonly flag, if it's there. + bool readonly{}; + if (data.length() >= offset + 1) { + readonly = helper_.peekBool(data, offset); + } + + callbacks_.onConnect(readonly); +} + +void DecoderImpl::parseAuthRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + INT_LENGTH + INT_LENGTH + INT_LENGTH); + + // Skip opcode + type. + offset += OPCODE_LENGTH + INT_LENGTH; + const std::string scheme = helper_.peekString(data, offset); + // Skip credential. + skipString(data, offset); + + callbacks_.onAuthRequest(scheme); +} + +void DecoderImpl::parseGetDataRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + INT_LENGTH + BOOL_LENGTH); + + const std::string path = helper_.peekString(data, offset); + const bool watch = helper_.peekBool(data, offset); + + callbacks_.onGetDataRequest(path, watch); +} + +void DecoderImpl::skipAcls(Buffer::Instance& data, uint64_t& offset) { + const int32_t count = helper_.peekInt32(data, offset); + + for (int i = 0; i < count; ++i) { + // Perms. + helper_.peekInt32(data, offset); + // Skip scheme. 
+ skipString(data, offset); + // Skip cred. + skipString(data, offset); + } +} + +void DecoderImpl::parseCreateRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len, + OpCodes opcode) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + (3 * INT_LENGTH)); + + const std::string path = helper_.peekString(data, offset); + + // Skip data. + skipString(data, offset); + skipAcls(data, offset); + + const CreateFlags flags = static_cast(helper_.peekInt32(data, offset)); + callbacks_.onCreateRequest(path, flags, opcode); +} + +void DecoderImpl::parseSetRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + (3 * INT_LENGTH)); + + const std::string path = helper_.peekString(data, offset); + // Skip data. + skipString(data, offset); + // Ignore version. + helper_.peekInt32(data, offset); + + callbacks_.onSetRequest(path); +} + +void DecoderImpl::parseGetChildrenRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len, + const bool two) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + INT_LENGTH + BOOL_LENGTH); + + const std::string path = helper_.peekString(data, offset); + const bool watch = helper_.peekBool(data, offset); + + callbacks_.onGetChildrenRequest(path, watch, two); +} + +void DecoderImpl::parseDeleteRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + (2 * INT_LENGTH)); + + const std::string path = helper_.peekString(data, offset); + const int32_t version = helper_.peekInt32(data, offset); + + callbacks_.onDeleteRequest(path, version); +} + +void DecoderImpl::parseExistsRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + INT_LENGTH + BOOL_LENGTH); + + const std::string path = helper_.peekString(data, offset); + const bool watch = helper_.peekBool(data, offset); + + callbacks_.onExistsRequest(path, watch); +} + +void 
DecoderImpl::parseGetAclRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + INT_LENGTH); + + const std::string path = helper_.peekString(data, offset); + + callbacks_.onGetAclRequest(path); +} + +void DecoderImpl::parseSetAclRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + (2 * INT_LENGTH)); + + const std::string path = helper_.peekString(data, offset); + skipAcls(data, offset); + const int32_t version = helper_.peekInt32(data, offset); + + callbacks_.onSetAclRequest(path, version); +} + +std::string DecoderImpl::pathOnlyRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + INT_LENGTH); + return helper_.peekString(data, offset); +} + +void DecoderImpl::parseCheckRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, (2 * INT_LENGTH)); + + const std::string path = helper_.peekString(data, offset); + const int32_t version = helper_.peekInt32(data, offset); + + callbacks_.onCheckRequest(path, version); +} + +void DecoderImpl::parseMultiRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + // Treat empty transactions as a decoding error, there should be at least 1 header. + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + MULTI_HEADER_LENGTH); + + while (true) { + const int32_t opcode = helper_.peekInt32(data, offset); + const bool done = helper_.peekBool(data, offset); + // Ignore error field. 
+ helper_.peekInt32(data, offset); + + if (done) { + break; + } + + switch (static_cast(opcode)) { + case OpCodes::CREATE: + parseCreateRequest(data, offset, len, OpCodes::CREATE); + break; + case OpCodes::SETDATA: + parseSetRequest(data, offset, len); + break; + case OpCodes::CHECK: + parseCheckRequest(data, offset, len); + break; + default: + throw EnvoyException(fmt::format("Unknown opcode within a transaction: {}", opcode)); + } + } + + callbacks_.onMultiRequest(); +} + +void DecoderImpl::parseReconfigRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + (3 * INT_LENGTH) + LONG_LENGTH); + + // Skip joining. + skipString(data, offset); + // Skip leaving. + skipString(data, offset); + // Skip new members. + skipString(data, offset); + // Read config id. + helper_.peekInt64(data, offset); + + callbacks_.onReconfigRequest(); +} + +void DecoderImpl::parseSetWatchesRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + (3 * INT_LENGTH)); + + // Data watches. + skipStrings(data, offset); + // Exist watches. + skipStrings(data, offset); + // Child watches. 
+ skipStrings(data, offset); + + callbacks_.onSetWatchesRequest(); +} + +void DecoderImpl::parseXWatchesRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len, + OpCodes opcode) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + (2 * INT_LENGTH)); + + const std::string path = helper_.peekString(data, offset); + const int32_t type = helper_.peekInt32(data, offset); + + if (opcode == OpCodes::CHECKWATCHES) { + callbacks_.onCheckWatchesRequest(path, type); + } else { + callbacks_.onRemoveWatchesRequest(path, type); + } +} + +void DecoderImpl::skipString(Buffer::Instance& data, uint64_t& offset) { + const int32_t slen = helper_.peekInt32(data, offset); + helper_.skip(slen, offset); +} + +void DecoderImpl::skipStrings(Buffer::Instance& data, uint64_t& offset) { + const int32_t count = helper_.peekInt32(data, offset); + + for (int i = 0; i < count; ++i) { + skipString(data, offset); + } +} + +void DecoderImpl::onData(Buffer::Instance& data) { + uint64_t offset = 0; + try { + while (offset < data.length()) { + const uint64_t current = offset; + decode(data, offset); + callbacks_.onRequestBytes(offset - current); + } + } catch (const EnvoyException& e) { + ENVOY_LOG(debug, "zookeeper_proxy: decoding exception {}", e.what()); + callbacks_.onDecodeError(); + } +} + +} // namespace ZooKeeperProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/zookeeper_proxy/decoder.h b/source/extensions/filters/network/zookeeper_proxy/decoder.h new file mode 100644 index 0000000000000..46efb96fe65e4 --- /dev/null +++ b/source/extensions/filters/network/zookeeper_proxy/decoder.h @@ -0,0 +1,152 @@ +#pragma once + +#include +#include + +#include "envoy/common/platform.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/logger.h" + +#include "extensions/filters/network/zookeeper_proxy/utils.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace 
ZooKeeperProxy { + +enum class XidCodes { + CONNECT_XID = 0, + WATCH_XID = -1, + PING_XID = -2, + AUTH_XID = -4, + SET_WATCHES_XID = -8 +}; + +enum class OpCodes { + CONNECT = 0, + CREATE = 1, + DELETE = 2, + EXISTS = 3, + GETDATA = 4, + SETDATA = 5, + GETACL = 6, + SETACL = 7, + GETCHILDREN = 8, + SYNC = 9, + PING = 11, + GETCHILDREN2 = 12, + CHECK = 13, + MULTI = 14, + CREATE2 = 15, + RECONFIG = 16, + CHECKWATCHES = 17, + REMOVEWATCHES = 18, + CREATECONTAINER = 19, + CREATETTL = 21, + CLOSE = -11, + SETAUTH = 100, + SETWATCHES = 101, + GETEPHEMERALS = 103, + GETALLCHILDRENNUMBER = 104 +}; + +enum class WatcherType { CHILDREN = 1, DATA = 2, ANY = 3 }; + +enum class CreateFlags { + PERSISTENT, + PERSISTENT_SEQUENTIAL, + EPHEMERAL, + EPHEMERAL_SEQUENTIAL, + CONTAINER, + PERSISTENT_WITH_TTL, + PERSISTENT_SEQUENTIAL_WITH_TTL +}; + +const char* createFlagsToString(CreateFlags flags); + +/** + * General callbacks for dispatching decoded ZooKeeper messages to a sink. + */ +class DecoderCallbacks { +public: + virtual ~DecoderCallbacks() {} + + virtual void onDecodeError() PURE; + virtual void onRequestBytes(uint64_t bytes) PURE; + virtual void onConnect(bool readonly) PURE; + virtual void onPing() PURE; + virtual void onAuthRequest(const std::string& scheme) PURE; + virtual void onGetDataRequest(const std::string& path, bool watch) PURE; + virtual void onCreateRequest(const std::string& path, CreateFlags flags, OpCodes opcode) PURE; + virtual void onSetRequest(const std::string& path) PURE; + virtual void onGetChildrenRequest(const std::string& path, bool watch, bool v2) PURE; + virtual void onGetEphemeralsRequest(const std::string& path) PURE; + virtual void onGetAllChildrenNumberRequest(const std::string& path) PURE; + virtual void onDeleteRequest(const std::string& path, int32_t version) PURE; + virtual void onExistsRequest(const std::string& path, bool watch) PURE; + virtual void onGetAclRequest(const std::string& path) PURE; + virtual void onSetAclRequest(const 
std::string& path, int32_t version) PURE; + virtual void onSyncRequest(const std::string& path) PURE; + virtual void onCheckRequest(const std::string& path, int32_t version) PURE; + virtual void onMultiRequest() PURE; + virtual void onReconfigRequest() PURE; + virtual void onSetWatchesRequest() PURE; + virtual void onCheckWatchesRequest(const std::string& path, int32_t type) PURE; + virtual void onRemoveWatchesRequest(const std::string& path, int32_t type) PURE; + virtual void onCloseRequest() PURE; +}; + +/** + * ZooKeeper message decoder. + */ +class Decoder { +public: + virtual ~Decoder() {} + + virtual void onData(Buffer::Instance& data) PURE; +}; + +typedef std::unique_ptr DecoderPtr; + +class DecoderImpl : public Decoder, Logger::Loggable { +public: + explicit DecoderImpl(DecoderCallbacks& callbacks, uint32_t max_packet_bytes) + : callbacks_(callbacks), max_packet_bytes_(max_packet_bytes), helper_(max_packet_bytes) {} + + // ZooKeeperProxy::Decoder + void onData(Buffer::Instance& data) override; + +private: + void decode(Buffer::Instance& data, uint64_t& offset); + void parseConnect(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseAuthRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseGetDataRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseCreateRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len, OpCodes opcode); + void skipAcls(Buffer::Instance& data, uint64_t& offset); + void parseSetRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseGetChildrenRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len, bool two); + void parseDeleteRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseExistsRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseGetAclRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseSetAclRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + 
void parseCheckRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseMultiRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseReconfigRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseSetWatchesRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseXWatchesRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len, OpCodes opcode); + void skipString(Buffer::Instance& data, uint64_t& offset); + void skipStrings(Buffer::Instance& data, uint64_t& offset); + void ensureMinLength(int32_t len, int32_t minlen) const; + void ensureMaxLength(int32_t len) const; + std::string pathOnlyRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + + DecoderCallbacks& callbacks_; + const uint32_t max_packet_bytes_; + BufferHelper helper_; +}; + +} // namespace ZooKeeperProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/zookeeper_proxy/filter.cc b/source/extensions/filters/network/zookeeper_proxy/filter.cc new file mode 100644 index 0000000000000..3a11262f298f0 --- /dev/null +++ b/source/extensions/filters/network/zookeeper_proxy/filter.cc @@ -0,0 +1,242 @@ +#include "extensions/filters/network/zookeeper_proxy/filter.h" + +#include +#include + +#include "common/buffer/buffer_impl.h" +#include "common/common/assert.h" +#include "common/common/enum_to_int.h" +#include "common/common/fmt.h" +#include "common/common/logger.h" + +#include "extensions/filters/network/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ZooKeeperProxy { + +ZooKeeperFilterConfig::ZooKeeperFilterConfig(const std::string& stat_prefix, + const uint32_t max_packet_bytes, Stats::Scope& scope) + : scope_(scope), max_packet_bytes_(max_packet_bytes), stat_prefix_(stat_prefix), + stats_(generateStats(stat_prefix, scope)) {} + 
+ZooKeeperFilter::ZooKeeperFilter(ZooKeeperFilterConfigSharedPtr config)
+    : config_(std::move(config)) {}
+
+void ZooKeeperFilter::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) {
+  read_callbacks_ = &callbacks;
+}
+
+Network::FilterStatus ZooKeeperFilter::onData(Buffer::Instance& data, bool) {
+  doDecode(data);
+  return Network::FilterStatus::Continue;
+}
+
+Network::FilterStatus ZooKeeperFilter::onWrite(Buffer::Instance&, bool) {
+  return Network::FilterStatus::Continue;
+}
+
+Network::FilterStatus ZooKeeperFilter::onNewConnection() { return Network::FilterStatus::Continue; }
+
+void ZooKeeperFilter::doDecode(Buffer::Instance& buffer) {
+  clearDynamicMetadata();
+
+  if (!decoder_) {
+    decoder_ = createDecoder(*this);
+  }
+
+  decoder_->onData(buffer);
+}
+
+DecoderPtr ZooKeeperFilter::createDecoder(DecoderCallbacks& callbacks) {
+  return std::make_unique<DecoderImpl>(callbacks, config_->maxPacketBytes());
+}
+
+void ZooKeeperFilter::setDynamicMetadata(const std::string& key, const std::string& value) {
+  setDynamicMetadata({{key, value}});
+}
+
+void ZooKeeperFilter::clearDynamicMetadata() {
+  envoy::api::v2::core::Metadata& dynamic_metadata =
+      read_callbacks_->connection().streamInfo().dynamicMetadata();
+  auto& metadata =
+      (*dynamic_metadata.mutable_filter_metadata())[NetworkFilterNames::get().ZooKeeperProxy];
+  metadata.mutable_fields()->clear();
+}
+
+void ZooKeeperFilter::setDynamicMetadata(
+    const std::vector<std::pair<const std::string, const std::string>>& data) {
+  envoy::api::v2::core::Metadata& dynamic_metadata =
+      read_callbacks_->connection().streamInfo().dynamicMetadata();
+  ProtobufWkt::Struct metadata(
+      (*dynamic_metadata.mutable_filter_metadata())[NetworkFilterNames::get().ZooKeeperProxy]);
+  auto& fields = *metadata.mutable_fields();
+
+  for (const auto& pair : data) {
+    auto val = ProtobufWkt::Value();
+    val.set_string_value(pair.second);
+    fields.insert({pair.first, val});
+  }
+
+  read_callbacks_->connection().streamInfo().setDynamicMetadata(
+      NetworkFilterNames::get().ZooKeeperProxy, metadata);
+}
+
+void ZooKeeperFilter::onConnect(const bool readonly) {
+  if (readonly) {
+    config_->stats_.connect_readonly_rq_.inc();
+    setDynamicMetadata("opname", "connect_readonly");
+  } else {
+    config_->stats_.connect_rq_.inc();
+    setDynamicMetadata("opname", "connect");
+  }
+}
+
+void ZooKeeperFilter::onDecodeError() {
+  config_->stats_.decoder_error_.inc();
+  setDynamicMetadata("opname", "error");
+}
+
+void ZooKeeperFilter::onRequestBytes(const uint64_t bytes) {
+  config_->stats_.request_bytes_.add(bytes);
+  setDynamicMetadata("bytes", std::to_string(bytes));
+}
+
+void ZooKeeperFilter::onPing() {
+  config_->stats_.ping_rq_.inc();
+  setDynamicMetadata("opname", "ping");
+}
+
+void ZooKeeperFilter::onAuthRequest(const std::string& scheme) {
+  config_->scope_.counter(fmt::format("{}.auth.{}_rq", config_->stat_prefix_, scheme)).inc();
+  setDynamicMetadata("opname", "auth");
+}
+
+void ZooKeeperFilter::onGetDataRequest(const std::string& path, const bool watch) {
+  config_->stats_.getdata_rq_.inc();
+  setDynamicMetadata({{"opname", "getdata"}, {"path", path}, {"watch", watch ? "true" : "false"}});
+}
+
+void ZooKeeperFilter::onCreateRequest(const std::string& path, const CreateFlags flags,
+                                      const OpCodes opcode) {
+  std::string opname;
+
+  switch (opcode) {
+  case OpCodes::CREATE:
+    opname = "create";
+    config_->stats_.create_rq_.inc();
+    break;
+  case OpCodes::CREATE2:
+    opname = "create2";
+    config_->stats_.create2_rq_.inc();
+    break;
+  case OpCodes::CREATECONTAINER:
+    opname = "createcontainer";
+    config_->stats_.createcontainer_rq_.inc();
+    break;
+  case OpCodes::CREATETTL:
+    opname = "createttl";
+    config_->stats_.createttl_rq_.inc();
+    break;
+  default:
+    throw EnvoyException(fmt::format("Unknown opcode: {}", enumToSignedInt(opcode)));
+    break;
+  }
+
+  setDynamicMetadata(
+      {{"opname", opname}, {"path", path}, {"create_type", createFlagsToString(flags)}});
+}
+
+void ZooKeeperFilter::onSetRequest(const std::string& path) {
+  config_->stats_.setdata_rq_.inc();
+  setDynamicMetadata({{"opname", "setdata"}, {"path", path}});
+}
+
+void ZooKeeperFilter::onGetChildrenRequest(const std::string& path, const bool watch,
+                                           const bool v2) {
+  std::string opname = "getchildren";
+
+  if (v2) {
+    config_->stats_.getchildren2_rq_.inc();
+    opname = "getchildren2";
+  } else {
+    config_->stats_.getchildren_rq_.inc();
+  }
+
+  setDynamicMetadata({{"opname", opname}, {"path", path}, {"watch", watch ? "true" : "false"}});
+}
+
+void ZooKeeperFilter::onDeleteRequest(const std::string& path, const int32_t version) {
+  config_->stats_.remove_rq_.inc();
+  setDynamicMetadata({{"opname", "remove"}, {"path", path}, {"version", std::to_string(version)}});
+}
+
+void ZooKeeperFilter::onExistsRequest(const std::string& path, const bool watch) {
+  config_->stats_.exists_rq_.inc();
+  setDynamicMetadata({{"opname", "exists"}, {"path", path}, {"watch", watch ? "true" : "false"}});
+}
+
+void ZooKeeperFilter::onGetAclRequest(const std::string& path) {
+  config_->stats_.getacl_rq_.inc();
+  setDynamicMetadata({{"opname", "getacl"}, {"path", path}});
+}
+
+void ZooKeeperFilter::onSetAclRequest(const std::string& path, const int32_t version) {
+  config_->stats_.setacl_rq_.inc();
+  setDynamicMetadata({{"opname", "setacl"}, {"path", path}, {"version", std::to_string(version)}});
+}
+
+void ZooKeeperFilter::onSyncRequest(const std::string& path) {
+  config_->stats_.sync_rq_.inc();
+  setDynamicMetadata({{"opname", "sync"}, {"path", path}});
+}
+
+void ZooKeeperFilter::onCheckRequest(const std::string&, const int32_t) {
+  config_->stats_.check_rq_.inc();
+}
+
+void ZooKeeperFilter::onCheckWatchesRequest(const std::string& path, const int32_t) {
+  config_->stats_.checkwatches_rq_.inc();
+  setDynamicMetadata({{"opname", "checkwatches"}, {"path", path}});
+}
+
+void ZooKeeperFilter::onRemoveWatchesRequest(const std::string& path, const int32_t) {
+  config_->stats_.removewatches_rq_.inc();
+  setDynamicMetadata({{"opname", "removewatches"}, {"path", path}});
+}
+
+void ZooKeeperFilter::onMultiRequest() {
+  config_->stats_.multi_rq_.inc();
+  setDynamicMetadata("opname", "multi");
+}
+
+void ZooKeeperFilter::onReconfigRequest() {
+  config_->stats_.reconfig_rq_.inc();
+  setDynamicMetadata("opname", "reconfig");
+}
+
+void ZooKeeperFilter::onSetWatchesRequest() {
+  config_->stats_.setwatches_rq_.inc();
+  setDynamicMetadata("opname", "setwatches");
+}
+
+void ZooKeeperFilter::onGetEphemeralsRequest(const std::string& path) {
+  config_->stats_.getephemerals_rq_.inc();
+  setDynamicMetadata({{"opname", "getephemerals"}, {"path", path}});
+}
+
+void ZooKeeperFilter::onGetAllChildrenNumberRequest(const std::string& path) {
+  config_->stats_.getallchildrennumber_rq_.inc();
+  setDynamicMetadata({{"opname", "getallchildrennumber"}, {"path", path}});
+}
+
+void ZooKeeperFilter::onCloseRequest() {
+  config_->stats_.close_rq_.inc();
+  setDynamicMetadata("opname", "close");
+}
+
+} // namespace ZooKeeperProxy
+} // namespace NetworkFilters
+} // namespace Extensions
+} // namespace Envoy
diff --git a/source/extensions/filters/network/zookeeper_proxy/filter.h b/source/extensions/filters/network/zookeeper_proxy/filter.h
new file mode 100644
index 0000000000000..491a120329654
--- /dev/null
+++ b/source/extensions/filters/network/zookeeper_proxy/filter.h
@@ -0,0 +1,146 @@
+#pragma once
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "envoy/access_log/access_log.h"
+#include "envoy/network/connection.h"
+#include "envoy/network/filter.h"
+#include "envoy/stats/scope.h"
+#include "envoy/stats/stats.h"
+#include "envoy/stats/stats_macros.h"
+
+#include "common/common/logger.h"
+
+#include "extensions/filters/network/zookeeper_proxy/decoder.h"
+
+namespace Envoy {
+namespace Extensions {
+namespace NetworkFilters {
+namespace ZooKeeperProxy {
+
+/**
+ * All ZooKeeper proxy stats. @see stats_macros.h
+ */
+// clang-format off
+#define ALL_ZOOKEEPER_PROXY_STATS(COUNTER)                                                         \
+  COUNTER(decoder_error)                                                                           \
+  COUNTER(request_bytes)                                                                           \
+  COUNTER(connect_rq)                                                                              \
+  COUNTER(connect_readonly_rq)                                                                     \
+  COUNTER(getdata_rq)                                                                              \
+  COUNTER(create_rq)                                                                               \
+  COUNTER(create2_rq)                                                                              \
+  COUNTER(createcontainer_rq)                                                                      \
+  COUNTER(createttl_rq)                                                                            \
+  COUNTER(setdata_rq)                                                                              \
+  COUNTER(getchildren_rq)                                                                          \
+  COUNTER(getchildren2_rq)                                                                         \
+  COUNTER(getephemerals_rq)                                                                        \
+  COUNTER(getallchildrennumber_rq)                                                                 \
+  COUNTER(remove_rq)                                                                               \
+  COUNTER(exists_rq)                                                                               \
+  COUNTER(getacl_rq)                                                                               \
+  COUNTER(setacl_rq)                                                                               \
+  COUNTER(sync_rq)                                                                                 \
+  COUNTER(ping_rq)                                                                                 \
+  COUNTER(multi_rq)                                                                                \
+  COUNTER(reconfig_rq)                                                                             \
+  COUNTER(close_rq)                                                                                \
+  COUNTER(setauth_rq)                                                                              \
+  COUNTER(setwatches_rq)                                                                           \
+  COUNTER(checkwatches_rq)                                                                         \
+  COUNTER(removewatches_rq)                                                                        \
+  COUNTER(check_rq)
+// clang-format on
+
+/**
+ * Struct definition for all ZooKeeper proxy stats.
@see stats_macros.h + */ +struct ZooKeeperProxyStats { + ALL_ZOOKEEPER_PROXY_STATS(GENERATE_COUNTER_STRUCT) +}; + +/** + * Configuration for the ZooKeeper proxy filter. + */ +class ZooKeeperFilterConfig { +public: + ZooKeeperFilterConfig(const std::string& stat_prefix, uint32_t max_packet_bytes, + Stats::Scope& scope); + + const ZooKeeperProxyStats& stats() { return stats_; } + uint32_t maxPacketBytes() const { return max_packet_bytes_; } + + Stats::Scope& scope_; + const uint32_t max_packet_bytes_; + const std::string stat_prefix_; + ZooKeeperProxyStats stats_; + +private: + ZooKeeperProxyStats generateStats(const std::string& prefix, Stats::Scope& scope) { + return ZooKeeperProxyStats{ALL_ZOOKEEPER_PROXY_STATS(POOL_COUNTER_PREFIX(scope, prefix))}; + } +}; + +using ZooKeeperFilterConfigSharedPtr = std::shared_ptr; + +/** + * Implementation of ZooKeeper proxy filter. + */ +class ZooKeeperFilter : public Network::Filter, + DecoderCallbacks, + Logger::Loggable { +public: + explicit ZooKeeperFilter(ZooKeeperFilterConfigSharedPtr config); + + // Network::ReadFilter + Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override; + Network::FilterStatus onNewConnection() override; + void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override; + + // Network::WriteFilter + Network::FilterStatus onWrite(Buffer::Instance& data, bool end_stream) override; + + // ZooKeeperProxy::DecoderCallback + void onDecodeError() override; + void onRequestBytes(uint64_t bytes) override; + void onConnect(bool readonly) override; + void onPing() override; + void onAuthRequest(const std::string& scheme) override; + void onGetDataRequest(const std::string& path, bool watch) override; + void onCreateRequest(const std::string& path, CreateFlags flags, OpCodes opcode) override; + void onSetRequest(const std::string& path) override; + void onGetChildrenRequest(const std::string& path, bool watch, bool v2) override; + void onDeleteRequest(const 
std::string& path, int32_t version) override; + void onExistsRequest(const std::string& path, bool watch) override; + void onGetAclRequest(const std::string& path) override; + void onSetAclRequest(const std::string& path, int32_t version) override; + void onSyncRequest(const std::string& path) override; + void onCheckRequest(const std::string& path, int32_t version) override; + void onMultiRequest() override; + void onReconfigRequest() override; + void onSetWatchesRequest() override; + void onCheckWatchesRequest(const std::string& path, int32_t type) override; + void onRemoveWatchesRequest(const std::string& path, int32_t type) override; + void onGetEphemeralsRequest(const std::string& path) override; + void onGetAllChildrenNumberRequest(const std::string& path) override; + void onCloseRequest() override; + + void doDecode(Buffer::Instance& buffer); + DecoderPtr createDecoder(DecoderCallbacks& callbacks); + void setDynamicMetadata(const std::string& key, const std::string& value); + void setDynamicMetadata(const std::vector>& data); + void clearDynamicMetadata(); + +private: + Network::ReadFilterCallbacks* read_callbacks_{}; + ZooKeeperFilterConfigSharedPtr config_; + std::unique_ptr decoder_; +}; + +} // namespace ZooKeeperProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/zookeeper_proxy/utils.cc b/source/extensions/filters/network/zookeeper_proxy/utils.cc new file mode 100644 index 0000000000000..ec2d524ee5842 --- /dev/null +++ b/source/extensions/filters/network/zookeeper_proxy/utils.cc @@ -0,0 +1,73 @@ +#include "extensions/filters/network/zookeeper_proxy/utils.h" + +#include + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ZooKeeperProxy { + +int32_t BufferHelper::peekInt32(Buffer::Instance& buffer, uint64_t& offset) { + ensureMaxLen(sizeof(int32_t)); + + int32_t val = buffer.peekBEInt(offset); + offset += sizeof(int32_t); + return val; +} + 
+int64_t BufferHelper::peekInt64(Buffer::Instance& buffer, uint64_t& offset) {
+  ensureMaxLen(sizeof(int64_t));
+
+  int64_t val = buffer.peekBEInt<int64_t>(offset);
+  offset += sizeof(int64_t);
+  return val;
+}
+
+bool BufferHelper::peekBool(Buffer::Instance& buffer, uint64_t& offset) {
+  ensureMaxLen(1);
+
+  const char byte = buffer.peekInt<char>(offset);
+  const bool val = static_cast<bool>(byte);
+  offset += 1;
+  return val;
+}
+
+std::string BufferHelper::peekString(Buffer::Instance& buffer, uint64_t& offset) {
+  std::string val;
+  uint32_t len = peekInt32(buffer, offset);
+
+  if (len == 0) {
+    return val;
+  }
+
+  if (buffer.length() < (offset + len)) {
+    throw EnvoyException("peekString: buffer is smaller than string length");
+  }
+
+  ensureMaxLen(len);
+
+  std::unique_ptr<char[]> data(new char[len]);
+  buffer.copyOut(offset, len, data.get());
+  val.assign(data.get(), len);
+  offset += len;
+
+  return val;
+}
+
+void BufferHelper::skip(const uint32_t len, uint64_t& offset) {
+  offset += len;
+  current_ += len;
+}
+
+void BufferHelper::ensureMaxLen(const uint32_t size) {
+  current_ += size;
+
+  if (current_ > max_len_) {
+    throw EnvoyException("read beyond max length");
+  }
+}
+
+} // namespace ZooKeeperProxy
+} // namespace NetworkFilters
+} // namespace Extensions
+} // namespace Envoy
diff --git a/source/extensions/filters/network/zookeeper_proxy/utils.h b/source/extensions/filters/network/zookeeper_proxy/utils.h
new file mode 100644
index 0000000000000..ad210a8150f4c
--- /dev/null
+++ b/source/extensions/filters/network/zookeeper_proxy/utils.h
@@ -0,0 +1,47 @@
+#pragma once
+
+#include <cstdint>
+#include <string>
+
+#include "envoy/common/platform.h"
+
+#include "common/buffer/buffer_impl.h"
+#include "common/common/byte_order.h"
+#include "common/common/logger.h"
+
+namespace Envoy {
+namespace Extensions {
+namespace NetworkFilters {
+namespace ZooKeeperProxy {
+
+/**
+ * Helper for extracting ZooKeeper data from a buffer.
+ * + * If at any point a peek is tried beyond max_len, an EnvoyException + * will be thrown. This is important to protect Envoy against malformed + * requests (e.g.: when the declared and actual length don't match). + * + * Note: ZooKeeper's protocol uses network byte ordering (big-endian). + */ +class BufferHelper : public Logger::Loggable { +public: + BufferHelper(uint32_t max_len) : max_len_(max_len) {} + + int32_t peekInt32(Buffer::Instance& buffer, uint64_t& offset); + int64_t peekInt64(Buffer::Instance& buffer, uint64_t& offset); + std::string peekString(Buffer::Instance& buffer, uint64_t& offset); + bool peekBool(Buffer::Instance& buffer, uint64_t& offset); + void skip(uint32_t len, uint64_t& offset); + void reset() { current_ = 0; } + +private: + void ensureMaxLen(uint32_t size); + + uint32_t max_len_; + uint32_t current_{}; +}; + +} // namespace ZooKeeperProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/health_checkers/redis/BUILD b/source/extensions/health_checkers/redis/BUILD index 3148d7c09711f..1c92f366295c3 100644 --- a/source/extensions/health_checkers/redis/BUILD +++ b/source/extensions/health_checkers/redis/BUILD @@ -16,6 +16,7 @@ envoy_cc_library( hdrs = ["redis.h"], deps = [ "//source/common/upstream:health_checker_base_lib", + "//source/extensions/filters/network/common/redis:client_lib", "//source/extensions/filters/network/redis_proxy:conn_pool_lib", "@envoy_api//envoy/api/v2/core:health_check_cc", "@envoy_api//envoy/config/health_checker/redis/v2:redis_cc", diff --git a/source/extensions/health_checkers/redis/config.cc b/source/extensions/health_checkers/redis/config.cc index 7102b0b74f4c0..eeb0206374837 100644 --- a/source/extensions/health_checkers/redis/config.cc +++ b/source/extensions/health_checkers/redis/config.cc @@ -17,7 +17,7 @@ Upstream::HealthCheckerSharedPtr RedisHealthCheckerFactory::createCustomHealthCh return std::make_shared( context.cluster(), config, 
getRedisHealthCheckConfig(config), context.dispatcher(), context.runtime(), context.random(), context.eventLogger(), - NetworkFilters::RedisProxy::ConnPool::ClientFactoryImpl::instance_); + NetworkFilters::Common::Redis::Client::ClientFactoryImpl::instance_); }; /** diff --git a/source/extensions/health_checkers/redis/redis.cc b/source/extensions/health_checkers/redis/redis.cc index db3ea51b69cef..b6342316499ae 100644 --- a/source/extensions/health_checkers/redis/redis.cc +++ b/source/extensions/health_checkers/redis/redis.cc @@ -10,7 +10,7 @@ RedisHealthChecker::RedisHealthChecker( const envoy::config::health_checker::redis::v2::Redis& redis_config, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Runtime::RandomGenerator& random, Upstream::HealthCheckEventLoggerPtr&& event_logger, - Extensions::NetworkFilters::RedisProxy::ConnPool::ClientFactory& client_factory) + Extensions::NetworkFilters::Common::Redis::Client::ClientFactory& client_factory) : HealthCheckerImplBase(cluster, config, dispatcher, runtime, random, std::move(event_logger)), client_factory_(client_factory), key_(redis_config.key()) { if (!key_.empty()) { @@ -25,6 +25,12 @@ RedisHealthChecker::RedisActiveHealthCheckSession::RedisActiveHealthCheckSession : ActiveHealthCheckSession(parent, host), parent_(parent) {} RedisHealthChecker::RedisActiveHealthCheckSession::~RedisActiveHealthCheckSession() { + onDeferredDelete(); + ASSERT(current_request_ == nullptr); + ASSERT(client_ == nullptr); +} + +void RedisHealthChecker::RedisActiveHealthCheckSession::onDeferredDelete() { if (current_request_) { current_request_->cancel(); current_request_ = nullptr; @@ -65,12 +71,12 @@ void RedisHealthChecker::RedisActiveHealthCheckSession::onInterval() { } void RedisHealthChecker::RedisActiveHealthCheckSession::onResponse( - Extensions::NetworkFilters::RedisProxy::RespValuePtr&& value) { + NetworkFilters::Common::Redis::RespValuePtr&& value) { current_request_ = nullptr; switch (parent_.type_) { case 
Type::Exists: - if (value->type() == Extensions::NetworkFilters::RedisProxy::RespType::Integer && + if (value->type() == NetworkFilters::Common::Redis::RespType::Integer && value->asInteger() == 0) { handleSuccess(); } else { @@ -78,7 +84,7 @@ void RedisHealthChecker::RedisActiveHealthCheckSession::onResponse( } break; case Type::Ping: - if (value->type() == Extensions::NetworkFilters::RedisProxy::RespType::SimpleString && + if (value->type() == NetworkFilters::Common::Redis::RespType::SimpleString && value->asString() == "PONG") { handleSuccess(); } else { @@ -99,6 +105,14 @@ void RedisHealthChecker::RedisActiveHealthCheckSession::onFailure() { handleFailure(envoy::data::core::v2alpha::HealthCheckFailureType::NETWORK); } +bool RedisHealthChecker::RedisActiveHealthCheckSession::onRedirection( + const NetworkFilters::Common::Redis::RespValue&) { + // Treat any redirection error response from a Redis server as success. + current_request_ = nullptr; + handleSuccess(); + return true; +} + void RedisHealthChecker::RedisActiveHealthCheckSession::onTimeout() { current_request_->cancel(); current_request_ = nullptr; @@ -106,20 +120,20 @@ void RedisHealthChecker::RedisActiveHealthCheckSession::onTimeout() { } RedisHealthChecker::HealthCheckRequest::HealthCheckRequest(const std::string& key) { - std::vector values(2); - values[0].type(Extensions::NetworkFilters::RedisProxy::RespType::BulkString); + std::vector values(2); + values[0].type(NetworkFilters::Common::Redis::RespType::BulkString); values[0].asString() = "EXISTS"; - values[1].type(Extensions::NetworkFilters::RedisProxy::RespType::BulkString); + values[1].type(NetworkFilters::Common::Redis::RespType::BulkString); values[1].asString() = key; - request_.type(Extensions::NetworkFilters::RedisProxy::RespType::Array); + request_.type(NetworkFilters::Common::Redis::RespType::Array); request_.asArray().swap(values); } RedisHealthChecker::HealthCheckRequest::HealthCheckRequest() { - std::vector values(1); - 
values[0].type(Extensions::NetworkFilters::RedisProxy::RespType::BulkString); + std::vector values(1); + values[0].type(NetworkFilters::Common::Redis::RespType::BulkString); values[0].asString() = "PING"; - request_.type(Extensions::NetworkFilters::RedisProxy::RespType::Array); + request_.type(NetworkFilters::Common::Redis::RespType::Array); request_.asArray().swap(values); } diff --git a/source/extensions/health_checkers/redis/redis.h b/source/extensions/health_checkers/redis/redis.h index 5aa702e5665d5..f43aabe7622a6 100644 --- a/source/extensions/health_checkers/redis/redis.h +++ b/source/extensions/health_checkers/redis/redis.h @@ -1,9 +1,12 @@ #pragma once +#include + #include "envoy/config/health_checker/redis/v2/redis.pb.validate.h" #include "common/upstream/health_checker_base_impl.h" +#include "extensions/filters/network/common/redis/client_impl.h" #include "extensions/filters/network/redis_proxy/conn_pool_impl.h" namespace Envoy { @@ -21,14 +24,14 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { const envoy::config::health_checker::redis::v2::Redis& redis_config, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Runtime::RandomGenerator& random, Upstream::HealthCheckEventLoggerPtr&& event_logger, - Extensions::NetworkFilters::RedisProxy::ConnPool::ClientFactory& client_factory); + Extensions::NetworkFilters::Common::Redis::Client::ClientFactory& client_factory); - static const Extensions::NetworkFilters::RedisProxy::RespValue& pingHealthCheckRequest() { + static const NetworkFilters::Common::Redis::RespValue& pingHealthCheckRequest() { static HealthCheckRequest* request = new HealthCheckRequest(); return request->request_; } - static const Extensions::NetworkFilters::RedisProxy::RespValue& + static const NetworkFilters::Common::Redis::RespValue& existsHealthCheckRequest(const std::string& key) { static HealthCheckRequest* request = new HealthCheckRequest(key); return request->request_; @@ -42,26 +45,40 @@ class 
RedisHealthChecker : public Upstream::HealthCheckerImplBase { private: struct RedisActiveHealthCheckSession : public ActiveHealthCheckSession, - public Extensions::NetworkFilters::RedisProxy::ConnPool::Config, - public Extensions::NetworkFilters::RedisProxy::ConnPool::PoolCallbacks, + public Extensions::NetworkFilters::Common::Redis::Client::Config, + public Extensions::NetworkFilters::Common::Redis::Client::PoolCallbacks, public Network::ConnectionCallbacks { RedisActiveHealthCheckSession(RedisHealthChecker& parent, const Upstream::HostSharedPtr& host); ~RedisActiveHealthCheckSession(); + // ActiveHealthCheckSession void onInterval() override; void onTimeout() override; + void onDeferredDelete() final; - // Extensions::NetworkFilters::RedisProxy::ConnPool::Config + // Extensions::NetworkFilters::Common::Redis::Client::Config bool disableOutlierEvents() const override { return true; } std::chrono::milliseconds opTimeout() const override { // Allow the main Health Check infra to control timeout. return parent_.timeout_ * 2; } bool enableHashtagging() const override { return false; } + bool enableRedirection() const override { + return true; + } // Redirection errors are treated as check successes. 
+ + // Batching + unsigned int maxBufferSizeBeforeFlush() const override { + return 0; + } // Forces an immediate flush + std::chrono::milliseconds bufferFlushTimeoutInMs() const override { + return std::chrono::milliseconds(1); + } - // Extensions::NetworkFilters::RedisProxy::ConnPool::PoolCallbacks - void onResponse(Extensions::NetworkFilters::RedisProxy::RespValuePtr&& value) override; + // Extensions::NetworkFilters::Common::Redis::Client::PoolCallbacks + void onResponse(NetworkFilters::Common::Redis::RespValuePtr&& value) override; void onFailure() override; + bool onRedirection(const NetworkFilters::Common::Redis::RespValue& value) override; // Network::ConnectionCallbacks void onEvent(Network::ConnectionEvent event) override; @@ -69,8 +86,8 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { void onBelowWriteBufferLowWatermark() override {} RedisHealthChecker& parent_; - Extensions::NetworkFilters::RedisProxy::ConnPool::ClientPtr client_; - Extensions::NetworkFilters::RedisProxy::ConnPool::PoolRequest* current_request_{}; + Extensions::NetworkFilters::Common::Redis::Client::ClientPtr client_; + Extensions::NetworkFilters::Common::Redis::Client::PoolRequest* current_request_{}; }; enum class Type { Ping, Exists }; @@ -79,7 +96,7 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { HealthCheckRequest(const std::string& key); HealthCheckRequest(); - Extensions::NetworkFilters::RedisProxy::RespValue request_; + NetworkFilters::Common::Redis::RespValue request_; }; typedef std::unique_ptr RedisActiveHealthCheckSessionPtr; @@ -89,7 +106,7 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { return std::make_unique(*this, host); } - Extensions::NetworkFilters::RedisProxy::ConnPool::ClientFactory& client_factory_; + Extensions::NetworkFilters::Common::Redis::Client::ClientFactory& client_factory_; Type type_; const std::string key_; }; diff --git a/source/extensions/quic_listeners/quiche/platform/BUILD 
b/source/extensions/quic_listeners/quiche/platform/BUILD index d163699de1b93..31fc97de3bb0a 100644 --- a/source/extensions/quic_listeners/quiche/platform/BUILD +++ b/source/extensions/quic_listeners/quiche/platform/BUILD @@ -3,8 +3,8 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", + "envoy_cc_test_library", "envoy_package", - "envoy_select_quiche", ) envoy_package() @@ -17,32 +17,82 @@ envoy_package() # TODO: add build target for quic_platform_impl_lib +envoy_cc_library( + name = "flags_impl_lib", + srcs = ["flags_impl.cc"], + hdrs = [ + "flags_impl.h", + "flags_list.h", + ], + external_deps = [ + "abseil_base", + "abseil_synchronization", + ], + visibility = ["//visibility:public"], +) + +envoy_cc_library( + name = "string_utils_lib", + srcs = ["string_utils.cc"], + hdrs = ["string_utils.h"], + external_deps = ["abseil_str_format"], + visibility = ["//visibility:private"], + deps = [ + "//source/common/common:assert_lib", + "//source/common/common:base64_lib", + ], +) + envoy_cc_library( name = "http2_platform_impl_lib", hdrs = [ "http2_arraysize_impl.h", + "http2_bug_tracker_impl.h", "http2_containers_impl.h", "http2_estimate_memory_usage_impl.h", "http2_export_impl.h", "http2_flag_utils_impl.h", + "http2_flags_impl.h", + "http2_logging_impl.h", "http2_macros_impl.h", "http2_optional_impl.h", "http2_ptr_util_impl.h", "http2_string_impl.h", "http2_string_piece_impl.h", + "http2_string_utils_impl.h", ], external_deps = [ "abseil_base", "abseil_optional", + "abseil_str_format", ], visibility = ["//visibility:public"], + deps = [ + ":flags_impl_lib", + ":quic_platform_logging_impl_lib", + ":string_utils_lib", + ], +) + +envoy_cc_library( + name = "quic_platform_export_impl_lib", + hdrs = ["quic_export_impl.h"], + visibility = ["//visibility:public"], +) + +envoy_cc_library( + name = "quic_platform_logging_impl_lib", + srcs = ["quic_logging_impl.cc"], + hdrs = [ + "quic_bug_tracker_impl.h", + "quic_logging_impl.h", + ], + 
visibility = ["//visibility:public"], + deps = ["//source/common/common:assert_lib"], ) envoy_cc_library( name = "quic_platform_base_impl_lib", - srcs = envoy_select_quiche([ - "quic_logging_impl.cc", - ]), hdrs = [ "quic_aligned_impl.h", "quic_arraysize_impl.h", @@ -50,16 +100,23 @@ envoy_cc_library( "quic_containers_impl.h", "quic_endian_impl.h", "quic_estimate_memory_usage_impl.h", - "quic_export_impl.h", "quic_fallthrough_impl.h", "quic_flag_utils_impl.h", + "quic_flags_impl.h", "quic_iovec_impl.h", + "quic_logging_impl.h", + "quic_map_util_impl.h", "quic_prefetch_impl.h", "quic_ptr_util_impl.h", - "quic_string_impl.h", + "quic_reference_counted_impl.h", + "quic_server_stats_impl.h", + "quic_stack_trace_impl.h", + "quic_str_cat_impl.h", + "quic_stream_buffer_allocator_impl.h", "quic_string_piece_impl.h", + "quic_text_utils_impl.h", "quic_uint128_impl.h", - ] + envoy_select_quiche(["quic_logging_impl.h"]), + ], external_deps = [ "abseil_base", "abseil_hash", @@ -67,48 +124,92 @@ envoy_cc_library( "abseil_memory", "abseil_node_hash_map", "abseil_node_hash_set", + "googletest", ], visibility = ["//visibility:public"], - deps = envoy_select_quiche([ + deps = [ + ":flags_impl_lib", + ":quic_platform_logging_impl_lib", + ":string_utils_lib", "//source/common/common:assert_lib", - ]), + "//source/common/common:byte_order_lib", + "//source/server:backtrace_lib", + "@com_googlesource_quiche//:quic_core_buffer_allocator_lib", + "@com_googlesource_quiche//:quic_platform_export", + ], ) envoy_cc_library( name = "quic_platform_impl_lib", + srcs = [ + "quic_cert_utils_impl.cc", + "quic_file_utils_impl.cc", + "quic_hostname_utils_impl.cc", + ], hdrs = [ + "quic_cert_utils_impl.h", + "quic_file_utils_impl.h", + "quic_hostname_utils_impl.h", "quic_mutex_impl.h", - "quic_str_cat_impl.h", + "quic_pcc_sender_impl.h", "quic_string_utils_impl.h", ], external_deps = [ "quiche_quic_platform_base", + "abseil_str_format", "abseil_synchronization", + "abseil_time", + "ssl", ], visibility 
= ["//visibility:public"], + deps = [ + "//source/common/common:assert_lib", + "//source/common/filesystem:directory_lib", + "//source/common/filesystem:filesystem_lib", + "//source/common/http:utility_lib", + "@com_googlesource_quiche//:quic_core_types_lib", + ], ) envoy_cc_library( name = "spdy_platform_impl_lib", hdrs = [ "spdy_arraysize_impl.h", + "spdy_bug_tracker_impl.h", "spdy_containers_impl.h", "spdy_endianness_util_impl.h", "spdy_estimate_memory_usage_impl.h", "spdy_export_impl.h", + "spdy_flags_impl.h", + "spdy_logging_impl.h", "spdy_macros_impl.h", + "spdy_mem_slice_impl.h", "spdy_ptr_util_impl.h", "spdy_string_impl.h", "spdy_string_piece_impl.h", + "spdy_string_utils_impl.h", "spdy_test_helpers_impl.h", "spdy_test_utils_prod_impl.h", - "spdy_unsafe_arena_impl.h", ], external_deps = [ "abseil_base", "abseil_hash", "abseil_inlined_vector", "abseil_memory", + "abseil_str_format", ], visibility = ["//visibility:public"], + deps = [ + ":flags_impl_lib", + ":quic_platform_logging_impl_lib", + ":string_utils_lib", + "//source/common/common:assert_lib", + ], +) + +envoy_cc_library( + name = "spdy_platform_unsafe_arena_impl_lib", + hdrs = ["spdy_unsafe_arena_impl.h"], + visibility = ["//visibility:public"], + deps = ["@com_googlesource_quiche//:spdy_simple_arena_lib"], ) diff --git a/source/extensions/quic_listeners/quiche/platform/flags_impl.cc b/source/extensions/quic_listeners/quiche/platform/flags_impl.cc new file mode 100644 index 0000000000000..70fb182d673d0 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/flags_impl.cc @@ -0,0 +1,102 @@ +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. 
+ +#include "extensions/quic_listeners/quiche/platform/flags_impl.h" + +#include <set> + +#include "absl/strings/ascii.h" +#include "absl/strings/numbers.h" + +namespace quiche { + +namespace { + +absl::flat_hash_map<std::string, Flag*> MakeFlagMap() { + absl::flat_hash_map<std::string, Flag*> flags; + +#define QUICHE_FLAG(type, flag, value, help) flags.emplace(FLAGS_##flag->name(), FLAGS_##flag); +#include "extensions/quic_listeners/quiche/platform/flags_list.h" +#undef QUICHE_FLAG + + return flags; +} + +} // namespace + +// static +FlagRegistry& FlagRegistry::GetInstance() { + static auto* instance = new FlagRegistry(); + return *instance; +} + +FlagRegistry::FlagRegistry() : flags_(MakeFlagMap()) {} + +void FlagRegistry::ResetFlags() const { + for (auto& kv : flags_) { + kv.second->ResetValue(); + } +} + +Flag* FlagRegistry::FindFlag(const std::string& name) const { + auto it = flags_.find(name); + return (it != flags_.end()) ? it->second : nullptr; +} + +template <> bool TypedFlag<bool>::SetValueFromString(const std::string& value_str) { + static const auto* kTrueValues = new std::set<std::string>({"1", "t", "true", "y", "yes"}); + static const auto* kFalseValues = new std::set<std::string>({"0", "f", "false", "n", "no"}); + auto lower = absl::AsciiStrToLower(value_str); + if (kTrueValues->find(lower) != kTrueValues->end()) { + SetValue(true); + return true; + } + if (kFalseValues->find(lower) != kFalseValues->end()) { + SetValue(false); + return true; + } + return false; +} + +template <> bool TypedFlag<int32_t>::SetValueFromString(const std::string& value_str) { + int32_t value; + if (absl::SimpleAtoi(value_str, &value)) { + SetValue(value); + return true; + } + return false; +} + +template <> bool TypedFlag<int64_t>::SetValueFromString(const std::string& value_str) { + int64_t value; + if (absl::SimpleAtoi(value_str, &value)) { + SetValue(value); + return true; + } + return false; +} + +template <> bool TypedFlag<double>::SetValueFromString(const std::string& value_str) { + double value; + if (absl::SimpleAtod(value_str, &value)) { + SetValue(value); + return 
true; + } + return false; +} + +template <> bool TypedFlag<std::string>::SetValueFromString(const std::string& value_str) { + SetValue(value_str); + return true; +} + +// Flag definitions +#define QUICHE_FLAG(type, flag, value, help) \ + TypedFlag<type>* FLAGS_##flag = new TypedFlag<type>(#flag, value, help); +#include "extensions/quic_listeners/quiche/platform/flags_list.h" +#undef QUICHE_FLAG + +} // namespace quiche diff --git a/source/extensions/quic_listeners/quiche/platform/flags_impl.h b/source/extensions/quic_listeners/quiche/platform/flags_impl.h new file mode 100644 index 0000000000000..22aca0ef995f7 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/flags_impl.h @@ -0,0 +1,110 @@ +#pragma once + +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include <string> + +#include "absl/container/flat_hash_map.h" +#include "absl/synchronization/mutex.h" + +namespace quiche { + +class Flag; + +// TODO: modify flags implementation to be backed by +// Runtime::runtimeFeatureEnabled(), which is the canonical Envoy way of +// enabling and disabling features. + +// Registry of QUICHE flags. Can be used to reset all flags to default values, +// and to look up and set flags by name. +class FlagRegistry { +public: + ~FlagRegistry() = default; + + // Return singleton instance. + static FlagRegistry& GetInstance(); + + // Reset all registered flags to their default values. + void ResetFlags() const; + + // Look up a flag by name. + Flag* FindFlag(const std::string& name) const; + +private: + FlagRegistry(); + + const absl::flat_hash_map<std::string, Flag*> flags_; +}; + +// Abstract class for QUICHE protocol and feature flags. +class Flag { +public: + // Construct Flag with the given name and help string. 
+ Flag(const char* name, const char* help) : name_(name), help_(help) {} + virtual ~Flag() = default; + + // Set flag value from given string, returning true iff successful. + virtual bool SetValueFromString(const std::string& value_str) = 0; + + // Reset flag to default value. + virtual void ResetValue() = 0; + + // Return flag name. + std::string name() const { return name_; } + + // Return flag help string. + std::string help() const { return help_; } + +private: + std::string name_; + std::string help_; +}; + +// Concrete class for QUICHE protocol and feature flags, templated by flag type. +template <typename T> class TypedFlag : public Flag { +public: + TypedFlag(const char* name, T default_value, const char* help) + : Flag(name, help), value_(default_value), default_value_(default_value) {} + + bool SetValueFromString(const std::string& value_str) override; + + void ResetValue() override { + absl::MutexLock lock(&mutex_); + value_ = default_value_; + } + + // Set flag value. + void SetValue(T value) { + absl::MutexLock lock(&mutex_); + value_ = value; + } + + // Return flag value. 
+ T value() const { + absl::MutexLock lock(&mutex_); + return value_; + } + +private: + mutable absl::Mutex mutex_; + T value_ GUARDED_BY(mutex_); + T default_value_; +}; + +// SetValueFromString specializations +template <> bool TypedFlag<bool>::SetValueFromString(const std::string& value_str); +template <> bool TypedFlag<int32_t>::SetValueFromString(const std::string& value_str); +template <> bool TypedFlag<int64_t>::SetValueFromString(const std::string& value_str); +template <> bool TypedFlag<double>::SetValueFromString(const std::string& value_str); +template <> bool TypedFlag<std::string>::SetValueFromString(const std::string& value_str); + +// Flag declarations +#define QUICHE_FLAG(type, flag, value, help) extern TypedFlag<type>* FLAGS_##flag; +#include "extensions/quic_listeners/quiche/platform/flags_list.h" +#undef QUICHE_FLAG + +} // namespace quiche diff --git a/source/extensions/quic_listeners/quiche/platform/flags_list.h b/source/extensions/quic_listeners/quiche/platform/flags_list.h new file mode 100644 index 0000000000000..2c99774b557cf --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/flags_list.h @@ -0,0 +1,414 @@ +// This file intentionally does not have header guards, it's intended to be +// included multiple times, each time with a different definition of QUIC_FLAG. + +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +// The contents of this file are based off of +// //third_party/quic/core:quic_flags_list in google3, with the addition of +// testonly flags for testing http2 and spdy flags APIs. +// TODO(mpwarres): include generated flags_list.h as part of QUICHE. 
+ +#if defined(QUICHE_FLAG) + +QUICHE_FLAG(bool, quic_reloadable_flag_advertise_quic_for_https_for_debugips, false, "") + +QUICHE_FLAG(bool, quic_reloadable_flag_advertise_quic_for_https_for_external_users, false, "") + +QUICHE_FLAG(bool, quic_reloadable_flag_enable_quic_stateless_reject_support, true, + "Enables server-side support for QUIC stateless rejects.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_allow_backend_set_stream_ttl, false, + "If true, check backend response header for X-Response-Ttl. If it is " + "provided, the stream TTL is set. A QUIC stream will be immediately " + "canceled when tries to write data if this TTL expired.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_alpn_dispatch, false, + "Support different QUIC sessions, as indicated by ALPN. Used for QBONE.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_always_reset_short_header_packets, true, + "If true, instead of send encryption none termination packets, send " + "stateless reset in response to short headers.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_app_limited_recovery, false, + "When you're app-limited entering recovery, stay app-limited until " + "you exit recovery in QUIC BBR.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_flexible_app_limited, false, + "When true and the BBR9 connection option is present, BBR only considers " + "bandwidth samples app-limited if they're not filling the pipe.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_less_probe_rtt, false, + "Enables 3 new connection options to make PROBE_RTT more aggressive.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_no_bytes_acked_in_startup_recovery, false, + "When in STARTUP and recovery, do not add bytes_acked to QUIC BBR's " + "CWND in CalculateCongestionWindow()") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_one_mss_conservation, false, + "When true, ensure BBR allows at least one MSS to be sent in " + "response to an ACK in packet conservation.") + +QUICHE_FLAG(bool, 
quic_reloadable_flag_quic_bbr_slower_startup3, false, + "Add 3 connection options to decrease the pacing and CWND gain in " + "QUIC BBR STARTUP.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_slower_startup4, false, + "Enables the BBQ5 connection option, which forces saved aggregation values " + "to expire when the bandwidth increases more than 25% in QUIC BBR STARTUP.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_startup_rate_reduction, false, + "When true, enables the BBS4 and BBS5 connection options, which reduce " + "BBR's pacing rate in STARTUP as more losses occur as a fraction of CWND.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_debug_wrong_qos, false, + "If true, consider getting QoS after stream has been detached as GFE bug.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_to_bbr, true, + "When true, defaults to BBR congestion control instead of Cubic.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_deprecate_ack_bundling_mode, false, + "If true, stop using AckBundling mode to send ACK, also deprecate " + "ack_queued from QuicConnection.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_connection_migration_for_udp_proxying, true, + "If true, GFE disables connection migration in connection option for " + "proxied sessions.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_39, false, + "If true, disable QUIC version 39.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_donot_reset_ideal_next_packet_send_time, false, + "If true, stop resetting ideal_next_packet_send_time_ in pacing sender.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_eighth_rtt_loss_detection, false, + "When true, the LOSS connection option allows for 1/8 RTT of " + "reording instead of the current 1/8th threshold which has been " + "found to be too large for fast loss recovery.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_ack_decimation, false, + "Default enables QUIC ack decimation and adds a connection option to " + "disable 
it.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_pcc3, false, + "If true, enable experiment for testing PCC congestion-control.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_43, true, + "If true, enable QUIC version 43.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_44, true, + "If true, enable version 44 which uses IETF header format.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_46, true, + "If true, enable QUIC version 46.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_47, false, + "If true, enable QUIC version 47 which adds support for variable " + "length connection IDs.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_99, false, "If true, enable version 99.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enabled, false, "") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_faster_interval_add_in_sequence_buffer, false, + "If true, QuicStreamSequencerBuffer will switch to a new " + "QuicIntervalSet::AddOptimizedForAppend method in OnStreamData().") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_adaptive_time_loss, false, + "Simplify QUIC's adaptive time loss detection to measure the " + "necessary reordering window for every spurious retransmit.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_has_pending_crypto_data, false, + "If true, QuicSession::HasPendingCryptoData checks whether the " + "crypto stream's send buffer is empty. 
This flag fixes a bug where " + "the retransmission alarm mode is wrong for the first CHLO packet.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_spurious_ack_alarm, false, + "If true, do not schedule ack alarm if should_send_ack is set in the " + "generator.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_termination_packets, false, + "If true, GFE time wait list will send termination packets based on " + "current packet's encryption level.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_time_of_first_packet_sent_after_receiving, false, + "When true, fix initialization and updating of " + "|time_of_first_packet_sent_after_receiving_| in QuicConnection.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_limit_window_updates_in_traces, false, + "Limits the number of window update events recorded in Tracegraf logs.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_listener_never_fake_epollout, false, + "If true, QuicListener::OnSocketIsWritable will always return false, " + "which means there will never be a fake EPOLLOUT event in the next " + "epoll iteration.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_log_cert_name_for_empty_sct, true, + "If true, log leaf cert subject name into warning log.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_log_is_proxy_in_tcs, false, + "If true, log whether a GFE QUIC server session is UDP proxied and whether " + "it is a health check connection, in transport connection stats.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_logging_frames_in_tracegraf, false, + "If true, populate frames info when logging tracegraf.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_monotonic_epoll_clock, false, + "If true, QuicEpollClock::Now() will monotonically increase.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_client_conn_ver_negotiation, false, + "If true, a client connection would be closed when a version " + "negotiation packet is received. 
It would be the higher layer's " + "responsibility to do the reconnection.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_cloud_domain_sni_lookup_on_missing_sni, false, + "Do not attempt to match an empty Server Name Indication (SNI) " + "against names extracted from Cloud customer certificates.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_dup_experiment_id_2, false, + "If true, transport connection stats doesn't report duplicated " + "experiments for same connection.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_goaway_for_proxied_port_change, false, + "If true, for proxied quic sessions, GFE will not send a GOAWAY in " + "response to a client port change.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_v2_scaling_factor, false, + "When true, don't use an extra scaling factor when reading packets " + "from QUIC's RX_RING with TPACKET_V2.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_optimize_inflight_check, false, + "Stop checking QuicUnackedPacketMap::HasUnackedRetransmittableFrames " + "and instead rely on the existing check that bytes_in_flight > 0") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_print_tag_hex, false, + "When true, non-ASCII QUIC tags are printed as hex instead of integers.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_proxy_check_toss_on_insertion_failure, false, + "If true, enable the code that fixes a race condition for quic udp " + "proxying in L0. 
See b/70036019.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_proxy_munge_response_for_healthcheck, true, + "If true, for udp proxy, the health check packets from L1 to L0 will " + "be munged.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_proxy_read_packed_strings, true, + "If true, QuicProxyDispatcher will prefer to extract client_address " + "and server_vip from packed_client_address and packed_server_vip, " + "respectively.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_proxy_write_packed_strings, false, + "If true, QuicProxyDispatcher will write packed_client_address and " + "packed_server_vip in TcpProxyHeaderProto.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_record_frontend_service_vip_mapping, false, + "If true, for L1 GFE, as requests come in, record frontend service to VIP " + "mapping which is used to announce VIP in SHLO for proxied sessions. ") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_reject_all_traffic, false, "") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_require_handshake_confirmation, false, + "If true, require handshake confirmation for QUIC connections, " + "functionally disabling 0-rtt handshakes.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_rpm_decides_when_to_send_acks, false, + "If both this flag and " + "gfe2_reloadable_flag_quic_deprecate_ack_bundling_mode are true, " + "QuicReceivedPacketManager decides when to send ACKs.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_timestamps, false, + "When the STMP connection option is sent by the client, timestamps " + "in the QUIC ACK frame are sent and processed.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_server_push, true, + "If true, enable server push feature on QUIC.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_set_transmission_type_for_next_frame, true, + "If true, QuicPacketCreator::SetTransmissionType will set the " + "transmission type of the next successfully added frame.") + +QUICHE_FLAG(bool, 
quic_reloadable_flag_quic_simplify_build_connectivity_probing_packet, true, + "If true, simplifies the implementation of " + "QuicFramer::BuildConnectivityProbingPacket().") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_stateless_proxy, false, + "If true, UDP proxy will not drop versionless packets, in other " + "words, it will proxy all packets from client.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_stop_reading_when_level_triggered, false, + "When true, calling StopReading() on a level-triggered QUIC stream " + "sequencer will cause the sequencer to discard future data.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_false, false, + "A testonly reloadable flag that will always default to false.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_true, true, + "A testonly reloadable flag that will always default to true.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_tolerate_reneging, false, + "If true, do not close connection if received largest acked decreases.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_unified_iw_options, false, + "When true, set the initial congestion control window from connection " + "options in QuicSentPacketManager rather than TcpCubicSenderBytes.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_cheap_stateless_rejects, true, + "If true, QUIC will use cheap stateless rejects without creating a full " + "connection. 
Prerequisite: --quic_allow_chlo_buffering has to be true.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_common_stream_check, false, + "If true, use common code for checking whether a new stream ID may " + "be allocated.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_header_stage_idle_list2, false, + "If true, use header stage idle list for QUIC connections in GFE.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_leto_key_exchange, false, + "If true, QUIC will attempt to use the Leto key exchange service and " + "only fall back to local key exchange if that fails.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_new_append_connection_id, false, + "When true QuicFramer will use AppendIetfConnectionIdsNew instead of " + "AppendIetfConnectionId.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_pigeon_sockets, false, + "Use USPS Direct Path for QUIC egress.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_quic_time_for_received_timestamp, false, + "If true, use QuicClock::Now() for the fallback source of packet " + "received time instead of WallNow().") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_uber_loss_algorithm, false, + "If true, use one loss algorithm per encryption level.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_uber_received_packet_manager, false, + "If this flag and quic_rpm_decides_when_to_send_acks is true, use uber " + "received packet manager instead of the single received packet manager.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_validate_packet_number_post_decryption, false, + "If true, a QUIC endpoint will valid a received packet number after " + "successfully decrypting the packet.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_v44_disable_trial_decryption, false, + "Disables trial decryption in QUIC v44 and above.") + +QUICHE_FLAG(bool, quic_restart_flag_quic_allow_loas_multipacket_chlo, false, + "If true, inspects QUIC CHLOs for kLOAS and early creates sessions " + "to allow multi-packet CHLOs") 
+ +QUICHE_FLAG(bool, quic_restart_flag_quic_enable_accept_random_ipn, false, + "Allow QUIC to accept initial packet numbers that are random, not 1.") + +QUICHE_FLAG(bool, quic_restart_flag_quic_enable_gso_for_udp_egress, false, + "If 1) flag is true, 2) UDP egress_method is used in GFE config, and " + "3) UDP GSO is supported by the kernel, GFE will use UDP GSO for " + "egress, except for UDP proxy.") + +QUICHE_FLAG(bool, quic_restart_flag_quic_enable_sendmmsg_for_udp_egress, false, + "If 1) flag is true, 2) UDP egress_method is used in GFE config, and " + "3) --gfe2_restart_flag_quic_enable_gso_for_udp_egress is false OR " + "gso is not supported by kernel, GFE will use sendmmsg for egress, " + "except for UDP proxy.") + +QUICHE_FLAG(bool, quic_restart_flag_quic_no_server_conn_ver_negotiation2, false, + "If true, dispatcher passes in a single version when creating a server " + "connection, such that version negotiation is not supported in connection.") + +QUICHE_FLAG(bool, quic_restart_flag_quic_offload_pacing_to_usps2, false, + "If true, QUIC offload pacing when using USPS as egress method.") + +QUICHE_FLAG(bool, quic_restart_flag_quic_pigeon_use_memfd_packet_memory, false, + "If true, GFE QUIC will forcefully use memfd to create packet memory " + "for pigeon socket. 
Otherwise memfd is used if " + "--pigeon_sealable_files_enabled is true.") + +QUICHE_FLAG(bool, quic_restart_flag_quic_rx_ring_use_tpacket_v3, false, + "If true, use TPACKET_V3 for QuicRxRing instead of TPACKET_V2.") + +QUICHE_FLAG(bool, quic_restart_flag_quic_server_handle_egress_epoll_err, false, + "If true, handle EPOLLERRs from QUIC server egress sockets.") + +QUICHE_FLAG(bool, quic_restart_flag_quic_testonly_default_false, false, + "A testonly restart flag that will always default to false.") + +QUICHE_FLAG(bool, quic_restart_flag_quic_testonly_default_true, true, + "A testonly restart flag that will always default to true.") + +QUICHE_FLAG(bool, quic_restart_flag_quic_use_leto_for_quic_configs, false, + "If true, use Leto to fetch QUIC server configs instead of using the " + "seeds from Memento.") + +QUICHE_FLAG(bool, quic_restart_flag_quic_use_pigeon_socket_to_backend, false, + "If true, create a shared pigeon socket for all quic to backend " + "connections and switch to use it after successful handshake.") + +QUICHE_FLAG(bool, quic_allow_chlo_buffering, true, + "If true, allows packets to be buffered in anticipation of a " + "future CHLO, and allow CHLO packets to be buffered until next " + "iteration of the event loop.") + +QUICHE_FLAG(bool, quic_disable_pacing_for_perf_tests, false, "If true, disable pacing in QUIC") + +QUICHE_FLAG(bool, quic_enforce_single_packet_chlo, true, + "If true, enforce that QUIC CHLOs fit in one packet") + +// Currently, this number is quite conservative. At a hypothetical 1000 qps, +// this means that the longest time-wait list we should see is: +// 200 seconds * 1000 qps = 200000. +// Of course, there are usually many queries per QUIC connection, so we allow a +// factor of 3 leeway. +QUICHE_FLAG(int64_t, quic_time_wait_list_max_connections, 600000, + "Maximum number of connections on the time-wait list. 
" + "A negative value implies no configured limit.") + +QUICHE_FLAG(int64_t, quic_time_wait_list_seconds, 200, + "Time period for which a given connection_id should live in " + "the time-wait state.") + +QUICHE_FLAG(double, quic_bbr_cwnd_gain, 2.0f, + "Congestion window gain for QUIC BBR during PROBE_BW phase.") + +QUICHE_FLAG(int32_t, quic_buffered_data_threshold, 8 * 1024, + "If buffered data in QUIC stream is less than this " + "threshold, buffers all provided data or asks upper layer for more data") + +QUICHE_FLAG(int32_t, quic_ietf_draft_version, 0, + "Mechanism to override version label and ALPN for IETF interop.") + +QUICHE_FLAG(int32_t, quic_send_buffer_max_data_slice_size, 4 * 1024, + "Max size of data slice in bytes for QUIC stream send buffer.") + +QUICHE_FLAG(bool, quic_supports_tls_handshake, false, + "If true, QUIC supports both QUIC Crypto and TLS 1.3 for the " + "handshake protocol") + +QUICHE_FLAG(int32_t, quic_lumpy_pacing_size, 1, + "Number of packets that the pacing sender allows in bursts during pacing.") + +QUICHE_FLAG(double, quic_lumpy_pacing_cwnd_fraction, 0.25f, + "Congestion window fraction that the pacing sender allows in bursts " + "during pacing.") + +QUICHE_FLAG(int32_t, quic_max_pace_time_into_future_ms, 10, + "Max time that QUIC can pace packets into the future in ms.") + +QUICHE_FLAG(double, quic_pace_time_into_future_srtt_fraction, + 0.125f, // One-eighth smoothed RTT + "Smoothed RTT fraction that a connection can pace packets into the future.") + +QUICHE_FLAG(bool, http2_reloadable_flag_http2_testonly_default_false, false, + "A testonly reloadable flag that will always default to false.") + +QUICHE_FLAG(bool, http2_restart_flag_http2_testonly_default_false, false, + "A testonly restart flag that will always default to false.") + +QUICHE_FLAG(bool, spdy_reloadable_flag_spdy_testonly_default_false, false, + "A testonly reloadable flag that will always default to false.") + +QUICHE_FLAG(bool, 
spdy_restart_flag_spdy_testonly_default_false, false, + "A testonly restart flag that will always default to false.") + +#endif diff --git a/source/extensions/quic_listeners/quiche/platform/http2_bug_tracker_impl.h b/source/extensions/quic_listeners/quiche/platform/http2_bug_tracker_impl.h new file mode 100644 index 0000000000000..58c7039d536bb --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/http2_bug_tracker_impl.h @@ -0,0 +1,13 @@ +#pragma once + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include "extensions/quic_listeners/quiche/platform/quic_bug_tracker_impl.h" + +#define HTTP2_BUG_IMPL QUIC_BUG_IMPL +#define HTTP2_BUG_IF_IMPL QUIC_BUG_IF_IMPL +#define FLAGS_http2_always_log_bugs_for_tests_IMPL true diff --git a/source/extensions/quic_listeners/quiche/platform/http2_flags_impl.h b/source/extensions/quic_listeners/quiche/platform/http2_flags_impl.h new file mode 100644 index 0000000000000..f1b53f4067f42 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/http2_flags_impl.h @@ -0,0 +1,14 @@ +#pragma once + +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. 
+ +#include "extensions/quic_listeners/quiche/platform/flags_impl.h" + +#define GetHttp2ReloadableFlagImpl(flag) quiche::FLAGS_http2_reloadable_flag_##flag->value() + +#define SetHttp2ReloadableFlagImpl(flag, value) \ + quiche::FLAGS_http2_reloadable_flag_##flag->SetValue(value) diff --git a/source/extensions/quic_listeners/quiche/platform/http2_logging_impl.h b/source/extensions/quic_listeners/quiche/platform/http2_logging_impl.h new file mode 100644 index 0000000000000..473c2d00d4bd4 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/http2_logging_impl.h @@ -0,0 +1,23 @@ +#pragma once + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include "extensions/quic_listeners/quiche/platform/quic_logging_impl.h" + +#define HTTP2_LOG_IMPL(severity) QUIC_LOG_IMPL(severity) + +#define HTTP2_VLOG_IMPL(verbose_level) QUIC_VLOG_IMPL(verbose_level) + +#define HTTP2_DLOG_IMPL(severity) QUIC_DLOG_IMPL(severity) + +#define HTTP2_DLOG_IF_IMPL(severity, condition) QUIC_DLOG_IF_IMPL(severity, condition) + +#define HTTP2_DVLOG_IMPL(verbose_level) QUIC_DVLOG_IMPL(verbose_level) + +#define HTTP2_DVLOG_IF_IMPL(verbose_level, condition) QUIC_DVLOG_IF_IMPL(verbose_level, condition) + +#define HTTP2_DLOG_EVERY_N_IMPL(severity, n) QUIC_DLOG_EVERY_N_IMPL(severity, n) diff --git a/source/extensions/quic_listeners/quiche/platform/http2_macros_impl.h b/source/extensions/quic_listeners/quiche/platform/http2_macros_impl.h index 3d7df5563e9f1..4f99f2d2f42b8 100644 --- a/source/extensions/quic_listeners/quiche/platform/http2_macros_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/http2_macros_impl.h @@ -1,15 +1,26 @@ #pragma once -#include "absl/base/macros.h" - // NOLINT(namespace-envoy) // This file is part of the QUICHE platform implementation, and is not to be // consumed or referenced 
directly by other Envoy code. It serves purely as a // porting layer for QUICHE. +#include + +#include "extensions/quic_listeners/quiche/platform/quic_logging_impl.h" + +#include "absl/base/macros.h" + #define HTTP2_FALLTHROUGH_IMPL ABSL_FALLTHROUGH_INTENDED -#define HTTP2_DIE_IF_NULL_IMPL(ptr) ABSL_DIE_IF_NULL(ptr) +#define HTTP2_DIE_IF_NULL_IMPL(ptr) dieIfNull(ptr) +#define HTTP2_UNREACHABLE_IMPL() DCHECK(false) + +namespace http2 { + +template inline T dieIfNull(T&& ptr) { + CHECK((ptr) != nullptr); + return std::forward(ptr); +} -// TODO: implement -#define HTTP2_UNREACHABLE_IMPL() 0 +} // namespace http2 diff --git a/source/extensions/quic_listeners/quiche/platform/http2_string_utils_impl.h b/source/extensions/quic_listeners/quiche/platform/http2_string_utils_impl.h new file mode 100644 index 0000000000000..d4df004f4c51f --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/http2_string_utils_impl.h @@ -0,0 +1,47 @@ +#pragma once + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include "extensions/quic_listeners/quiche/platform/string_utils.h" + +#include "absl/strings/escaping.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/str_format.h" +#include "fmt/printf.h" + +namespace http2 { + +template inline std::string Http2StrCatImpl(const Args&... args) { + return absl::StrCat(std::forward(args)...); +} + +template +inline void Http2StrAppendImpl(std::string* output, const Args&... args) { + absl::StrAppend(output, std::forward(args)...); +} + +template inline std::string Http2StringPrintfImpl(const Args&... 
args) { + return fmt::sprintf(std::forward(args)...); +} + +inline std::string Http2HexEncodeImpl(const void* bytes, size_t size) { + return absl::BytesToHexString(absl::string_view(static_cast(bytes), size)); +} + +inline std::string Http2HexDecodeImpl(absl::string_view data) { + return absl::HexStringToBytes(data); +} + +inline std::string Http2HexDumpImpl(absl::string_view data) { return quiche::HexDump(data); } + +inline std::string Http2HexEscapeImpl(absl::string_view data) { return absl::CHexEscape(data); } + +template inline std::string Http2HexImpl(Number number) { + return absl::StrCat(absl::Hex(number)); +} + +} // namespace http2 diff --git a/source/extensions/quic_listeners/quiche/platform/quic_bug_tracker_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_bug_tracker_impl.h new file mode 100644 index 0000000000000..050bd385d8818 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_bug_tracker_impl.h @@ -0,0 +1,16 @@ +#pragma once + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include "extensions/quic_listeners/quiche/platform/quic_logging_impl.h" + +// TODO(wub): Implement exponential back off to avoid performance problems due +// to excessive QUIC_BUG. 
+#define QUIC_BUG_IMPL QUIC_LOG_IMPL(DFATAL) +#define QUIC_BUG_IF_IMPL(condition) QUIC_LOG_IF_IMPL(DFATAL, condition) +#define QUIC_PEER_BUG_IMPL QUIC_LOG_IMPL(ERROR) +#define QUIC_PEER_BUG_IF_IMPL(condition) QUIC_LOG_IF_IMPL(ERROR, condition) diff --git a/source/extensions/quic_listeners/quiche/platform/quic_cert_utils_impl.cc b/source/extensions/quic_listeners/quiche/platform/quic_cert_utils_impl.cc new file mode 100644 index 0000000000000..c9b347d57dea6 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_cert_utils_impl.cc @@ -0,0 +1,68 @@ +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include "extensions/quic_listeners/quiche/platform/quic_cert_utils_impl.h" + +#include "openssl/bytestring.h" + +namespace quic { + +// static +bool QuicCertUtilsImpl::ExtractSubjectNameFromDERCert(QuicStringPiece cert, + QuicStringPiece* subject_out) { + CBS tbs_certificate; + if (!SeekToSubject(cert, &tbs_certificate)) { + return false; + } + + CBS subject; + if (!CBS_get_asn1_element(&tbs_certificate, &subject, CBS_ASN1_SEQUENCE)) { + return false; + } + *subject_out = + absl::string_view(reinterpret_cast(CBS_data(&subject)), CBS_len(&subject)); + return true; +} + +// static +bool QuicCertUtilsImpl::SeekToSubject(QuicStringPiece cert, CBS* tbs_certificate) { + CBS der; + CBS_init(&der, reinterpret_cast(cert.data()), cert.size()); + CBS certificate; + // From RFC 5280, section 4.1 + // Certificate ::= SEQUENCE { + // tbsCertificate TBSCertificate, + // signatureAlgorithm AlgorithmIdentifier, + // signatureValue BIT STRING } + + // TBSCertificate ::= SEQUENCE { + // version [0] EXPLICIT Version DEFAULT v1, + // serialNumber CertificateSerialNumber, + // signature AlgorithmIdentifier, + // issuer Name, + // validity Validity, + // subject Name, + // subjectPublicKeyInfo 
SubjectPublicKeyInfo, + if (!CBS_get_asn1(&der, &certificate, CBS_ASN1_SEQUENCE) || + CBS_len(&der) != 0 || // We don't allow junk after the certificate. + !CBS_get_asn1(&certificate, tbs_certificate, CBS_ASN1_SEQUENCE) || + // version. + !CBS_get_optional_asn1(tbs_certificate, nullptr, nullptr, + CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 0) || + // Serial number. + !CBS_get_asn1(tbs_certificate, nullptr, CBS_ASN1_INTEGER) || + // Signature. + !CBS_get_asn1(tbs_certificate, nullptr, CBS_ASN1_SEQUENCE) || + // Issuer. + !CBS_get_asn1(tbs_certificate, nullptr, CBS_ASN1_SEQUENCE) || + // Validity. + !CBS_get_asn1(tbs_certificate, nullptr, CBS_ASN1_SEQUENCE)) { + return false; + } + return true; +} + +} // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quic_cert_utils_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_cert_utils_impl.h new file mode 100644 index 0000000000000..fea2a75ad7698 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_cert_utils_impl.h @@ -0,0 +1,22 @@ +#pragma once + +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. 
+ +#include "openssl/base.h" +#include "quiche/quic/platform/api/quic_string_piece.h" + +namespace quic { + +class QuicCertUtilsImpl { +public: + static bool ExtractSubjectNameFromDERCert(QuicStringPiece cert, QuicStringPiece* subject_out); + +private: + static bool SeekToSubject(QuicStringPiece cert, CBS* tbs_certificate); +}; + +} // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quic_endian_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_endian_impl.h index 02e5356830bf0..c456da321fa73 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_endian_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_endian_impl.h @@ -1,31 +1,28 @@ #pragma once -#include - -#include - // NOLINT(namespace-envoy) // This file is part of the QUICHE platform implementation, and is not to be // consumed or referenced directly by other Envoy code. It serves purely as a // porting layer for QUICHE. +#include + +#include "common/common/byte_order.h" + namespace quic { class QuicEndianImpl { public: - static uint16_t HostToNet16(uint16_t x) { return htons(x); } - static uint32_t HostToNet32(uint32_t x) { return htonl(x); } - // TODO: implement - static uint64_t HostToNet64(uint64_t /*x*/) { return 0; } - - static uint16_t NetToHost16(uint16_t x) { return ntohs(x); } - static uint32_t NetToHost32(uint32_t x) { return ntohl(x); } - // TODO: implement - static uint64_t NetToHost64(uint64_t /*x*/) { return 0; } - - // TODO: implement - static bool HostIsLittleEndian() { return false; } + static uint16_t HostToNet16(uint16_t x) { return toEndianness(x); } + static uint32_t HostToNet32(uint32_t x) { return toEndianness(x); } + static uint64_t HostToNet64(uint64_t x) { return toEndianness(x); } + + static uint16_t NetToHost16(uint16_t x) { return fromEndianness(x); } + static uint32_t NetToHost32(uint32_t x) { return fromEndianness(x); } + static uint64_t NetToHost64(uint64_t x) { return fromEndianness(x); } + + static bool 
HostIsLittleEndian() { return NetToHost16(0x1234) != 0x1234; } }; } // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quic_file_utils_impl.cc b/source/extensions/quic_listeners/quiche/platform/quic_file_utils_impl.cc new file mode 100644 index 0000000000000..00aaeef0161cb --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_file_utils_impl.cc @@ -0,0 +1,55 @@ +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include "extensions/quic_listeners/quiche/platform/quic_file_utils_impl.h" + +#include "common/filesystem/directory.h" +#include "common/filesystem/filesystem_impl.h" + +#include "absl/strings/str_cat.h" + +namespace quic { +namespace { + +void depthFirstTraverseDirectory(const std::string& dirname, std::vector& files) { + Envoy::Filesystem::Directory directory(dirname); + for (const Envoy::Filesystem::DirectoryEntry& entry : directory) { + switch (entry.type_) { + case Envoy::Filesystem::FileType::Regular: + files.push_back(absl::StrCat(dirname, "/", entry.name_)); + break; + case Envoy::Filesystem::FileType::Directory: + if (entry.name_ != "." && entry.name_ != "..") { + depthFirstTraverseDirectory(absl::StrCat(dirname, "/", entry.name_), files); + } + break; + default: + ASSERT(false, + absl::StrCat("Unknow file entry type ", entry.type_, " under directory ", dirname)); + } + } +} + +} // namespace + +// Traverses the directory |dirname| and returns all of the files it contains. +std::vector ReadFileContentsImpl(const std::string& dirname) { + std::vector files; + depthFirstTraverseDirectory(dirname, files); + return files; +} + +// Reads the contents of |filename| as a string into |contents|. 
+void ReadFileContentsImpl(QuicStringPiece filename, std::string* contents) { +#ifdef WIN32 + Envoy::Filesystem::InstanceImplWin32 fs; +#else + Envoy::Filesystem::InstanceImplPosix fs; +#endif + *contents = fs.fileReadToEnd(std::string(filename.data(), filename.size())); +} + +} // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quic_file_utils_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_file_utils_impl.h new file mode 100644 index 0000000000000..ceef1dabbab2a --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_file_utils_impl.h @@ -0,0 +1,28 @@ +#pragma once + +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include + +#include "quiche/quic/platform/api/quic_string_piece.h" + +namespace quic { + +/** + * Traverses the directory |dirname| and returns all of the files it contains. + * @param dirname full path without trailing '/'. + */ +std::vector ReadFileContentsImpl(const std::string& dirname); + +/** + * Reads the contents of |filename| as a string into |contents|. + * @param filename the full path to the file. + * @param contents output location of the file content. + */ +void ReadFileContentsImpl(QuicStringPiece filename, std::string* contents); + +} // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quic_flags_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_flags_impl.h new file mode 100644 index 0000000000000..872495f2db8ed --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_flags_impl.h @@ -0,0 +1,44 @@ +#pragma once + +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. 
+ +#include +#include + +#include "extensions/quic_listeners/quiche/platform/flags_impl.h" + +// |flag| is the global flag variable, which is a pointer to TypedFlag. +#define GetQuicFlagImpl(flag) (quiche::flag)->value() + +// |flag| is the global flag variable, which is a pointer to TypedFlag. +#define SetQuicFlagImpl(flag, value) (quiche::flag)->SetValue(value) + +#define GetQuicReloadableFlagImpl(flag) quiche::FLAGS_quic_reloadable_flag_##flag->value() + +#define SetQuicReloadableFlagImpl(flag, value) \ + quiche::FLAGS_quic_reloadable_flag_##flag->SetValue(value) + +#define GetQuicRestartFlagImpl(flag) quiche::FLAGS_quic_restart_flag_##flag->value() + +#define SetQuicRestartFlagImpl(flag, value) quiche::FLAGS_quic_restart_flag_##flag->SetValue(value) + +// Not wired into command-line parsing. +#define DEFINE_QUIC_COMMAND_LINE_FLAG_IMPL(type, flag, value, help) \ + quiche::TypedFlag* FLAGS_##flag = new TypedFlag(#flag, value, help); + +namespace quic { + +// TODO(mpwarres): implement. Lower priority since only used by QUIC command-line tools. +inline std::vector QuicParseCommandLineFlagsImpl(const char* /*usage*/, int /*argc*/, + const char* const* /*argv*/) { + return std::vector(); +} + +// TODO(mpwarres): implement. Lower priority since only used by QUIC command-line tools. +inline void QuicPrintCommandLineFlagHelpImpl(const char* /*usage*/) {} + +} // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.cc b/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.cc new file mode 100644 index 0000000000000..e047e08672b5f --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.cc @@ -0,0 +1,48 @@ +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. 
+ +#include "extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.h" + +#include + +#include "common/http/utility.h" + +#include "absl/strings/ascii.h" +#include "absl/strings/str_cat.h" + +// TODO(wub): Implement both functions on top of GoogleUrl, then enable +// quiche/quic/platform/api/quic_hostname_utils_test.cc. + +namespace quic { + +// static +bool QuicHostnameUtilsImpl::IsValidSNI(QuicStringPiece sni) { + // TODO(wub): Implement it on top of GoogleUrl, once it is available. + + return sni.find_last_of('.') != std::string::npos && + Envoy::Http::Utility::Url().initialize(absl::StrCat("http://", sni)); +} + +// static +std::string QuicHostnameUtilsImpl::NormalizeHostname(QuicStringPiece hostname) { + // TODO(wub): Implement it on top of GoogleUrl, once it is available. + std::string host = absl::AsciiStrToLower(hostname); + + // Walk backwards over the string, stopping at the first trailing dot. + size_t host_end = host.length(); + while (host_end != 0 && host[host_end - 1] == '.') { + host_end--; + } + + // Erase the trailing dots. + if (host_end != host.length()) { + host.erase(host_end, host.length() - host_end); + } + + return host; +} + +} // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.h new file mode 100644 index 0000000000000..97d99b29032db --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.h @@ -0,0 +1,36 @@ +#pragma once + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. 
+ +#include "quiche/quic/platform/api/quic_export.h" +#include "quiche/quic/platform/api/quic_string_piece.h" + +namespace quic { + +class QUIC_EXPORT_PRIVATE QuicHostnameUtilsImpl { +public: + // Returns true if the sni is valid, false otherwise. + // (1) disallow IP addresses; + // (2) check that the hostname contains valid characters only; and + // (3) contains at least one dot. + // NOTE(wub): Only (3) is implemented for now. + static bool IsValidSNI(QuicStringPiece sni); + + // Normalize a hostname: + // (1) Canonicalize it, similar to what Chromium does in + // https://cs.chromium.org/chromium/src/net/base/url_util.h?q=net::CanonicalizeHost + // (2) Convert it to lower case. + // (3) Remove the trailing '.'. + // WARNING: May mutate |hostname| in place. + // NOTE(wub): Only (2) and (3) are implemented for now. + static std::string NormalizeHostname(QuicStringPiece hostname); + +private: + QuicHostnameUtilsImpl() = delete; +}; + +} // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc b/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc index 5f6b16d6fde57..895f1bbcb8d3c 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc +++ b/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc @@ -11,10 +11,14 @@ namespace quic { namespace { -std::atomic& VerbosityLogThreshold() { - static std::atomic threshold(0); - return threshold; -} +std::atomic g_verbosity_threshold; +std::atomic g_dfatal_exit_disabled; + +// Pointer to the global log sink, usually it is nullptr. +// If not nullptr, as in some tests, the sink will receive a copy of the log message right after the +// message is emitted from the QUIC_LOG... macros. 
+std::atomic g_quic_log_sink; +absl::Mutex g_quic_log_sink_mutex; } // namespace QuicLogEmitter::QuicLogEmitter(QuicLogLevel level) : level_(level), saved_errno_(errno) {} @@ -24,16 +28,48 @@ QuicLogEmitter::~QuicLogEmitter() { // TODO(wub): Change to a thread-safe version of strerror. stream_ << ": " << strerror(saved_errno_) << " [" << saved_errno_ << "]"; } - GetLogger().log(level_, "quic: {}", stream_.str().c_str()); + GetLogger().log(level_, "{}", stream_.str().c_str()); + + // Normally there is no log sink and we can avoid acquiring the lock. + if (g_quic_log_sink.load(std::memory_order_relaxed) != nullptr) { + absl::MutexLock lock(&g_quic_log_sink_mutex); + QuicLogSink* sink = g_quic_log_sink.load(std::memory_order_relaxed); + if (sink != nullptr) { + sink->Log(level_, stream_.str()); + } + } + if (level_ == FATAL) { + GetLogger().flush(); +#ifdef NDEBUG + // Release mode. abort(); +#else + // Debug mode. + if (!g_dfatal_exit_disabled) { + abort(); + } +#endif } } -int GetVerbosityLogThreshold() { return VerbosityLogThreshold().load(std::memory_order_relaxed); } +int GetVerbosityLogThreshold() { return g_verbosity_threshold.load(std::memory_order_relaxed); } void SetVerbosityLogThreshold(int new_verbosity) { - VerbosityLogThreshold().store(new_verbosity, std::memory_order_relaxed); + g_verbosity_threshold.store(new_verbosity, std::memory_order_relaxed); +} + +bool IsDFatalExitDisabled() { return g_dfatal_exit_disabled.load(std::memory_order_relaxed); } + +void SetDFatalExitDisabled(bool is_disabled) { + g_dfatal_exit_disabled.store(is_disabled, std::memory_order_relaxed); +} + +QuicLogSink* SetLogSink(QuicLogSink* new_sink) { + absl::MutexLock lock(&g_quic_log_sink_mutex); + QuicLogSink* old_sink = g_quic_log_sink.load(std::memory_order_relaxed); + g_quic_log_sink.store(new_sink, std::memory_order_relaxed); + return old_sink; } -} // namespace quic \ No newline at end of file +} // namespace quic diff --git 
a/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h index ff577f0d341c8..942922fedd4d3 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h @@ -10,14 +10,12 @@ #include #include #include +#include -#include "common/common/assert.h" #include "common/common/logger.h" #include "absl/base/optimization.h" - -// TODO(wub): Add CHECK/DCHECK and variants, which are not explicitly exposed by quic_logging.h. -// TODO(wub): Implement quic_mock_log_impl.h for testing. +#include "absl/synchronization/mutex.h" // If |condition| is true, use |logstream| to stream the log message and send it to spdlog. // If |condition| is false, |logstream| will not be instantiated. @@ -44,6 +42,9 @@ // TODO(wub): Implement QUIC_LOG_FIRST_N_IMPL. #define QUIC_LOG_FIRST_N_IMPL(severity, n) QUIC_LOG_IMPL(severity) +// TODO(wub): Implement QUIC_LOG_EVERY_N_IMPL. +#define QUIC_LOG_EVERY_N_IMPL(severity, n) QUIC_LOG_IMPL(severity) + // TODO(wub): Implement QUIC_LOG_EVERY_N_SEC_IMPL. #define QUIC_LOG_EVERY_N_SEC_IMPL(severity, seconds) QUIC_LOG_IMPL(severity) @@ -51,29 +52,51 @@ QUIC_LOG_IMPL_INTERNAL(quic::IsLogLevelEnabled(quic::severity), \ quic::QuicLogEmitter(quic::severity).SetPerror().stream()) -#define QUIC_LOG_INFO_IS_ON_IMPL quic::IsLogLevelEnabled(quic::INFO) -#define QUIC_LOG_WARNING_IS_ON_IMPL quic::IsLogLevelEnabled(quic::WARNING) -#define QUIC_LOG_ERROR_IS_ON_IMPL quic::IsLogLevelEnabled(quic::ERROR) +#define QUIC_LOG_INFO_IS_ON_IMPL() quic::IsLogLevelEnabled(quic::INFO) +#define QUIC_LOG_WARNING_IS_ON_IMPL() quic::IsLogLevelEnabled(quic::WARNING) +#define QUIC_LOG_ERROR_IS_ON_IMPL() quic::IsLogLevelEnabled(quic::ERROR) + +#define CHECK(condition) \ + QUIC_LOG_IF_IMPL(FATAL, ABSL_PREDICT_FALSE(!(condition))) << "CHECK failed: " #condition "." 
+ +#define CHECK_GT(a, b) CHECK((a) > (b)) +#define CHECK_GE(a, b) CHECK((a) >= (b)) +#define CHECK_LT(a, b) CHECK((a) < (b)) +#define CHECK_LE(a, b) CHECK((a) <= (b)) +#define CHECK_NE(a, b) CHECK((a) != (b)) +#define CHECK_EQ(a, b) CHECK((a) == (b)) #ifdef NDEBUG // Release build -#define QUIC_COMPILED_OUT_LOG() QUIC_LOG_IMPL_INTERNAL(false, quic::NullLogStream().stream()) -#define QUIC_DVLOG_IMPL(verbosity) QUIC_COMPILED_OUT_LOG() -#define QUIC_DVLOG_IF_IMPL(verbosity, condition) QUIC_COMPILED_OUT_LOG() -#define QUIC_DLOG_IMPL(severity) QUIC_COMPILED_OUT_LOG() -#define QUIC_DLOG_IF_IMPL(severity, condition) QUIC_COMPILED_OUT_LOG() -#define QUIC_DLOG_INFO_IS_ON_IMPL 0 +#define DCHECK(condition) QUIC_COMPILED_OUT_LOG(condition) +#define QUIC_COMPILED_OUT_LOG(condition) \ + QUIC_LOG_IMPL_INTERNAL(false && (condition), quic::NullLogStream().stream()) +#define QUIC_DVLOG_IMPL(verbosity) QUIC_COMPILED_OUT_LOG(false) +#define QUIC_DVLOG_IF_IMPL(verbosity, condition) QUIC_COMPILED_OUT_LOG(condition) +#define QUIC_DLOG_IMPL(severity) QUIC_COMPILED_OUT_LOG(false) +#define QUIC_DLOG_IF_IMPL(severity, condition) QUIC_COMPILED_OUT_LOG(condition) +#define QUIC_DLOG_INFO_IS_ON_IMPL() 0 +#define QUIC_DLOG_EVERY_N_IMPL(severity, n) QUIC_COMPILED_OUT_LOG(false) #define QUIC_NOTREACHED_IMPL() #else // Debug build +#define DCHECK(condition) CHECK(condition) #define QUIC_DVLOG_IMPL(verbosity) QUIC_VLOG_IMPL(verbosity) #define QUIC_DVLOG_IF_IMPL(verbosity, condition) QUIC_VLOG_IF_IMPL(verbosity, condition) #define QUIC_DLOG_IMPL(severity) QUIC_LOG_IMPL(severity) #define QUIC_DLOG_IF_IMPL(severity, condition) QUIC_LOG_IF_IMPL(severity, condition) -#define QUIC_DLOG_INFO_IS_ON_IMPL QUIC_LOG_INFO_IS_ON_IMPL +#define QUIC_DLOG_INFO_IS_ON_IMPL() QUIC_LOG_INFO_IS_ON_IMPL() +#define QUIC_DLOG_EVERY_N_IMPL(severity, n) QUIC_LOG_EVERY_N_IMPL(severity, n) #define QUIC_NOTREACHED_IMPL() NOT_REACHED_GCOVR_EXCL_LINE #endif +#define DCHECK_GE(a, b) DCHECK((a) >= (b)) +#define DCHECK_GT(a, b) 
DCHECK((a) > (b)) +#define DCHECK_LT(a, b) DCHECK((a) < (b)) +#define DCHECK_LE(a, b) DCHECK((a) <= (b)) +#define DCHECK_NE(a, b) DCHECK((a) != (b)) +#define DCHECK_EQ(a, b) DCHECK((a) == (b)) + #define QUIC_PREDICT_FALSE_IMPL(x) ABSL_PREDICT_FALSE(x) namespace quic { @@ -132,4 +155,21 @@ inline bool IsVerboseLogEnabled(int verbosity) { return IsLogLevelEnabled(INFO) && verbosity <= GetVerbosityLogThreshold(); } +bool IsDFatalExitDisabled(); +void SetDFatalExitDisabled(bool is_disabled); + +// QuicLogSink is used to capture logs emitted from the QUIC_LOG... macros. +class QuicLogSink { +public: + virtual ~QuicLogSink() = default; + + // Called when |message| is emitted at |level|. + virtual void Log(QuicLogLevel level, const std::string& message) = 0; +}; + +// Only one QuicLogSink can capture log at a time. SetLogSink causes future logs +// to be captured by the |new_sink|. +// Return the previous sink. +QuicLogSink* SetLogSink(QuicLogSink* new_sink); + } // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quic_map_util_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_map_util_impl.h new file mode 100644 index 0000000000000..2bf549d9c353a --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_map_util_impl.h @@ -0,0 +1,23 @@ +#pragma once + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. 
+ +#include + +namespace quic { + +template +bool QuicContainsKeyImpl(const Collection& collection, const Key& key) { + return collection.find(key) != collection.end(); +} + +template +bool QuicContainsValueImpl(const Collection& collection, const Value& value) { + return std::find(collection.begin(), collection.end(), value) != collection.end(); +} + +} // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quic_pcc_sender_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_pcc_sender_impl.h new file mode 100644 index 0000000000000..52d7836b99117 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_pcc_sender_impl.h @@ -0,0 +1,32 @@ +#pragma once + +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include "common/common/assert.h" + +#include "quiche/quic/core/quic_types.h" + +namespace quic { + +class QuicClock; +class QuicConnectionStats; +class QuicRandom; +class QuicUnackedPacketMap; +class RttStats; +class SendAlgorithmInterface; + +// Interface for creating a PCC SendAlgorithmInterface. 
+SendAlgorithmInterface* CreatePccSenderImpl(const QuicClock* clock, const RttStats* rtt_stats, + const QuicUnackedPacketMap* unacked_packets, + QuicRandom* random, QuicConnectionStats* stats, + QuicPacketCount initial_congestion_window, + QuicPacketCount max_congestion_window) { + PANIC("PccSender is not supported."); + return nullptr; +} + +} // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quic_reference_counted_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_reference_counted_impl.h new file mode 100644 index 0000000000000..648f742139dce --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_reference_counted_impl.h @@ -0,0 +1,108 @@ +#pragma once + +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include + +#include "quiche/quic/platform/api/quic_export.h" + +namespace quic { + +/** The implementation of reference counted object is merely wrapping + * std::shared_ptr. So QuicReferenceCountedImpl class does not do anything + * related to reference counting as shared_ptr already takes care of that. But + * it customizes destruction to provide a interface for shared_ptr to destroy + * the object, because according to the API declared in QuicReferenceCounted, + * this class has to hide its destructor. + */ +class QuicReferenceCountedImpl { +public: + QuicReferenceCountedImpl() = default; + + // Expose destructor through this method. + static void destroy(QuicReferenceCountedImpl* impl) { delete impl; } + +protected: + // Non-public destructor to match API declared in QuicReferenceCounted. 
+ virtual ~QuicReferenceCountedImpl() = default; +}; + +template class QuicReferenceCountedPointerImpl { +public: + static_assert(std::is_base_of::value, + "T must derive from QuicReferenceCounted"); + QuicReferenceCountedPointerImpl() : refptr_(nullptr, T::destroy) {} + QuicReferenceCountedPointerImpl(T* p) : refptr_(p, T::destroy) {} + QuicReferenceCountedPointerImpl(std::nullptr_t) : refptr_(nullptr, T::destroy) {} + // Copy constructor. + template + QuicReferenceCountedPointerImpl(const QuicReferenceCountedPointerImpl& other) + : refptr_(other.refptr()) {} + QuicReferenceCountedPointerImpl(const QuicReferenceCountedPointerImpl& other) + : refptr_(other.refptr()) {} + + // Move constructor. + template + QuicReferenceCountedPointerImpl(QuicReferenceCountedPointerImpl&& other) noexcept + : refptr_(std::move(other.refptr())) {} + QuicReferenceCountedPointerImpl(QuicReferenceCountedPointerImpl&& other) noexcept + : refptr_(std::move(other.refptr())) {} + + ~QuicReferenceCountedPointerImpl() = default; + + // Copy assignments. + QuicReferenceCountedPointerImpl& operator=(const QuicReferenceCountedPointerImpl& other) { + refptr_ = other.refptr(); + return *this; + } + template + QuicReferenceCountedPointerImpl& operator=(const QuicReferenceCountedPointerImpl& other) { + refptr_ = other.refptr(); + return *this; + } + + // Move assignments. + QuicReferenceCountedPointerImpl& operator=(QuicReferenceCountedPointerImpl&& other) noexcept { + refptr_ = std::move(other.refptr()); + return *this; + } + template + QuicReferenceCountedPointerImpl& operator=(QuicReferenceCountedPointerImpl&& other) noexcept { + refptr_ = std::move(other.refptr()); + return *this; + } + + QuicReferenceCountedPointerImpl& operator=(T* p) { + refptr_.reset(p, T::destroy); + return *this; + } + + // Returns the raw pointer with no change in reference. + T* get() const { return refptr_.get(); } + + // Accessors for the referenced object. 
+ // operator* and operator-> will assert() if there is no current object. + T& operator*() const { + assert(refptr_ != nullptr); + return *refptr_; + } + T* operator->() const { + assert(refptr_ != nullptr); + return refptr_.get(); + } + + explicit operator bool() const { return static_cast(refptr_); } + + const std::shared_ptr& refptr() const { return refptr_; } + + std::shared_ptr& refptr() { return refptr_; } + +private: + std::shared_ptr refptr_; +}; + +} // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quic_server_stats_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_server_stats_impl.h new file mode 100644 index 0000000000000..0d57ebd2809a1 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_server_stats_impl.h @@ -0,0 +1,23 @@ +#pragma once + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#define QUIC_SERVER_HISTOGRAM_ENUM_IMPL(name, sample, enum_size, docstring) \ + do { \ + } while (0) + +#define QUIC_SERVER_HISTOGRAM_BOOL_IMPL(name, sample, docstring) \ + do { \ + } while (0) + +#define QUIC_SERVER_HISTOGRAM_TIMES_IMPL(name, sample, min, max, bucket_count, docstring) \ + do { \ + } while (0) + +#define QUIC_SERVER_HISTOGRAM_COUNTS_IMPL(name, sample, min, max, bucket_count, docstring) \ + do { \ + } while (0) diff --git a/source/extensions/quic_listeners/quiche/platform/quic_stack_trace_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_stack_trace_impl.h new file mode 100644 index 0000000000000..8ea8fb6d7eb0a --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_stack_trace_impl.h @@ -0,0 +1,21 @@ +#pragma once + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. 
It serves purely as a +// porting layer for QUICHE. + +#include "server/backtrace.h" + +namespace quic { + +inline std::string QuicStackTraceImpl() { + Envoy::BackwardsTrace t; + t.capture(); + std::ostringstream os; + t.printTrace(os); + return os.str(); +} + +} // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quic_str_cat_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_str_cat_impl.h index f689f40a6204a..dafbee10b52af 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_str_cat_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_str_cat_impl.h @@ -2,7 +2,6 @@ #include "absl/strings/str_cat.h" #include "fmt/printf.h" -#include "quiche/quic/platform/api/quic_string.h" // NOLINT(namespace-envoy) @@ -12,11 +11,11 @@ namespace quic { -template inline QuicString QuicStrCatImpl(const Args&... args) { +template inline std::string QuicStrCatImpl(const Args&... args) { return absl::StrCat(args...); } -template inline QuicString QuicStringPrintfImpl(const Args&... args) { +template inline std::string QuicStringPrintfImpl(const Args&... args) { return fmt::sprintf(std::forward(args)...); } diff --git a/source/extensions/quic_listeners/quiche/platform/quic_stream_buffer_allocator_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_stream_buffer_allocator_impl.h new file mode 100644 index 0000000000000..c2a40a0e7427a --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_stream_buffer_allocator_impl.h @@ -0,0 +1,18 @@ +#pragma once + +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. 
+ +#include "quiche/quic/core/quic_simple_buffer_allocator.h" + +namespace quic { + +// Implements the interface required by +// https://quiche.googlesource.com/quiche/+/refs/heads/master/quic/platform/api/quic_stream_buffer_allocator.h +// with the default implementation provided by QUICHE. +using QuicStreamBufferAllocatorImpl = SimpleBufferAllocator; + +} // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quic_string_utils_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_string_utils_impl.h index b3ab8277df196..5e1f3e712dbc9 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_string_utils_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_string_utils_impl.h @@ -1,7 +1,6 @@ #pragma once #include "absl/strings/str_cat.h" -#include "quiche/quic/platform/api/quic_string.h" // NOLINT(namespace-envoy) @@ -11,7 +10,8 @@ namespace quic { -template inline void QuicStrAppendImpl(QuicString* output, const Args&... args) { +template +inline void QuicStrAppendImpl(std::string* output, const Args&... 
args) { absl::StrAppend(output, args...); } diff --git a/source/extensions/quic_listeners/quiche/platform/quic_text_utils_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_text_utils_impl.h new file mode 100644 index 0000000000000..42bb24e6828af --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_text_utils_impl.h @@ -0,0 +1,77 @@ +#pragma once + +#include "extensions/quic_listeners/quiche/platform/quic_string_piece_impl.h" +#include "extensions/quic_listeners/quiche/platform/string_utils.h" + +#include "absl/strings/ascii.h" +#include "absl/strings/escaping.h" +#include "absl/strings/match.h" +#include "absl/strings/numbers.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/str_format.h" +#include "absl/strings/str_split.h" + +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +namespace quic { + +class QuicTextUtilsImpl { +public: + static bool StartsWith(QuicStringPieceImpl data, QuicStringPieceImpl prefix) { + return absl::StartsWith(data, prefix); + } + + static bool EndsWithIgnoreCase(QuicStringPieceImpl data, QuicStringPieceImpl suffix) { + return absl::EndsWithIgnoreCase(data, suffix); + } + + static std::string ToLower(QuicStringPieceImpl data) { return absl::AsciiStrToLower(data); } + + static void RemoveLeadingAndTrailingWhitespace(QuicStringPieceImpl* data) { + *data = absl::StripAsciiWhitespace(*data); + } + + static bool StringToUint64(QuicStringPieceImpl in, uint64_t* out) { + return absl::SimpleAtoi(in, out); + } + + static bool StringToInt(QuicStringPieceImpl in, int* out) { return absl::SimpleAtoi(in, out); } + + static bool StringToUint32(QuicStringPieceImpl in, uint32_t* out) { + return absl::SimpleAtoi(in, out); + } + + static bool StringToSizeT(QuicStringPieceImpl in, size_t* out) { + return absl::SimpleAtoi(in, out); + } + + static 
std::string Uint64ToString(uint64_t in) { return absl::StrCat(in); } + + static std::string HexEncode(QuicStringPieceImpl data) { return absl::BytesToHexString(data); } + + static std::string Hex(uint32_t v) { return absl::StrCat(absl::Hex(v)); } + + static std::string HexDecode(QuicStringPieceImpl data) { return absl::HexStringToBytes(data); } + + static void Base64Encode(const uint8_t* data, size_t data_len, std::string* output) { + return quiche::Base64Encode(data, data_len, output); + } + + static std::string HexDump(QuicStringPieceImpl binary_data) { + return quiche::HexDump(binary_data); + } + + static bool ContainsUpperCase(QuicStringPieceImpl data) { + return std::any_of(data.begin(), data.end(), absl::ascii_isupper); + } + + static std::vector Split(QuicStringPieceImpl data, char delim) { + return absl::StrSplit(data, delim); + } +}; + +} // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/spdy_bug_tracker_impl.h b/source/extensions/quic_listeners/quiche/platform/spdy_bug_tracker_impl.h new file mode 100644 index 0000000000000..93cb60e469695 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/spdy_bug_tracker_impl.h @@ -0,0 +1,13 @@ +#pragma once + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. 
+ +#include "extensions/quic_listeners/quiche/platform/quic_bug_tracker_impl.h" + +#define SPDY_BUG_IMPL QUIC_BUG_IMPL +#define SPDY_BUG_IF_IMPL QUIC_BUG_IF_IMPL +#define FLAGS_spdy_always_log_bugs_for_tests_impl true diff --git a/source/extensions/quic_listeners/quiche/platform/spdy_flags_impl.h b/source/extensions/quic_listeners/quiche/platform/spdy_flags_impl.h new file mode 100644 index 0000000000000..b0d690b4bf07d --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/spdy_flags_impl.h @@ -0,0 +1,13 @@ +#pragma once + +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include "extensions/quic_listeners/quiche/platform/flags_impl.h" + +#define GetSpdyReloadableFlagImpl(flag) quiche::FLAGS_spdy_reloadable_flag_##flag->value() + +#define GetSpdyRestartFlagImpl(flag) quiche::FLAGS_spdy_restart_flag_##flag->value() diff --git a/source/extensions/quic_listeners/quiche/platform/spdy_logging_impl.h b/source/extensions/quic_listeners/quiche/platform/spdy_logging_impl.h new file mode 100644 index 0000000000000..4a21b95ab34d6 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/spdy_logging_impl.h @@ -0,0 +1,21 @@ +#pragma once + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. 
+ +#include "extensions/quic_listeners/quiche/platform/quic_logging_impl.h" + +#define SPDY_LOG_IMPL(severity) QUIC_LOG_IMPL(severity) + +#define SPDY_VLOG_IMPL(verbose_level) QUIC_VLOG_IMPL(verbose_level) + +#define SPDY_DLOG_IMPL(severity) QUIC_DLOG_IMPL(severity) + +#define SPDY_DLOG_IF_IMPL(severity, condition) QUIC_DLOG_IF_IMPL(severity, condition) + +#define SPDY_DVLOG_IMPL(verbose_level) QUIC_DVLOG_IMPL(verbose_level) + +#define SPDY_DVLOG_IF_IMPL(verbose_level, condition) QUIC_DVLOG_IF_IMPL(verbose_level, condition) diff --git a/source/extensions/quic_listeners/quiche/platform/spdy_mem_slice_impl.h b/source/extensions/quic_listeners/quiche/platform/spdy_mem_slice_impl.h new file mode 100644 index 0000000000000..76c5ac3550fa9 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/spdy_mem_slice_impl.h @@ -0,0 +1,45 @@ +#pragma once + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include + +namespace spdy { + +// TODO(danzh): Fill out SpdyMemSliceImpl. +// +// SpdyMemSliceImpl wraps a reference counted MemSlice and only provides partial +// interfaces of MemSlice. +class SpdyMemSliceImpl { +public: + // Constructs an empty SpdyMemSliceImpl that contains an empty MemSlice. + SpdyMemSliceImpl(); + + // Constructs a SpdyMemSlice with reference count 1 to a newly allocated data + // buffer of |length| bytes. + explicit SpdyMemSliceImpl(size_t length); + + // Constructs a reference-counted MemSlice to |data|. + SpdyMemSliceImpl(const char* data, size_t length); + + SpdyMemSliceImpl(const SpdyMemSliceImpl& other) = delete; + SpdyMemSliceImpl& operator=(const SpdyMemSliceImpl& other) = delete; + + // Move constructors. |other| will not hold a reference to the data buffer + // after this call completes. 
+ SpdyMemSliceImpl(SpdyMemSliceImpl&& other) = default; + SpdyMemSliceImpl& operator=(SpdyMemSliceImpl&& other) = default; + + ~SpdyMemSliceImpl(); + + // Returns a char pointer to underlying data buffer. + const char* data() const; + // Returns the length of underlying data buffer. + size_t length() const; +}; + +} // namespace spdy diff --git a/source/extensions/quic_listeners/quiche/platform/spdy_string_utils_impl.h b/source/extensions/quic_listeners/quiche/platform/spdy_string_utils_impl.h new file mode 100644 index 0000000000000..e2af90641e43f --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/spdy_string_utils_impl.h @@ -0,0 +1,47 @@ +#pragma once + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include "extensions/quic_listeners/quiche/platform/string_utils.h" + +#include "absl/strings/escaping.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/str_format.h" +#include "fmt/printf.h" + +namespace spdy { + +template inline std::string SpdyStrCatImpl(const Args&... args) { + return absl::StrCat(std::forward(args)...); +} + +template +inline void SpdyStrAppendImpl(std::string* output, const Args&... 
args) { + absl::StrAppend(output, std::forward(args)...); +} + +char SpdyHexDigitToIntImpl(char c) { return quiche::HexDigitToInt(c); } + +inline std::string SpdyHexDecodeImpl(absl::string_view data) { + return absl::HexStringToBytes(data); +} + +bool SpdyHexDecodeToUInt32Impl(absl::string_view data, uint32_t* out) { + return quiche::HexDecodeToUInt32(data, out); +} + +inline std::string SpdyHexEncodeImpl(const void* bytes, size_t size) { + return absl::BytesToHexString(absl::string_view(static_cast(bytes), size)); +} + +inline std::string SpdyHexEncodeUInt32AndTrimImpl(uint32_t data) { + return absl::StrCat(absl::Hex(data)); +} + +std::string SpdyHexDumpImpl(absl::string_view data) { return quiche::HexDump(data); } + +} // namespace spdy diff --git a/source/extensions/quic_listeners/quiche/platform/spdy_unsafe_arena_impl.h b/source/extensions/quic_listeners/quiche/platform/spdy_unsafe_arena_impl.h index 8db8e4a20bd44..d731d001dcef0 100644 --- a/source/extensions/quic_listeners/quiche/platform/spdy_unsafe_arena_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/spdy_unsafe_arena_impl.h @@ -6,9 +6,10 @@ // consumed or referenced directly by other Envoy code. It serves purely as a // porting layer for QUICHE. +#include "quiche/spdy/core/spdy_simple_arena.h" + namespace spdy { -// TODO: implement -class SpdyUnsafeArenaImpl {}; +using SpdyUnsafeArenaImpl = SpdySimpleArena; } // namespace spdy diff --git a/source/extensions/quic_listeners/quiche/platform/string_utils.cc b/source/extensions/quic_listeners/quiche/platform/string_utils.cc new file mode 100644 index 0000000000000..2bcbc11c41901 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/string_utils.cc @@ -0,0 +1,96 @@ +#include "extensions/quic_listeners/quiche/platform/string_utils.h" + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. 
It serves purely as a +// porting layer for QUICHE. + +#include +#include +#include + +#include "absl/strings/ascii.h" +#include "absl/strings/escaping.h" +#include "absl/strings/str_format.h" +#include "common/common/assert.h" +#include "common/common/base64.h" + +namespace quiche { + +void Base64Encode(const uint8_t* data, size_t data_len, std::string* output) { + *output = + Envoy::Base64::encode(reinterpret_cast(data), data_len, /*add_padding=*/false); +} + +std::string HexDump(absl::string_view data) { + const int kBytesPerLine = 16; + const char* buf = data.data(); + int bytes_remaining = data.size(); + int offset = 0; + std::string out; + const char* p = buf; + while (bytes_remaining > 0) { + const int line_bytes = std::min(bytes_remaining, kBytesPerLine); + absl::StrAppendFormat(&out, "0x%04x: ", offset); // Do the line header + for (int i = 0; i < kBytesPerLine; ++i) { + if (i < line_bytes) { + absl::StrAppendFormat(&out, "%02x", p[i]); + } else { + out += " "; // two-space filler instead of two-space hex digits + } + if (i % 2) { + out += ' '; + } + } + out += ' '; + for (int i = 0; i < line_bytes; ++i) { // Do the ASCII dump + out += absl::ascii_isgraph(p[i]) ? p[i] : '.'; + } + + bytes_remaining -= line_bytes; + offset += line_bytes; + p += line_bytes; + out += '\n'; + } + return out; +} + +char HexDigitToInt(char c) { + ASSERT(std::isxdigit(c)); + + if (std::isdigit(c)) { + return c - '0'; + } + if (c >= 'A' && c <= 'F') { + return c - 'A' + 10; + } + if (c >= 'a' && c <= 'f') { + return c - 'a' + 10; + } + return 0; +} + +bool HexDecodeToUInt32(absl::string_view data, uint32_t* out) { + if (data.empty() || data.size() > 8u) { + return false; + } + + for (char c : data) { + if (!absl::ascii_isxdigit(c)) { + return false; + } + } + + // Pad with leading zeros. 
+ std::string data_padded(data.data(), data.size()); + data_padded.insert(0, 8u - data.size(), '0'); + + std::string byte_string = absl::HexStringToBytes(data_padded); + + ASSERT(byte_string.size() == 4u); + *out = ntohl(*reinterpret_cast(byte_string.c_str())); + return true; +} + +} // namespace quiche diff --git a/source/extensions/quic_listeners/quiche/platform/string_utils.h b/source/extensions/quic_listeners/quiche/platform/string_utils.h new file mode 100644 index 0000000000000..0cb9975b5400b --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/string_utils.h @@ -0,0 +1,25 @@ +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include +#include + +#include "absl/strings/string_view.h" + +namespace quiche { + +void Base64Encode(const uint8_t* data, size_t data_len, std::string* output); + +std::string HexDump(absl::string_view data); + +// '0' => 0, '1' => 1, 'a' or 'A' => 10, etc. +char HexDigitToInt(char c); + +// Turns a 8-byte hexstring into a uint32 in host byte order. +// e.g. 
"12345678" => 0x12345678 +bool HexDecodeToUInt32(absl::string_view data, uint32_t* out); + +} // namespace quiche diff --git a/source/extensions/stat_sinks/common/statsd/statsd.cc b/source/extensions/stat_sinks/common/statsd/statsd.cc index 62413b83dbad4..f40a8532ea48e 100644 --- a/source/extensions/stat_sinks/common/statsd/statsd.cc +++ b/source/extensions/stat_sinks/common/statsd/statsd.cc @@ -29,9 +29,8 @@ Writer::Writer(Network::Address::InstanceConstSharedPtr address) } Writer::~Writer() { - if (io_handle_->fd() != -1) { - RELEASE_ASSERT(close(io_handle_->fd()) == 0, ""); - io_handle_->close(); + if (io_handle_->isOpen()) { + RELEASE_ASSERT(io_handle_->close().err_ == nullptr, ""); } } diff --git a/source/extensions/stat_sinks/common/statsd/statsd.h b/source/extensions/stat_sinks/common/statsd/statsd.h index c96b001cde72f..d80c4af3552ad 100644 --- a/source/extensions/stat_sinks/common/statsd/statsd.h +++ b/source/extensions/stat_sinks/common/statsd/statsd.h @@ -30,7 +30,7 @@ class Writer : public ThreadLocal::ThreadLocalObject { public: Writer(Network::Address::InstanceConstSharedPtr address); // For testing. 
- Writer() : io_handle_(std::make_unique()) {} + Writer() : io_handle_(std::make_unique()) {} virtual ~Writer(); virtual void write(const std::string& message); diff --git a/source/extensions/stat_sinks/hystrix/BUILD b/source/extensions/stat_sinks/hystrix/BUILD index 418c680513cca..df82734226759 100644 --- a/source/extensions/stat_sinks/hystrix/BUILD +++ b/source/extensions/stat_sinks/hystrix/BUILD @@ -37,5 +37,7 @@ envoy_cc_library( "//source/common/common:logger_lib", "//source/common/config:well_known_names", "//source/common/http:headers_lib", + "//source/common/stats:symbol_table_lib", + "//source/common/stats:utility_lib", ], ) diff --git a/source/extensions/stat_sinks/hystrix/hystrix.cc b/source/extensions/stat_sinks/hystrix/hystrix.cc index f3a860032ba33..ede5d6021c070 100644 --- a/source/extensions/stat_sinks/hystrix/hystrix.cc +++ b/source/extensions/stat_sinks/hystrix/hystrix.cc @@ -11,6 +11,7 @@ #include "common/common/logger.h" #include "common/config/well_known_names.h" #include "common/http/headers.h" +#include "common/stats/utility.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_split.h" @@ -153,7 +154,7 @@ void HystrixSink::addHystrixCommand(ClusterStatsCache& cluster_stats_cache, std::chrono::milliseconds rolling_window_ms, const QuantileLatencyMap& histogram, std::stringstream& ss) { - std::time_t currentTime = std::chrono::system_clock::to_time_t(server_.timeSystem().systemTime()); + std::time_t currentTime = std::chrono::system_clock::to_time_t(server_.timeSource().systemTime()); ss << "data: {"; addStringToStream("type", "HystrixCommand", ss, true); @@ -267,7 +268,10 @@ const std::string HystrixSink::printRollingWindows() { HystrixSink::HystrixSink(Server::Instance& server, const uint64_t num_buckets) : server_(server), current_index_(num_buckets > 0 ? 
num_buckets : DEFAULT_NUM_BUCKETS), - window_size_(current_index_ + 1) { + window_size_(current_index_ + 1), stat_name_pool_(server.stats().symbolTable()), + cluster_name_(stat_name_pool_.add(Config::TagNames::get().CLUSTER_NAME)), + cluster_upstream_rq_time_(stat_name_pool_.add("cluster.upstream_rq_time")) { + Server::Admin& admin = server_.admin(); ENVOY_LOG(debug, "adding hystrix_event_stream endpoint to enable connection to hystrix dashboard"); @@ -327,16 +331,12 @@ void HystrixSink::flush(Stats::Source& source) { // Save a map of the relevant histograms per cluster in a convenient format. std::unordered_map time_histograms; for (const Stats::ParentHistogramSharedPtr& histogram : source.cachedHistograms()) { - if (histogram->tagExtractedName() == "cluster.upstream_rq_time") { - // TODO(mrice32): add an Envoy utility function to look up and return a tag for a metric. - auto it = std::find_if(histogram->tags().begin(), histogram->tags().end(), - [](const Stats::Tag& tag) { - return (tag.name_ == Config::TagNames::get().CLUSTER_NAME); - }); - + if (histogram->tagExtractedStatName() == cluster_upstream_rq_time_) { + absl::optional value = Stats::Utility::findTag(*histogram, cluster_name_); // Make sure we found the cluster name tag - ASSERT(it != histogram->tags().end()); - auto it_bool_pair = time_histograms.emplace(std::make_pair(it->value_, QuantileLatencyMap())); + ASSERT(value); + std::string value_str = server_.stats().symbolTable().toString(*value); + auto it_bool_pair = time_histograms.emplace(std::make_pair(value_str, QuantileLatencyMap())); // Make sure histogram with this name was not already added ASSERT(it_bool_pair.second); QuantileLatencyMap& hist_map = it_bool_pair.first->second; diff --git a/source/extensions/stat_sinks/hystrix/hystrix.h b/source/extensions/stat_sinks/hystrix/hystrix.h index 54d26953abc8c..f3801cb612e78 100644 --- a/source/extensions/stat_sinks/hystrix/hystrix.h +++ b/source/extensions/stat_sinks/hystrix/hystrix.h @@ -10,6 +10,8 @@ 
#include "envoy/stats/sink.h" #include "envoy/stats/source.h" +#include "common/stats/symbol_table_impl.h" + namespace Envoy { namespace Extensions { namespace StatSinks { @@ -155,6 +157,11 @@ class HystrixSink : public Stats::Sink, public Logger::Loggable cluster_stats_cache_map_; + + // Saved StatNames for fast comparisons in loop. + Stats::StatNamePool stat_name_pool_; + Stats::StatName cluster_name_; + Stats::StatName cluster_upstream_rq_time_; }; typedef std::unique_ptr HystrixSinkPtr; diff --git a/source/extensions/stat_sinks/metrics_service/BUILD b/source/extensions/stat_sinks/metrics_service/BUILD index 1920bf462ec5c..b44e1b0e0564d 100644 --- a/source/extensions/stat_sinks/metrics_service/BUILD +++ b/source/extensions/stat_sinks/metrics_service/BUILD @@ -25,13 +25,25 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "metrics_proto_descriptors_lib", + srcs = ["grpc_metrics_proto_descriptors.cc"], + hdrs = ["grpc_metrics_proto_descriptors.h"], + deps = [ + "//source/common/protobuf", + "@envoy_api//envoy/service/metrics/v2:metrics_service_cc", + ], +) + envoy_cc_library( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], deps = [ "//include/envoy/registry", + "//source/common/common:assert_lib", "//source/extensions/stat_sinks:well_known_names", + "//source/extensions/stat_sinks/metrics_service:metrics_proto_descriptors_lib", "//source/extensions/stat_sinks/metrics_service:metrics_service_grpc_lib", "//source/server:configuration_lib", "@envoy_api//envoy/config/metrics/v2:stats_cc", diff --git a/source/extensions/stat_sinks/metrics_service/config.cc b/source/extensions/stat_sinks/metrics_service/config.cc index 03e36ed207d45..8dc265550d4b9 100644 --- a/source/extensions/stat_sinks/metrics_service/config.cc +++ b/source/extensions/stat_sinks/metrics_service/config.cc @@ -4,9 +4,11 @@ #include "envoy/config/metrics/v2/metrics_service.pb.validate.h" #include "envoy/registry/registry.h" +#include "common/common/assert.h" #include 
"common/grpc/async_client_impl.h" #include "common/network/resolver_impl.h" +#include "extensions/stat_sinks/metrics_service/grpc_metrics_proto_descriptors.h" #include "extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h" #include "extensions/stat_sinks/well_known_names.h" @@ -29,7 +31,7 @@ Stats::SinkPtr MetricsServiceSinkFactory::createStatsSink(const Protobuf::Messag grpc_service, server.stats(), false), server.localInfo()); - return std::make_unique(grpc_metrics_streamer, server.timeSystem()); + return std::make_unique(grpc_metrics_streamer, server.timeSource()); } ProtobufTypes::MessagePtr MetricsServiceSinkFactory::createEmptyConfigProto() { diff --git a/source/extensions/stat_sinks/metrics_service/grpc_metrics_proto_descriptors.cc b/source/extensions/stat_sinks/metrics_service/grpc_metrics_proto_descriptors.cc new file mode 100644 index 0000000000000..b546804a7bd13 --- /dev/null +++ b/source/extensions/stat_sinks/metrics_service/grpc_metrics_proto_descriptors.cc @@ -0,0 +1,21 @@ +#include "extensions/stat_sinks/metrics_service/grpc_metrics_proto_descriptors.h" + +#include "envoy/service/metrics/v2/metrics_service.pb.h" + +#include "common/common/fmt.h" +#include "common/protobuf/protobuf.h" + +namespace Envoy { +namespace Extensions { +namespace StatSinks { +namespace MetricsService { + +bool validateProtoDescriptors() { + const auto method = "envoy.service.metrics.v2.MetricsService.StreamMetrics"; + + return Protobuf::DescriptorPool::generated_pool()->FindMethodByName(method) != nullptr; +}; +} // namespace MetricsService +} // namespace StatSinks +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/stat_sinks/metrics_service/grpc_metrics_proto_descriptors.h b/source/extensions/stat_sinks/metrics_service/grpc_metrics_proto_descriptors.h new file mode 100644 index 0000000000000..d86371d2d6086 --- /dev/null +++ b/source/extensions/stat_sinks/metrics_service/grpc_metrics_proto_descriptors.h @@ -0,0 +1,14 @@ +#pragma 
once + +namespace Envoy { +namespace Extensions { +namespace StatSinks { +namespace MetricsService { + +// This function validates that the method descriptors for gRPC services and type descriptors that +// are referenced in Any messages are available in the descriptor pool. +bool validateProtoDescriptors(); +} // namespace MetricsService +} // namespace StatSinks +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc index 9e589cd03676b..a2867f358b133 100644 --- a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc +++ b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc @@ -34,8 +34,8 @@ void GrpcMetricsStreamerImpl::send(envoy::service::metrics::v2::StreamMetricsMes } MetricsServiceSink::MetricsServiceSink(const GrpcMetricsStreamerSharedPtr& grpc_metrics_streamer, - Event::TimeSystem& time_system) - : grpc_metrics_streamer_(grpc_metrics_streamer), time_system_(time_system) {} + TimeSource& time_source) + : grpc_metrics_streamer_(grpc_metrics_streamer), time_source_(time_source) {} void MetricsServiceSink::flushCounter(const Stats::Counter& counter) { io::prometheus::client::MetricFamily* metrics_family = message_.add_envoy_metrics(); @@ -43,7 +43,7 @@ void MetricsServiceSink::flushCounter(const Stats::Counter& counter) { metrics_family->set_name(counter.name()); auto* metric = metrics_family->add_metric(); metric->set_timestamp_ms(std::chrono::duration_cast( - time_system_.systemTime().time_since_epoch()) + time_source_.systemTime().time_since_epoch()) .count()); auto* counter_metric = metric->mutable_counter(); counter_metric->set_value(counter.value()); @@ -55,7 +55,7 @@ void MetricsServiceSink::flushGauge(const Stats::Gauge& gauge) { metrics_family->set_name(gauge.name()); auto* metric = metrics_family->add_metric(); 
metric->set_timestamp_ms(std::chrono::duration_cast( - time_system_.systemTime().time_since_epoch()) + time_source_.systemTime().time_since_epoch()) .count()); auto* gauage_metric = metric->mutable_gauge(); gauage_metric->set_value(gauge.value()); @@ -66,7 +66,7 @@ void MetricsServiceSink::flushHistogram(const Stats::ParentHistogram& histogram) metrics_family->set_name(histogram.name()); auto* metric = metrics_family->add_metric(); metric->set_timestamp_ms(std::chrono::duration_cast( - time_system_.systemTime().time_since_epoch()) + time_source_.systemTime().time_since_epoch()) .count()); auto* summary_metric = metric->mutable_summary(); const Stats::HistogramStatistics& hist_stats = histogram.intervalStatistics(); diff --git a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h index 5ccf79fcdab49..8881a0bd1ff76 100644 --- a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h +++ b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h @@ -72,7 +72,7 @@ class MetricsServiceSink : public Stats::Sink { public: // MetricsService::Sink MetricsServiceSink(const GrpcMetricsStreamerSharedPtr& grpc_metrics_streamer, - Event::TimeSystem& time_system); + TimeSource& time_system); void flush(Stats::Source& source) override; void onHistogramComplete(const Stats::Histogram&, uint64_t) override {} @@ -83,7 +83,7 @@ class MetricsServiceSink : public Stats::Sink { private: GrpcMetricsStreamerSharedPtr grpc_metrics_streamer_; envoy::service::metrics::v2::StreamMetricsMessage message_; - Event::TimeSystem& time_system_; + TimeSource& time_source_; }; } // namespace MetricsService diff --git a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc index 73fe3a2023410..4b5f5f5b3627d 100644 --- a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc +++ 
b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc @@ -7,6 +7,7 @@ #include "common/common/assert.h" #include "common/common/base64.h" #include "common/common/utility.h" +#include "common/tracing/http_tracer_impl.h" namespace Envoy { namespace Extensions { @@ -50,7 +51,8 @@ class OpenTracingHTTPHeadersReader : public opentracing::HTTPHeadersReader { request_headers_.lookup(Http::LowerCaseString{key}, &entry); switch (lookup_result) { case Http::HeaderMap::Lookup::Found: - return opentracing::string_view{entry->value().c_str(), entry->value().size()}; + return opentracing::string_view{entry->value().getStringView().data(), + entry->value().getStringView().length()}; case Http::HeaderMap::Lookup::NotFound: return opentracing::make_unexpected(opentracing::key_not_found_error); case Http::HeaderMap::Lookup::NotSupported: @@ -70,8 +72,10 @@ class OpenTracingHTTPHeadersReader : public opentracing::HTTPHeadersReader { static Http::HeaderMap::Iterate headerMapCallback(const Http::HeaderEntry& header, void* context) { OpenTracingCb* callback = static_cast(context); - opentracing::string_view key{header.key().c_str(), header.key().size()}; - opentracing::string_view value{header.value().c_str(), header.value().size()}; + opentracing::string_view key{header.key().getStringView().data(), + header.key().getStringView().length()}; + opentracing::string_view value{header.value().getStringView().data(), + header.value().getStringView().length()}; if ((*callback)(key, value)) { return Http::HeaderMap::Iterate::Continue; } else { @@ -85,14 +89,20 @@ OpenTracingSpan::OpenTracingSpan(OpenTracingDriver& driver, std::unique_ptr&& span) : driver_{driver}, span_(std::move(span)) {} -void OpenTracingSpan::finishSpan() { span_->Finish(); } +void OpenTracingSpan::finishSpan() { span_->FinishWithOptions(finish_options_); } -void OpenTracingSpan::setOperation(const std::string& operation) { - span_->SetOperationName(operation); +void 
OpenTracingSpan::setOperation(absl::string_view operation) { + span_->SetOperationName({operation.data(), operation.length()}); } -void OpenTracingSpan::setTag(const std::string& name, const std::string& value) { - span_->SetTag(name, value); +void OpenTracingSpan::setTag(absl::string_view name, absl::string_view value) { + span_->SetTag({name.data(), name.length()}, + opentracing::v2::string_view{value.data(), value.length()}); +} + +void OpenTracingSpan::log(SystemTime timestamp, const std::string& event) { + opentracing::LogRecord record{timestamp, {{Tracing::Logs::get().EventKey, event}}}; + finish_options_.log_records.emplace_back(std::move(record)); } void OpenTracingSpan::injectContext(Http::HeaderMap& request_headers) { @@ -148,7 +158,8 @@ Tracing::SpanPtr OpenTracingDriver::startSpan(const Tracing::Config& config, std::unique_ptr parent_span_ctx; if (propagation_mode == PropagationMode::SingleHeader && request_headers.OtSpanContext()) { opentracing::expected> parent_span_ctx_maybe; - std::string parent_context = Base64::decode(request_headers.OtSpanContext()->value().c_str()); + std::string parent_context = + Base64::decode(std::string(request_headers.OtSpanContext()->value().getStringView())); if (!parent_context.empty()) { InputConstMemoryStream istream{parent_context.data(), parent_context.size()}; diff --git a/source/extensions/tracers/common/ot/opentracing_driver_impl.h b/source/extensions/tracers/common/ot/opentracing_driver_impl.h index 54c5e57528e27..323afabe56a49 100644 --- a/source/extensions/tracers/common/ot/opentracing_driver_impl.h +++ b/source/extensions/tracers/common/ot/opentracing_driver_impl.h @@ -33,8 +33,9 @@ class OpenTracingSpan : public Tracing::Span, Logger::Loggable span_; }; diff --git a/source/extensions/tracers/lightstep/lightstep_tracer_impl.cc b/source/extensions/tracers/lightstep/lightstep_tracer_impl.cc index d1aa06dd93ee8..b604f83b4dda0 100644 --- a/source/extensions/tracers/lightstep/lightstep_tracer_impl.cc +++ 
b/source/extensions/tracers/lightstep/lightstep_tracer_impl.cc @@ -64,7 +64,7 @@ void LightStepDriver::LightStepTransporter::Send(const Protobuf::Message& reques Http::MessagePtr message = Grpc::Common::prepareHeaders( driver_.cluster()->name(), lightstep::CollectorServiceFullName(), lightstep::CollectorMethodName(), absl::optional(timeout)); - message->body() = Grpc::Common::serializeBody(request); + message->body() = Grpc::Common::serializeToGrpcFrame(request); active_request_ = driver_.clusterManager() diff --git a/source/extensions/tracers/zipkin/config.cc b/source/extensions/tracers/zipkin/config.cc index 3b0588e5907cf..47ec3d2ff2992 100644 --- a/source/extensions/tracers/zipkin/config.cc +++ b/source/extensions/tracers/zipkin/config.cc @@ -19,7 +19,7 @@ Tracing::HttpTracerPtr ZipkinTracerFactory::createHttpTracerTyped( const envoy::config::trace::v2::ZipkinConfig& proto_config, Server::Instance& server) { Tracing::DriverPtr zipkin_driver = std::make_unique( proto_config, server.clusterManager(), server.stats(), server.threadLocal(), server.runtime(), - server.localInfo(), server.random(), server.timeSystem()); + server.localInfo(), server.random(), server.timeSource()); return std::make_unique(std::move(zipkin_driver), server.localInfo()); } diff --git a/source/extensions/tracers/zipkin/span_context_extractor.cc b/source/extensions/tracers/zipkin/span_context_extractor.cc index 8baab6e47e2de..dc9413760367c 100644 --- a/source/extensions/tracers/zipkin/span_context_extractor.cc +++ b/source/extensions/tracers/zipkin/span_context_extractor.cc @@ -86,28 +86,28 @@ std::pair SpanContextExtractor::extractSpanContext(bool is_sa if (b3_span_id_entry && b3_trace_id_entry) { // Extract trace id - which can either be 128 or 64 bit. For 128 bit, // it needs to be divided into two 64 bit numbers (high and low). 
- const std::string tid = b3_trace_id_entry->value().c_str(); + const std::string tid(b3_trace_id_entry->value().getStringView()); if (b3_trace_id_entry->value().size() == 32) { const std::string high_tid = tid.substr(0, 16); const std::string low_tid = tid.substr(16, 16); - if (!StringUtil::atoul(high_tid.c_str(), trace_id_high, 16) || - !StringUtil::atoul(low_tid.c_str(), trace_id, 16)) { + if (!StringUtil::atoull(high_tid.c_str(), trace_id_high, 16) || + !StringUtil::atoull(low_tid.c_str(), trace_id, 16)) { throw ExtractorException( fmt::format("Invalid traceid_high {} or tracid {}", high_tid.c_str(), low_tid.c_str())); } - } else if (!StringUtil::atoul(tid.c_str(), trace_id, 16)) { + } else if (!StringUtil::atoull(tid.c_str(), trace_id, 16)) { throw ExtractorException(fmt::format("Invalid trace_id {}", tid.c_str())); } - const std::string spid = b3_span_id_entry->value().c_str(); - if (!StringUtil::atoul(spid.c_str(), span_id, 16)) { + const std::string spid(b3_span_id_entry->value().getStringView()); + if (!StringUtil::atoull(spid.c_str(), span_id, 16)) { throw ExtractorException(fmt::format("Invalid span id {}", spid.c_str())); } auto b3_parent_id_entry = request_headers_.get(ZipkinCoreConstants::get().X_B3_PARENT_SPAN_ID); - if (b3_parent_id_entry) { - const std::string pspid = b3_parent_id_entry->value().c_str(); - if (!StringUtil::atoul(pspid.c_str(), parent_id, 16)) { + if (b3_parent_id_entry && !b3_parent_id_entry->value().empty()) { + const std::string pspid(b3_parent_id_entry->value().getStringView()); + if (!StringUtil::atoull(pspid.c_str(), parent_id, 16)) { throw ExtractorException(fmt::format("Invalid parent span id {}", pspid.c_str())); } } @@ -123,7 +123,7 @@ std::pair SpanContextExtractor::extractSpanContextFromB3SingleFormat(bool is_sampled) { auto b3_head_entry = request_headers_.get(ZipkinCoreConstants::get().B3); ASSERT(b3_head_entry); - const std::string b3 = b3_head_entry->value().c_str(); + const std::string 
b3(b3_head_entry->value().getStringView()); if (!b3.length()) { throw ExtractorException("Invalid input: empty"); } @@ -150,18 +150,18 @@ SpanContextExtractor::extractSpanContextFromB3SingleFormat(bool is_sampled) { const std::string trace_id_str = b3.substr(pos, 16); if (b3[pos + 32] == '-') { - if (!StringUtil::atoul(trace_id_str.c_str(), trace_id_high, 16)) { + if (!StringUtil::atoull(trace_id_str.c_str(), trace_id_high, 16)) { throw ExtractorException( fmt::format("Invalid input: invalid trace id high {}", trace_id_str.c_str())); } pos += 16; const std::string trace_id_low_str = b3.substr(pos, 16); - if (!StringUtil::atoul(trace_id_low_str.c_str(), trace_id, 16)) { + if (!StringUtil::atoull(trace_id_low_str.c_str(), trace_id, 16)) { throw ExtractorException( fmt::format("Invalid input: invalid trace id {}", trace_id_low_str.c_str())); } } else { - if (!StringUtil::atoul(trace_id_str.c_str(), trace_id, 16)) { + if (!StringUtil::atoull(trace_id_str.c_str(), trace_id, 16)) { throw ExtractorException( fmt::format("Invalid input: invalid trace id {}", trace_id_str.c_str())); } @@ -173,7 +173,7 @@ SpanContextExtractor::extractSpanContextFromB3SingleFormat(bool is_sampled) { } const std::string span_id_str = b3.substr(pos, 16); - if (!StringUtil::atoul(span_id_str.c_str(), span_id, 16)) { + if (!StringUtil::atoull(span_id_str.c_str(), span_id, 16)) { throw ExtractorException(fmt::format("Invalid input: invalid span id {}", span_id_str.c_str())); } pos += 16; // spanId ended @@ -212,7 +212,7 @@ SpanContextExtractor::extractSpanContextFromB3SingleFormat(bool is_sampled) { pos++; const std::string parent_id_str = b3.substr(pos, b3.length() - pos); - if (!StringUtil::atoul(parent_id_str.c_str(), parent_id, 16)) { + if (!StringUtil::atoull(parent_id_str.c_str(), parent_id, 16)) { throw ExtractorException( fmt::format("Invalid input: invalid parent id {}", parent_id_str.c_str())); } @@ -226,4 +226,4 @@ SpanContextExtractor::extractSpanContextFromB3SingleFormat(bool 
is_sampled) { } // namespace Zipkin } // namespace Tracers } // namespace Extensions -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/extensions/tracers/zipkin/util.cc b/source/extensions/tracers/zipkin/util.cc index 447621f0a59de..d18eff673b042 100644 --- a/source/extensions/tracers/zipkin/util.cc +++ b/source/extensions/tracers/zipkin/util.cc @@ -35,7 +35,7 @@ void Util::addArrayToJson(std::string& target, const std::vector& j const std::string& field_name) { std::string stringified_json_array = "["; - if (json_array.size() > 0) { + if (!json_array.empty()) { stringified_json_array += json_array[0]; for (auto it = json_array.begin() + 1; it != json_array.end(); it++) { stringified_json_array += ","; diff --git a/source/extensions/tracers/zipkin/zipkin_core_types.cc b/source/extensions/tracers/zipkin/zipkin_core_types.cc index 03a8a18d26f74..e99e873120777 100644 --- a/source/extensions/tracers/zipkin/zipkin_core_types.cc +++ b/source/extensions/tracers/zipkin/zipkin_core_types.cc @@ -252,12 +252,20 @@ void Span::finish() { } } -void Span::setTag(const std::string& name, const std::string& value) { - if (name.size() > 0 && value.size() > 0) { +void Span::setTag(absl::string_view name, absl::string_view value) { + if (!name.empty() && !value.empty()) { addBinaryAnnotation(BinaryAnnotation(name, value)); } } +void Span::log(SystemTime timestamp, const std::string& event) { + Annotation annotation; + annotation.setTimestamp( + std::chrono::duration_cast(timestamp.time_since_epoch()).count()); + annotation.setValue(event); + addAnnotation(std::move(annotation)); +} + } // namespace Zipkin } // namespace Tracers } // namespace Extensions diff --git a/source/extensions/tracers/zipkin/zipkin_core_types.h b/source/extensions/tracers/zipkin/zipkin_core_types.h index b5db77fbe95be..9060432c25a54 100644 --- a/source/extensions/tracers/zipkin/zipkin_core_types.h +++ b/source/extensions/tracers/zipkin/zipkin_core_types.h @@ -11,6 +11,7 
@@ #include "extensions/tracers/zipkin/tracer_interface.h" #include "extensions/tracers/zipkin/util.h" +#include "absl/strings/string_view.h" #include "absl/types/optional.h" namespace Envoy { @@ -224,7 +225,7 @@ class BinaryAnnotation : public ZipkinBase { * @param key The key name of the annotation. * @param value The value associated with the key. */ - BinaryAnnotation(const std::string& key, const std::string& value) + BinaryAnnotation(absl::string_view key, absl::string_view value) : key_(key), value_(value), annotation_type_(STRING) {} /** @@ -357,7 +358,7 @@ class Span : public ZipkinBase { /** * Adds an annotation to the span (move semantics). */ - void addAnnotation(const Annotation&& ann) { annotations_.push_back(ann); } + void addAnnotation(Annotation&& ann) { annotations_.emplace_back(std::move(ann)); } /** * Sets the span's binary annotations all at once. @@ -372,7 +373,9 @@ class Span : public ZipkinBase { /** * Adds a binary annotation to the span (move semantics). */ - void addBinaryAnnotation(const BinaryAnnotation&& bann) { binary_annotations_.push_back(bann); } + void addBinaryAnnotation(BinaryAnnotation&& bann) { + binary_annotations_.emplace_back(std::move(bann)); + } /** * Sets the span's debug attribute. @@ -545,7 +548,15 @@ class Span : public ZipkinBase { * @param name The binary annotation's key. * @param value The binary annotation's value. */ - void setTag(const std::string& name, const std::string& value); + void setTag(absl::string_view name, absl::string_view value); + + /** + * Adds an annotation to the span + * + * @param timestamp The annotation's timestamp. + * @param event The annotation's value. 
+ */ + void log(SystemTime timestamp, const std::string& event); private: static const std::string EMPTY_HEX_STRING_; diff --git a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc index 7ebf68acb6c71..fbe84ff78662b 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc @@ -22,12 +22,18 @@ ZipkinSpan::ZipkinSpan(Zipkin::Span& span, Zipkin::Tracer& tracer) : span_(span) void ZipkinSpan::finishSpan() { span_.finish(); } -void ZipkinSpan::setOperation(const std::string& operation) { span_.setName(operation); } +void ZipkinSpan::setOperation(absl::string_view operation) { + span_.setName(std::string(operation)); +} -void ZipkinSpan::setTag(const std::string& name, const std::string& value) { +void ZipkinSpan::setTag(absl::string_view name, absl::string_view value) { span_.setTag(name, value); } +void ZipkinSpan::log(SystemTime timestamp, const std::string& event) { + span_.log(timestamp, event); +} + void ZipkinSpan::injectContext(Http::HeaderMap& request_headers) { // Set the trace-id and span-id headers properly, based on the newly-created span structure. request_headers.setReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID, @@ -71,7 +77,7 @@ Driver::Driver(const envoy::config::trace::v2::ZipkinConfig& zipkin_config, cluster_ = cm_.get(zipkin_config.collector_cluster())->info(); std::string collector_endpoint = ZipkinCoreConstants::get().DEFAULT_COLLECTOR_ENDPOINT; - if (zipkin_config.collector_endpoint().size() > 0) { + if (!zipkin_config.collector_endpoint().empty()) { collector_endpoint = zipkin_config.collector_endpoint(); } @@ -101,12 +107,13 @@ Tracing::SpanPtr Driver::startSpan(const Tracing::Config& config, Http::HeaderMa auto ret_span_context = extractor.extractSpanContext(sampled); if (!ret_span_context.second) { // Create a root Zipkin span. No context was found in the headers. 
- new_zipkin_span = - tracer.startSpan(config, request_headers.Host()->value().c_str(), start_time); + new_zipkin_span = tracer.startSpan( + config, std::string(request_headers.Host()->value().getStringView()), start_time); new_zipkin_span->setSampled(sampled); } else { - new_zipkin_span = tracer.startSpan(config, request_headers.Host()->value().c_str(), - start_time, ret_span_context.first); + new_zipkin_span = + tracer.startSpan(config, std::string(request_headers.Host()->value().getStringView()), + start_time, ret_span_context.first); } } catch (const ExtractorException& e) { @@ -114,7 +121,7 @@ Tracing::SpanPtr Driver::startSpan(const Tracing::Config& config, Http::HeaderMa } ZipkinSpanPtr active_span(new ZipkinSpan(*new_zipkin_span, tracer)); - return std::move(active_span); + return active_span; } ReporterImpl::ReporterImpl(Driver& driver, Event::Dispatcher& dispatcher, diff --git a/source/extensions/tracers/zipkin/zipkin_tracer_impl.h b/source/extensions/tracers/zipkin/zipkin_tracer_impl.h index aef3d537ed38f..daa0bb70a1e9b 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.h +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.h @@ -50,7 +50,7 @@ class ZipkinSpan : public Tracing::Span { * This method sets the operation name on the span. * @param operation the operation name */ - void setOperation(const std::string& operation) override; + void setOperation(absl::string_view operation) override; /** * This function adds a Zipkin "string" binary annotation to this span. @@ -60,7 +60,9 @@ class ZipkinSpan : public Tracing::Span { * Note that Tracing::HttpTracerUtility::finalizeSpan() makes several calls to this function, * associating several key-value pairs with this span. 
*/ - void setTag(const std::string& name, const std::string& value) override; + void setTag(absl::string_view name, absl::string_view value) override; + + void log(SystemTime timestamp, const std::string& event) override; void injectContext(Http::HeaderMap& request_headers) override; Tracing::SpanPtr spawnChild(const Tracing::Config&, const std::string& name, diff --git a/source/extensions/transport_sockets/alts/config.cc b/source/extensions/transport_sockets/alts/config.cc index 7a3a45336882d..e0c728919332a 100644 --- a/source/extensions/transport_sockets/alts/config.cc +++ b/source/extensions/transport_sockets/alts/config.cc @@ -59,8 +59,24 @@ createHandshakeValidator(const envoy::config::transport_socket::alts::v2alpha::A return validator; } -Network::TransportSocketFactoryPtr -createTransportSocketFactoryHelper(const Protobuf::Message& message, bool is_upstream) { +// Manage ALTS singleton state via SingletonManager +class AltsSharedState : public Singleton::Instance { +public: + AltsSharedState() { grpc_alts_shared_resource_dedicated_init(); } + + ~AltsSharedState() { grpc_alts_shared_resource_dedicated_shutdown(); } +}; + +SINGLETON_MANAGER_REGISTRATION(alts_shared_state); + +Network::TransportSocketFactoryPtr createTransportSocketFactoryHelper( + const Protobuf::Message& message, bool is_upstream, + Server::Configuration::TransportSocketFactoryContext& factory_ctxt) { + // A reference to this is held in the factory closure to keep the singleton + // instance alive. 
+ auto alts_shared_state = factory_ctxt.singletonManager().getTyped( + SINGLETON_MANAGER_REGISTERED_NAME(alts_shared_state), + [] { return std::make_shared(); }); auto config = MessageUtil::downcastAndValidate( message); @@ -68,10 +84,10 @@ createTransportSocketFactoryHelper(const Protobuf::Message& message, bool is_ups const std::string handshaker_service = config.handshaker_service(); HandshakerFactory factory = - [handshaker_service, - is_upstream](Event::Dispatcher& dispatcher, - const Network::Address::InstanceConstSharedPtr& local_address, - const Network::Address::InstanceConstSharedPtr&) -> TsiHandshakerPtr { + [handshaker_service, is_upstream, + alts_shared_state](Event::Dispatcher& dispatcher, + const Network::Address::InstanceConstSharedPtr& local_address, + const Network::Address::InstanceConstSharedPtr&) -> TsiHandshakerPtr { ASSERT(local_address != nullptr); GrpcAltsCredentialsOptionsPtr options; @@ -84,8 +100,9 @@ createTransportSocketFactoryHelper(const Protobuf::Message& message, bool is_ups tsi_handshaker* handshaker = nullptr; // Specifying target name as empty since TSI won't take care of validating peer identity // in this use case. The validation will be performed by TsiSocket with the validator. 
- tsi_result status = alts_tsi_handshaker_create( - options.get(), target_name, handshaker_service.c_str(), is_upstream, &handshaker); + tsi_result status = + alts_tsi_handshaker_create(options.get(), target_name, handshaker_service.c_str(), + is_upstream, nullptr /* interested_parties */, &handshaker); CHandshakerPtr handshaker_ptr{handshaker}; if (status != TSI_OK) { @@ -108,15 +125,17 @@ ProtobufTypes::MessagePtr AltsTransportSocketConfigFactory::createEmptyConfigPro Network::TransportSocketFactoryPtr UpstreamAltsTransportSocketConfigFactory::createTransportSocketFactory( - const Protobuf::Message& message, Server::Configuration::TransportSocketFactoryContext&) { - return createTransportSocketFactoryHelper(message, /* is_upstream */ true); + const Protobuf::Message& message, + Server::Configuration::TransportSocketFactoryContext& factory_ctxt) { + return createTransportSocketFactoryHelper(message, /* is_upstream */ true, factory_ctxt); } Network::TransportSocketFactoryPtr DownstreamAltsTransportSocketConfigFactory::createTransportSocketFactory( - const Protobuf::Message& message, Server::Configuration::TransportSocketFactoryContext&, + const Protobuf::Message& message, + Server::Configuration::TransportSocketFactoryContext& factory_ctxt, const std::vector&) { - return createTransportSocketFactoryHelper(message, /* is_upstream */ false); + return createTransportSocketFactoryHelper(message, /* is_upstream */ false, factory_ctxt); } REGISTER_FACTORY(UpstreamAltsTransportSocketConfigFactory, diff --git a/source/extensions/transport_sockets/alts/grpc_tsi.h b/source/extensions/transport_sockets/alts/grpc_tsi.h index 417c59391a5c7..ac7265de6f3cd 100644 --- a/source/extensions/transport_sockets/alts/grpc_tsi.h +++ b/source/extensions/transport_sockets/alts/grpc_tsi.h @@ -9,6 +9,7 @@ #pragma GCC diagnostic ignored "-Wold-style-cast" #include "grpc/grpc_security.h" +#include "src/core/tsi/alts/handshaker/alts_shared_resource.h" #include 
"src/core/tsi/alts/handshaker/alts_tsi_handshaker.h" #include "src/core/tsi/transport_security_interface.h" diff --git a/source/extensions/transport_sockets/alts/tsi_socket.cc b/source/extensions/transport_sockets/alts/tsi_socket.cc index 65d655fbc578f..69fce6a6e6f37 100644 --- a/source/extensions/transport_sockets/alts/tsi_socket.cc +++ b/source/extensions/transport_sockets/alts/tsi_socket.cc @@ -34,6 +34,11 @@ std::string TsiSocket::protocol() const { return EMPTY_STRING; } +absl::string_view TsiSocket::failureReason() const { + // TODO(htuch): Implement error reason for TSI. + return EMPTY_STRING; +} + Network::PostIoAction TsiSocket::doHandshake() { ASSERT(!handshake_complete_); ENVOY_CONN_LOG(debug, "TSI: doHandshake", callbacks_->connection()); diff --git a/source/extensions/transport_sockets/alts/tsi_socket.h b/source/extensions/transport_sockets/alts/tsi_socket.h index 8e3ee5e954438..b304b55807251 100644 --- a/source/extensions/transport_sockets/alts/tsi_socket.h +++ b/source/extensions/transport_sockets/alts/tsi_socket.h @@ -57,8 +57,9 @@ class TsiSocket : public Network::TransportSocket, // Network::TransportSocket void setTransportSocketCallbacks(Envoy::Network::TransportSocketCallbacks& callbacks) override; std::string protocol() const override; + absl::string_view failureReason() const override; bool canFlushClose() override { return handshake_complete_; } - const Envoy::Ssl::Connection* ssl() const override { return nullptr; } + const Envoy::Ssl::ConnectionInfo* ssl() const override { return nullptr; } Network::IoResult doWrite(Buffer::Instance& buffer, bool end_stream) override; void closeSocket(Network::ConnectionEvent event) override; Network::IoResult doRead(Buffer::Instance& buffer) override; diff --git a/source/extensions/transport_sockets/tap/config.cc b/source/extensions/transport_sockets/tap/config.cc index f0ea486acac28..c6d67b6b07d16 100644 --- a/source/extensions/transport_sockets/tap/config.cc +++ 
b/source/extensions/transport_sockets/tap/config.cc @@ -17,18 +17,18 @@ namespace Tap { class SocketTapConfigFactoryImpl : public Extensions::Common::Tap::TapConfigFactory { public: - SocketTapConfigFactoryImpl(Event::TimeSystem& time_system) : time_system_(time_system) {} + SocketTapConfigFactoryImpl(TimeSource& time_source) : time_source_(time_source) {} // TapConfigFactory Extensions::Common::Tap::TapConfigSharedPtr createConfigFromProto(envoy::service::tap::v2alpha::TapConfig&& proto_config, Extensions::Common::Tap::Sink* admin_streamer) override { return std::make_shared(std::move(proto_config), admin_streamer, - time_system_); + time_source_); } private: - Event::TimeSystem& time_system_; + TimeSource& time_source_; }; Network::TransportSocketFactoryPtr UpstreamTapSocketConfigFactory::createTransportSocketFactory( @@ -45,7 +45,7 @@ Network::TransportSocketFactoryPtr UpstreamTapSocketConfigFactory::createTranspo auto inner_transport_factory = inner_config_factory.createTransportSocketFactory(*inner_factory_config, context); return std::make_unique( - outer_config, std::make_unique(context.dispatcher().timeSystem()), + outer_config, std::make_unique(context.dispatcher().timeSource()), context.admin(), context.singletonManager(), context.threadLocal(), context.dispatcher(), std::move(inner_transport_factory)); } @@ -64,7 +64,7 @@ Network::TransportSocketFactoryPtr DownstreamTapSocketConfigFactory::createTrans auto inner_transport_factory = inner_config_factory.createTransportSocketFactory( *inner_factory_config, context, server_names); return std::make_unique( - outer_config, std::make_unique(context.dispatcher().timeSystem()), + outer_config, std::make_unique(context.dispatcher().timeSource()), context.admin(), context.singletonManager(), context.threadLocal(), context.dispatcher(), std::move(inner_transport_factory)); } diff --git a/source/extensions/transport_sockets/tap/tap.cc b/source/extensions/transport_sockets/tap/tap.cc index 
2e830b4a06137..fc34b5ee2c6df 100644 --- a/source/extensions/transport_sockets/tap/tap.cc +++ b/source/extensions/transport_sockets/tap/tap.cc @@ -17,6 +17,7 @@ void TapSocket::setTransportSocketCallbacks(Network::TransportSocketCallbacks& c } std::string TapSocket::protocol() const { return transport_socket_->protocol(); } +absl::string_view TapSocket::failureReason() const { return transport_socket_->failureReason(); } bool TapSocket::canFlushClose() { return transport_socket_->canFlushClose(); } @@ -49,7 +50,7 @@ Network::IoResult TapSocket::doWrite(Buffer::Instance& buffer, bool end_stream) void TapSocket::onConnected() { transport_socket_->onConnected(); } -const Ssl::Connection* TapSocket::ssl() const { return transport_socket_->ssl(); } +const Ssl::ConnectionInfo* TapSocket::ssl() const { return transport_socket_->ssl(); } TapSocketFactory::TapSocketFactory( const envoy::config::transport_socket::tap::v2alpha::Tap& proto_config, diff --git a/source/extensions/transport_sockets/tap/tap.h b/source/extensions/transport_sockets/tap/tap.h index da50dad76f9ce..eb5f76959d147 100644 --- a/source/extensions/transport_sockets/tap/tap.h +++ b/source/extensions/transport_sockets/tap/tap.h @@ -20,12 +20,13 @@ class TapSocket : public Network::TransportSocket { // Network::TransportSocket void setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) override; std::string protocol() const override; + absl::string_view failureReason() const override; bool canFlushClose() override; void closeSocket(Network::ConnectionEvent event) override; Network::IoResult doRead(Buffer::Instance& buffer) override; Network::IoResult doWrite(Buffer::Instance& buffer, bool end_stream) override; void onConnected() override; - const Ssl::Connection* ssl() const override; + const Ssl::ConnectionInfo* ssl() const override; private: SocketTapConfigSharedPtr config_; diff --git a/source/extensions/transport_sockets/tap/tap_config.h 
b/source/extensions/transport_sockets/tap/tap_config.h index 1826c95346728..8cbd431db0fe9 100644 --- a/source/extensions/transport_sockets/tap/tap_config.h +++ b/source/extensions/transport_sockets/tap/tap_config.h @@ -45,13 +45,18 @@ using PerSocketTapperPtr = std::unique_ptr; /** * Abstract socket tap configuration. */ -class SocketTapConfig : public Extensions::Common::Tap::TapConfig { +class SocketTapConfig : public virtual Extensions::Common::Tap::TapConfig { public: /** * @return a new per-socket tapper which is used to handle tapping of a discrete socket. * @param connection supplies the underlying network connection. */ virtual PerSocketTapperPtr createPerSocketTapper(const Network::Connection& connection) PURE; + + /** + * @return time source to use for stamping events. + */ + virtual TimeSource& timeSource() const PURE; }; using SocketTapConfigSharedPtr = std::shared_ptr; diff --git a/source/extensions/transport_sockets/tap/tap_config_impl.cc b/source/extensions/transport_sockets/tap/tap_config_impl.cc index 0c1d656b10bd6..92d4e502852e6 100644 --- a/source/extensions/transport_sockets/tap/tap_config_impl.cc +++ b/source/extensions/transport_sockets/tap/tap_config_impl.cc @@ -1,5 +1,6 @@ #include "extensions/transport_sockets/tap/tap_config_impl.h" +#include "common/common/assert.h" #include "common/network/utility.h" namespace Envoy { @@ -7,58 +8,121 @@ namespace Extensions { namespace TransportSockets { namespace Tap { -PerSocketTapperImpl::PerSocketTapperImpl(SocketTapConfigImplSharedPtr config, +namespace TapCommon = Extensions::Common::Tap; + +PerSocketTapperImpl::PerSocketTapperImpl(SocketTapConfigSharedPtr config, const Network::Connection& connection) - : config_(std::move(config)), connection_(connection), statuses_(config_->numMatchers()), - trace_(std::make_shared()) { + : config_(std::move(config)), + sink_handle_(config_->createPerTapSinkHandleManager(connection.id())), + connection_(connection), statuses_(config_->createMatchStatusVector()) { 
config_->rootMatcher().onNewStream(statuses_); + if (config_->streaming() && config_->rootMatcher().matchStatus(statuses_).matches_) { + TapCommon::TraceWrapperPtr trace = makeTraceSegment(); + fillConnectionInfo(*trace->mutable_socket_streamed_trace_segment()->mutable_connection()); + sink_handle_->submitTrace(std::move(trace)); + } +} + +void PerSocketTapperImpl::fillConnectionInfo(envoy::data::tap::v2alpha::Connection& connection) { + Network::Utility::addressToProtobufAddress(*connection_.localAddress(), + *connection.mutable_local_address()); + Network::Utility::addressToProtobufAddress(*connection_.remoteAddress(), + *connection.mutable_remote_address()); } void PerSocketTapperImpl::closeSocket(Network::ConnectionEvent) { - if (!config_->rootMatcher().matches(statuses_)) { + if (!config_->rootMatcher().matchStatus(statuses_).matches_) { return; } - auto* connection = trace_->mutable_socket_buffered_trace()->mutable_connection(); - connection->set_id(connection_.id()); - Network::Utility::addressToProtobufAddress(*connection_.localAddress(), - *connection->mutable_local_address()); - Network::Utility::addressToProtobufAddress(*connection_.remoteAddress(), - *connection->mutable_remote_address()); - config_->sink().submitBufferedTrace(trace_, connection_.id()); + if (config_->streaming()) { + TapCommon::TraceWrapperPtr trace = makeTraceSegment(); + auto& event = *trace->mutable_socket_streamed_trace_segment()->mutable_event(); + initEvent(event); + event.mutable_closed(); + sink_handle_->submitTrace(std::move(trace)); + } else { + makeBufferedTraceIfNeeded(); + fillConnectionInfo(*buffered_trace_->mutable_socket_buffered_trace()->mutable_connection()); + sink_handle_->submitTrace(std::move(buffered_trace_)); + } + + // Here we explicitly reset the sink_handle_ to release any sink resources and force a flush + // of any data (e.g., files). This is not explicitly needed in production, but is needed in + // tests to avoid race conditions due to deferred deletion. 
We could also do this with a stat, + // but this seems fine in general and is simpler. + sink_handle_.reset(); +} + +void PerSocketTapperImpl::initEvent(envoy::data::tap::v2alpha::SocketEvent& event) { + event.mutable_timestamp()->MergeFrom(Protobuf::util::TimeUtil::NanosecondsToTimestamp( + std::chrono::duration_cast( + config_->timeSource().systemTime().time_since_epoch()) + .count())); } void PerSocketTapperImpl::onRead(const Buffer::Instance& data, uint32_t bytes_read) { - if (!config_->rootMatcher().matches(statuses_)) { + if (!config_->rootMatcher().matchStatus(statuses_).matches_) { return; } - auto* event = trace_->mutable_socket_buffered_trace()->add_events(); - event->mutable_timestamp()->MergeFrom(Protobuf::util::TimeUtil::NanosecondsToTimestamp( - std::chrono::duration_cast( - config_->time_system_.systemTime().time_since_epoch()) - .count())); - // TODO(mattklein123): Avoid linearize/toString here. - const std::string linearized_data = data.toString(); - event->mutable_read()->set_data(linearized_data.data() + (linearized_data.size() - bytes_read), - bytes_read); + if (config_->streaming()) { + TapCommon::TraceWrapperPtr trace = makeTraceSegment(); + auto& event = *trace->mutable_socket_streamed_trace_segment()->mutable_event(); + initEvent(event); + TapCommon::Utility::addBufferToProtoBytes(*event.mutable_read()->mutable_data(), + config_->maxBufferedRxBytes(), data, + data.length() - bytes_read, bytes_read); + sink_handle_->submitTrace(std::move(trace)); + } else { + if (buffered_trace_ != nullptr && buffered_trace_->socket_buffered_trace().read_truncated()) { + return; + } + + makeBufferedTraceIfNeeded(); + auto& event = *buffered_trace_->mutable_socket_buffered_trace()->add_events(); + initEvent(event); + ASSERT(rx_bytes_buffered_ <= config_->maxBufferedRxBytes()); + buffered_trace_->mutable_socket_buffered_trace()->set_read_truncated( + TapCommon::Utility::addBufferToProtoBytes(*event.mutable_read()->mutable_data(), + config_->maxBufferedRxBytes() 
- + rx_bytes_buffered_, + data, data.length() - bytes_read, bytes_read)); + rx_bytes_buffered_ += event.read().data().as_bytes().size(); + } } void PerSocketTapperImpl::onWrite(const Buffer::Instance& data, uint32_t bytes_written, bool end_stream) { - if (!config_->rootMatcher().matches(statuses_)) { + if (!config_->rootMatcher().matchStatus(statuses_).matches_) { return; } - auto* event = trace_->mutable_socket_buffered_trace()->add_events(); - event->mutable_timestamp()->MergeFrom(Protobuf::util::TimeUtil::NanosecondsToTimestamp( - std::chrono::duration_cast( - config_->time_system_.systemTime().time_since_epoch()) - .count())); - // TODO(mattklein123): Avoid linearize/toString here. - const std::string linearized_data = data.toString(); - event->mutable_write()->set_data(linearized_data.data(), bytes_written); - event->mutable_write()->set_end_stream(end_stream); + if (config_->streaming()) { + TapCommon::TraceWrapperPtr trace = makeTraceSegment(); + auto& event = *trace->mutable_socket_streamed_trace_segment()->mutable_event(); + initEvent(event); + TapCommon::Utility::addBufferToProtoBytes(*event.mutable_write()->mutable_data(), + config_->maxBufferedTxBytes(), data, 0, + bytes_written); + event.mutable_write()->set_end_stream(end_stream); + sink_handle_->submitTrace(std::move(trace)); + } else { + if (buffered_trace_ != nullptr && buffered_trace_->socket_buffered_trace().write_truncated()) { + return; + } + + makeBufferedTraceIfNeeded(); + auto& event = *buffered_trace_->mutable_socket_buffered_trace()->add_events(); + initEvent(event); + ASSERT(tx_bytes_buffered_ <= config_->maxBufferedTxBytes()); + buffered_trace_->mutable_socket_buffered_trace()->set_write_truncated( + TapCommon::Utility::addBufferToProtoBytes( + *event.mutable_write()->mutable_data(), + config_->maxBufferedTxBytes() - tx_bytes_buffered_, data, 0, bytes_written)); + tx_bytes_buffered_ += event.write().data().as_bytes().size(); + event.mutable_write()->set_end_stream(end_stream); + } } } // 
namespace Tap diff --git a/source/extensions/transport_sockets/tap/tap_config_impl.h b/source/extensions/transport_sockets/tap/tap_config_impl.h index 0e0b29729b78e..dc16dc2c96d28 100644 --- a/source/extensions/transport_sockets/tap/tap_config_impl.h +++ b/source/extensions/transport_sockets/tap/tap_config_impl.h @@ -10,12 +10,9 @@ namespace Extensions { namespace TransportSockets { namespace Tap { -class SocketTapConfigImpl; -using SocketTapConfigImplSharedPtr = std::shared_ptr; - class PerSocketTapperImpl : public PerSocketTapper { public: - PerSocketTapperImpl(SocketTapConfigImplSharedPtr config, const Network::Connection& connection); + PerSocketTapperImpl(SocketTapConfigSharedPtr config, const Network::Connection& connection); // PerSocketTapper void closeSocket(Network::ConnectionEvent event) override; @@ -23,14 +20,28 @@ class PerSocketTapperImpl : public PerSocketTapper { void onWrite(const Buffer::Instance& data, uint32_t bytes_written, bool end_stream) override; private: - SocketTapConfigImplSharedPtr config_; + void initEvent(envoy::data::tap::v2alpha::SocketEvent&); + void fillConnectionInfo(envoy::data::tap::v2alpha::Connection& connection); + void makeBufferedTraceIfNeeded() { + if (buffered_trace_ == nullptr) { + buffered_trace_ = Extensions::Common::Tap::makeTraceWrapper(); + buffered_trace_->mutable_socket_buffered_trace()->set_trace_id(connection_.id()); + } + } + Extensions::Common::Tap::TraceWrapperPtr makeTraceSegment() { + Extensions::Common::Tap::TraceWrapperPtr trace = Extensions::Common::Tap::makeTraceWrapper(); + trace->mutable_socket_streamed_trace_segment()->set_trace_id(connection_.id()); + return trace; + } + + SocketTapConfigSharedPtr config_; + Extensions::Common::Tap::PerTapSinkHandleManagerPtr sink_handle_; const Network::Connection& connection_; - std::vector statuses_; - // TODO(mattklein123): Buffering the entire trace until socket close won't scale to - // long lived connections or large transfers. 
We could emit multiple tap - // files with bounded size, with identical connection ID to allow later - // reassembly. - std::shared_ptr trace_; + Extensions::Common::Tap::Matcher::MatchStatusVector statuses_; + // Must be a shared_ptr because of submitTrace(). + Extensions::Common::Tap::TraceWrapperPtr buffered_trace_; + uint32_t rx_bytes_buffered_{}; + uint32_t tx_bytes_buffered_{}; }; class SocketTapConfigImpl : public Extensions::Common::Tap::TapConfigBaseImpl, @@ -38,17 +49,18 @@ class SocketTapConfigImpl : public Extensions::Common::Tap::TapConfigBaseImpl, public std::enable_shared_from_this { public: SocketTapConfigImpl(envoy::service::tap::v2alpha::TapConfig&& proto_config, - Extensions::Common::Tap::Sink* admin_streamer, Event::TimeSystem& time_system) + Extensions::Common::Tap::Sink* admin_streamer, TimeSource& time_system) : Extensions::Common::Tap::TapConfigBaseImpl(std::move(proto_config), admin_streamer), - time_system_(time_system) {} + time_source_(time_system) {} // SocketTapConfig PerSocketTapperPtr createPerSocketTapper(const Network::Connection& connection) override { return std::make_unique(shared_from_this(), connection); } + TimeSource& timeSource() const override { return time_source_; } private: - Event::TimeSystem& time_system_; + TimeSource& time_source_; friend class PerSocketTapperImpl; }; diff --git a/source/extensions/transport_sockets/tls/BUILD b/source/extensions/transport_sockets/tls/BUILD index 9c9f85863a0c1..4f07d1e5dea08 100644 --- a/source/extensions/transport_sockets/tls/BUILD +++ b/source/extensions/transport_sockets/tls/BUILD @@ -28,6 +28,7 @@ envoy_cc_library( srcs = ["ssl_socket.cc"], hdrs = ["ssl_socket.h"], external_deps = [ + "abseil_optional", "abseil_synchronization", "ssl", ], diff --git a/source/extensions/transport_sockets/tls/context_impl.cc b/source/extensions/transport_sockets/tls/context_impl.cc index 61c14856a58a5..85a945ac78d25 100644 --- a/source/extensions/transport_sockets/tls/context_impl.cc +++ 
b/source/extensions/transport_sockets/tls/context_impl.cc @@ -767,13 +767,13 @@ ServerContextImpl::ServerContextImpl(Stats::Scope& scope, } if (!parsed_alpn_protocols_.empty()) { - SSL_CTX_set_alpn_select_cb(ctx.ssl_ctx_.get(), - [](SSL*, const unsigned char** out, unsigned char* outlen, - const unsigned char* in, unsigned int inlen, void* arg) -> int { - return static_cast(arg)->alpnSelectCallback( - out, outlen, in, inlen); - }, - this); + SSL_CTX_set_alpn_select_cb( + ctx.ssl_ctx_.get(), + [](SSL*, const unsigned char** out, unsigned char* outlen, const unsigned char* in, + unsigned int inlen, void* arg) -> int { + return static_cast(arg)->alpnSelectCallback(out, outlen, in, inlen); + }, + this); } if (!session_ticket_keys_.empty()) { diff --git a/source/extensions/transport_sockets/tls/ssl_socket.cc b/source/extensions/transport_sockets/tls/ssl_socket.cc index e90d83bedbe26..2918c8afff4d4 100644 --- a/source/extensions/transport_sockets/tls/ssl_socket.cc +++ b/source/extensions/transport_sockets/tls/ssl_socket.cc @@ -21,12 +21,16 @@ namespace TransportSockets { namespace Tls { namespace { + +constexpr absl::string_view NotReadyReason{"TLS error: Secret is not supplied by SDS"}; + // This SslSocket will be used when SSL secret is not fetched from SDS server. 
class NotReadySslSocket : public Network::TransportSocket { public: // Network::TransportSocket void setTransportSocketCallbacks(Network::TransportSocketCallbacks&) override {} std::string protocol() const override { return EMPTY_STRING; } + absl::string_view failureReason() const override { return NotReadyReason; } bool canFlushClose() override { return true; } void closeSocket(Network::ConnectionEvent) override {} Network::IoResult doRead(Buffer::Instance&) override { return {PostIoAction::Close, 0, false}; } @@ -34,7 +38,7 @@ class NotReadySslSocket : public Network::TransportSocket { return {PostIoAction::Close, 0, false}; } void onConnected() override {} - const Ssl::Connection* ssl() const override { return nullptr; } + const Ssl::ConnectionInfo* ssl() const override { return nullptr; } }; } // namespace @@ -161,10 +165,14 @@ void SslSocket::drainErrorQueue() { } saw_error = true; - ENVOY_CONN_LOG(debug, "SSL error: {}:{}:{}:{}", callbacks_->connection(), err, - ERR_lib_error_string(err), ERR_func_error_string(err), - ERR_reason_error_string(err)); + if (failure_reason_.empty()) { + failure_reason_ = "TLS error:"; + } + failure_reason_.append(absl::StrCat(" ", err, ":", ERR_lib_error_string(err), ":", + ERR_func_error_string(err), ":", + ERR_reason_error_string(err))); } + ENVOY_CONN_LOG(debug, "{}", callbacks_->connection(), failure_reason_); if (saw_error && !saw_counted_error) { ctx_->stats().connection_error_.inc(); } @@ -244,15 +252,13 @@ bool SslSocket::peerCertificatePresented() const { return cert != nullptr; } -std::string SslSocket::uriSanLocalCertificate() const { +std::vector SslSocket::uriSanLocalCertificate() const { // The cert object is not owned. X509* cert = SSL_get_certificate(ssl_.get()); if (!cert) { - return ""; + return {}; } - // TODO(PiotrSikora): Figure out if returning only one URI is valid limitation. - const std::vector& san_uris = Utility::getSubjectAltNames(*cert, GEN_URI); - return (san_uris.size() > 0) ? 
san_uris[0] : ""; + return Utility::getSubjectAltNames(*cert, GEN_URI); } std::vector SslSocket::dnsSansLocalCertificate() const { @@ -303,14 +309,42 @@ const std::string& SslSocket::urlEncodedPemEncodedPeerCertificate() const { return cached_url_encoded_pem_encoded_peer_certificate_; } -std::string SslSocket::uriSanPeerCertificate() const { +const std::string& SslSocket::urlEncodedPemEncodedPeerCertificateChain() const { + if (!cached_url_encoded_pem_encoded_peer_cert_chain_.empty()) { + return cached_url_encoded_pem_encoded_peer_cert_chain_; + } + + STACK_OF(X509)* cert_chain = SSL_get_peer_full_cert_chain(ssl_.get()); + if (cert_chain == nullptr) { + ASSERT(cached_url_encoded_pem_encoded_peer_cert_chain_.empty()); + return cached_url_encoded_pem_encoded_peer_cert_chain_; + } + + for (uint64_t i = 0; i < sk_X509_num(cert_chain); i++) { + X509* cert = sk_X509_value(cert_chain, i); + + bssl::UniquePtr buf(BIO_new(BIO_s_mem())); + RELEASE_ASSERT(buf != nullptr, ""); + RELEASE_ASSERT(PEM_write_bio_X509(buf.get(), cert) == 1, ""); + const uint8_t* output; + size_t length; + RELEASE_ASSERT(BIO_mem_contents(buf.get(), &output, &length) == 1, ""); + + absl::string_view pem(reinterpret_cast(output), length); + cached_url_encoded_pem_encoded_peer_cert_chain_ = absl::StrCat( + cached_url_encoded_pem_encoded_peer_cert_chain_, + absl::StrReplaceAll( + pem, {{"\n", "%0A"}, {" ", "%20"}, {"+", "%2B"}, {"/", "%2F"}, {"=", "%3D"}})); + } + return cached_url_encoded_pem_encoded_peer_cert_chain_; +} + +std::vector SslSocket::uriSanPeerCertificate() const { bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); if (!cert) { - return ""; + return {}; } - // TODO(PiotrSikora): Figure out if returning only one URI is valid limitation. - const std::vector& san_uris = Utility::getSubjectAltNames(*cert, GEN_URI); - return (san_uris.size() > 0) ? 
san_uris[0] : ""; + return Utility::getSubjectAltNames(*cert, GEN_URI); } std::vector SslSocket::dnsSansPeerCertificate() const { @@ -337,6 +371,8 @@ std::string SslSocket::protocol() const { return std::string(reinterpret_cast(proto), proto_len); } +absl::string_view SslSocket::failureReason() const { return failure_reason_; } + std::string SslSocket::serialNumberPeerCertificate() const { bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); if (!cert) { @@ -361,6 +397,22 @@ std::string SslSocket::subjectLocalCertificate() const { return Utility::getSubjectFromCertificate(*cert); } +absl::optional SslSocket::validFromPeerCertificate() const { + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + if (!cert) { + return absl::nullopt; + } + return Utility::getValidFrom(*cert); +} + +absl::optional SslSocket::expirationPeerCertificate() const { + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + if (!cert) { + return absl::nullopt; + } + return Utility::getExpirationTime(*cert); +} + namespace { SslSocketFactoryStats generateStats(const std::string& prefix, Stats::Scope& store) { return { diff --git a/source/extensions/transport_sockets/tls/ssl_socket.h b/source/extensions/transport_sockets/tls/ssl_socket.h index 79768856ed1df..ccae3a144e0be 100644 --- a/source/extensions/transport_sockets/tls/ssl_socket.h +++ b/source/extensions/transport_sockets/tls/ssl_socket.h @@ -15,6 +15,7 @@ #include "extensions/transport_sockets/tls/utility.h" #include "absl/synchronization/mutex.h" +#include "absl/types/optional.h" #include "openssl/ssl.h" namespace Envoy { @@ -39,7 +40,7 @@ struct SslSocketFactoryStats { enum class InitialState { Client, Server }; class SslSocket : public Network::TransportSocket, - public Envoy::Ssl::Connection, + public Envoy::Ssl::ConnectionInfo, protected Logger::Loggable { public: SslSocket(Envoy::Ssl::ContextSharedPtr ctx, InitialState state, @@ -47,25 +48,29 @@ class SslSocket : public Network::TransportSocket, // 
Ssl::Connection bool peerCertificatePresented() const override; - std::string uriSanLocalCertificate() const override; + std::vector uriSanLocalCertificate() const override; const std::string& sha256PeerCertificateDigest() const override; std::string serialNumberPeerCertificate() const override; std::string subjectPeerCertificate() const override; std::string subjectLocalCertificate() const override; - std::string uriSanPeerCertificate() const override; + std::vector uriSanPeerCertificate() const override; const std::string& urlEncodedPemEncodedPeerCertificate() const override; + const std::string& urlEncodedPemEncodedPeerCertificateChain() const override; std::vector dnsSansPeerCertificate() const override; std::vector dnsSansLocalCertificate() const override; + absl::optional validFromPeerCertificate() const override; + absl::optional expirationPeerCertificate() const override; // Network::TransportSocket void setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) override; std::string protocol() const override; + absl::string_view failureReason() const override; bool canFlushClose() override { return handshake_complete_; } void closeSocket(Network::ConnectionEvent close_type) override; Network::IoResult doRead(Buffer::Instance& read_buffer) override; Network::IoResult doWrite(Buffer::Instance& write_buffer, bool end_stream) override; void onConnected() override; - const Ssl::Connection* ssl() const override { return this; } + const Ssl::ConnectionInfo* ssl() const override { return this; } SSL* rawSslForTest() const { return ssl_.get(); } @@ -80,8 +85,10 @@ class SslSocket : public Network::TransportSocket, bool handshake_complete_{}; bool shutdown_sent_{}; uint64_t bytes_to_retry_{}; + std::string failure_reason_; mutable std::string cached_sha_256_peer_certificate_digest_; mutable std::string cached_url_encoded_pem_encoded_peer_certificate_; + mutable std::string cached_url_encoded_pem_encoded_peer_cert_chain_; }; class 
ClientSslSocketFactory : public Network::TransportSocketFactory, diff --git a/source/server/BUILD b/source/server/BUILD index 6f9e2bdca52e6..5c9e99d13757b 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -4,6 +4,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", + "envoy_proto_library", "envoy_select_hot_restart", ) @@ -43,7 +44,6 @@ envoy_cc_library( "//source/common/network:utility_lib", "//source/common/protobuf:utility_lib", "//source/common/tracing:http_tracer_lib", - "//source/extensions/filters/common/ratelimit:ratelimit_registration_lib", "@envoy_api//envoy/api/v2:lds_cc", "@envoy_api//envoy/config/bootstrap/v2:bootstrap_cc", "@envoy_api//envoy/config/wasm/v2:wasm_cc", @@ -109,11 +109,17 @@ envoy_cc_library( ], ) +envoy_proto_library( + name = "hot_restart", + srcs = ["hot_restart.proto"], +) + envoy_cc_library( - name = "hot_restart_lib", - srcs = envoy_select_hot_restart(["hot_restart_impl.cc"]), - hdrs = envoy_select_hot_restart(["hot_restart_impl.h"]), + name = "hot_restarting_base", + srcs = envoy_select_hot_restart(["hot_restarting_base.cc"]), + hdrs = envoy_select_hot_restart(["hot_restarting_base.h"]), deps = [ + ":hot_restart_cc", "//include/envoy/api:os_sys_calls_interface", "//include/envoy/event:dispatcher_interface", "//include/envoy/event:file_event_interface", @@ -122,42 +128,81 @@ envoy_cc_library( "//include/envoy/server:options_interface", "//source/common/api:os_sys_calls_lib", "//source/common/common:assert_lib", - "//source/common/common:block_memory_hash_set_lib", "//source/common/common:utility_lib", "//source/common/network:utility_lib", - "//source/common/stats:raw_stat_data_lib", - "//source/common/stats:stats_options_lib", ], ) envoy_cc_library( - name = "hot_restart_nop_lib", - hdrs = ["hot_restart_nop_impl.h"], + name = "hot_restarting_child", + srcs = envoy_select_hot_restart(["hot_restarting_child.cc"]), + hdrs = envoy_select_hot_restart(["hot_restarting_child.h"]), + deps = [ + 
":hot_restarting_base", + "//source/common/stats:stat_merger_lib", + ], +) + +envoy_cc_library( + name = "hot_restarting_parent", + srcs = envoy_select_hot_restart(["hot_restarting_parent.cc"]), + hdrs = envoy_select_hot_restart(["hot_restarting_parent.h"]), deps = [ + ":hot_restarting_base", + "//source/common/memory:stats_lib", + ], +) + +envoy_cc_library( + name = "hot_restart_lib", + srcs = envoy_select_hot_restart(["hot_restart_impl.cc"]), + hdrs = envoy_select_hot_restart(["hot_restart_impl.h"]), + deps = [ + ":hot_restarting_child", + ":hot_restarting_parent", + "//include/envoy/api:os_sys_calls_interface", + "//include/envoy/event:dispatcher_interface", + "//include/envoy/event:file_event_interface", "//include/envoy/server:hot_restart_interface", + "//include/envoy/server:instance_interface", + "//include/envoy/server:options_interface", + "//source/common/api:os_sys_calls_lib", + "//source/common/common:assert_lib", "//source/common/stats:heap_stat_data_lib", ], ) envoy_cc_library( - name = "init_manager_lib", - srcs = ["init_manager_impl.cc"], - hdrs = ["init_manager_impl.h"], + name = "hot_restart_nop_lib", + hdrs = ["hot_restart_nop_impl.h"], deps = [ - "//include/envoy/init:init_interface", - "//source/common/common:assert_lib", + "//include/envoy/server:hot_restart_interface", + "//source/common/stats:heap_stat_data_lib", ], ) envoy_cc_library( name = "options_lib", - srcs = ["options_impl.cc"], - hdrs = ["options_impl.h"], + srcs = ["options_impl.cc"] + select({ + "//bazel:linux_x86_64": ["options_impl_platform_linux.cc"], + "//bazel:linux_aarch64": ["options_impl_platform_linux.cc"], + "//conditions:default": ["options_impl_platform_default.cc"], + }), + hdrs = [ + "options_impl.h", + "options_impl_platform.h", + ] + select({ + "//bazel:linux_x86_64": ["options_impl_platform_linux.h"], + "//bazel:linux_aarch64": ["options_impl_platform_linux.h"], + "//conditions:default": [], + }), external_deps = ["tclap"], deps = [ 
"//include/envoy/network:address_interface", "//include/envoy/server:options_interface", "//include/envoy/stats:stats_interface", + "//source/common/api:os_sys_calls_lib", + "//source/common/common:logger_lib", "//source/common/common:macros", "//source/common/common:version_lib", "//source/common/protobuf:utility_lib", @@ -186,12 +231,13 @@ envoy_cc_library( hdrs = ["lds_api.h"], deps = [ "//include/envoy/config:subscription_interface", - "//include/envoy/init:init_interface", + "//include/envoy/init:manager_interface", "//include/envoy/server:listener_manager_interface", "//source/common/common:cleanup_lib", "//source/common/config:resources_lib", "//source/common/config:subscription_factory_lib", "//source/common/config:utility_lib", + "//source/common/init:target_lib", "//source/common/protobuf:utility_lib", "@envoy_api//envoy/api/v2:lds_cc", ], @@ -204,7 +250,6 @@ envoy_cc_library( deps = [ ":configuration_lib", ":drain_manager_lib", - ":init_manager_lib", ":lds_api_lib", ":transport_socket_config_lib", "//include/envoy/server:filter_config_interface", @@ -214,6 +259,7 @@ envoy_cc_library( "//source/common/api:os_sys_calls_lib", "//source/common/common:empty_string", "//source/common/config:utility_lib", + "//source/common/init:manager_lib", "//source/common/network:cidr_range_lib", "//source/common/network:lc_trie_lib", "//source/common/network:listen_socket_lib", @@ -242,10 +288,8 @@ envoy_cc_library( "@envoy_api//envoy/api/v2:eds_cc", "@envoy_api//envoy/api/v2:lds_cc", "@envoy_api//envoy/api/v2:rds_cc", - "@envoy_api//envoy/service/accesslog/v2:als_cc", "@envoy_api//envoy/service/discovery/v2:ads_cc", "@envoy_api//envoy/service/discovery/v2:hds_cc", - "@envoy_api//envoy/service/metrics/v2:metrics_service_cc", "@envoy_api//envoy/service/ratelimit/v2:rls_cc", ], ) @@ -275,9 +319,8 @@ envoy_cc_library( ":configuration_lib", ":connection_handler_lib", ":guarddog_lib", - ":init_manager_lib", + ":listener_hooks_lib", ":listener_manager_lib", - ":test_hooks_lib", 
":wasm_config_lib", ":worker_lib", "//include/envoy/event:dispatcher_interface", @@ -295,6 +338,7 @@ envoy_cc_library( "//include/envoy/upstream:cluster_manager_interface", "//source/common/access_log:access_log_manager_lib", "//source/common/api:api_lib", + "//source/common/buffer:buffer_lib", "//source/common/common:logger_lib", "//source/common/common:mutex_tracer_lib", "//source/common/common:utility_lib", @@ -304,7 +348,9 @@ envoy_cc_library( "//source/common/grpc:async_client_manager_lib", "//source/common/http:codes_lib", "//source/common/http:context_lib", + "//source/common/init:manager_lib", "//source/common/local_info:local_info_lib", + "//source/common/memory:heap_shrinker_lib", "//source/common/memory:stats_lib", "//source/common/protobuf:utility_lib", "//source/common/router:rds_lib", @@ -321,8 +367,8 @@ envoy_cc_library( ) envoy_cc_library( - name = "test_hooks_lib", - hdrs = ["test_hooks.h"], + name = "listener_hooks_lib", + hdrs = ["listener_hooks.h"], ) envoy_cc_library( @@ -343,7 +389,7 @@ envoy_cc_library( hdrs = ["worker_impl.h"], deps = [ ":connection_handler_lib", - ":test_hooks_lib", + ":listener_hooks_lib", "//include/envoy/api:api_interface", "//include/envoy/event:dispatcher_interface", "//include/envoy/event:timer_interface", diff --git a/source/server/backtrace.h b/source/server/backtrace.h index aea8cf1013137..1963a5ec06663 100644 --- a/source/server/backtrace.h +++ b/source/server/backtrace.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "common/common/logger.h" #include "absl/debugging/stacktrace.h" @@ -66,22 +68,51 @@ class BackwardsTrace : Logger::Loggable { void logTrace() { ENVOY_LOG(critical, "Backtrace (use tools/stack_decode.py to get line numbers):"); - for (int i = 0; i < stack_depth_; ++i) { - char out[1024]; - const bool success = absl::Symbolize(stack_trace_[i], out, sizeof(out)); - if (success) { - ENVOY_LOG(critical, "#{}: {} [{}]", i, out, stack_trace_[i]); + visitTrace([](int index, const char* symbol, void* 
address) { + if (symbol != nullptr) { + ENVOY_LOG(critical, "#{}: {} [{}]", index, symbol, address); } else { - ENVOY_LOG(critical, "#{}: [{}]", i, stack_trace_[i]); + ENVOY_LOG(critical, "#{}: [{}]", index, address); } - } + }); } void logFault(const char* signame, const void* addr) { ENVOY_LOG(critical, "Caught {}, suspect faulting address {}", signame, addr); } + void printTrace(std::ostream& os) { + visitTrace([&](int index, const char* symbol, void* address) { + if (symbol != nullptr) { + os << "#" << index << " " << symbol << " [" << address << "]\n"; + } else { + os << "#" << index << " [" << address << "]\n"; + } + }); + } + private: + /** + * Visit the previously captured stack trace. + * + * The visitor function is called once per frame, with 3 parameters: + * 1. (int) The index of the current frame. + * 2. (const char*) The symbol name for the address of the current frame. nullptr means + * symbolization failed. + * 3. (void*) The address of the current frame. + */ + void visitTrace(const std::function& visitor) { + for (int i = 0; i < stack_depth_; ++i) { + char out[1024]; + const bool success = absl::Symbolize(stack_trace_[i], out, sizeof(out)); + if (success) { + visitor(i, out, stack_trace_[i]); + } else { + visitor(i, nullptr, stack_trace_[i]); + } + } + } + static constexpr int MaxStackDepth = 64; void* stack_trace_[MaxStackDepth]; int stack_depth_{0}; diff --git a/source/server/config_validation/api.cc b/source/server/config_validation/api.cc index 4763d043b4480..23954bb561b50 100644 --- a/source/server/config_validation/api.cc +++ b/source/server/config_validation/api.cc @@ -1,17 +1,22 @@ #include "server/config_validation/api.h" +#include "common/common/assert.h" + #include "server/config_validation/dispatcher.h" namespace Envoy { namespace Api { -ValidationImpl::ValidationImpl(std::chrono::milliseconds file_flush_interval_msec, - Thread::ThreadFactory& thread_factory, Stats::Store& stats_store, - Event::TimeSystem& time_system) - : 
Impl(file_flush_interval_msec, thread_factory, stats_store, time_system) {} +ValidationImpl::ValidationImpl(Thread::ThreadFactory& thread_factory, Stats::Store& stats_store, + Event::TimeSystem& time_system, Filesystem::Instance& file_system) + : Impl(thread_factory, stats_store, time_system, file_system), time_system_(time_system) {} Event::DispatcherPtr ValidationImpl::allocateDispatcher() { - return Event::DispatcherPtr{new Event::ValidationDispatcher(*this)}; + return Event::DispatcherPtr{new Event::ValidationDispatcher(*this, time_system_)}; +} + +Event::DispatcherPtr ValidationImpl::allocateDispatcher(Buffer::WatermarkFactoryPtr&&) { + NOT_REACHED_GCOVR_EXCL_LINE } } // namespace Api diff --git a/source/server/config_validation/api.h b/source/server/config_validation/api.h index ef05bfdc70319..658e671a8d970 100644 --- a/source/server/config_validation/api.h +++ b/source/server/config_validation/api.h @@ -15,11 +15,14 @@ namespace Api { */ class ValidationImpl : public Impl { public: - ValidationImpl(std::chrono::milliseconds file_flush_interval_msec, - Thread::ThreadFactory& thread_factory, Stats::Store& stats_store, - Event::TimeSystem& time_system); + ValidationImpl(Thread::ThreadFactory& thread_factory, Stats::Store& stats_store, + Event::TimeSystem& time_system, Filesystem::Instance& file_system); Event::DispatcherPtr allocateDispatcher() override; + Event::DispatcherPtr allocateDispatcher(Buffer::WatermarkFactoryPtr&& watermark_factory) override; + +private: + Event::TimeSystem& time_system_; }; } // namespace Api diff --git a/source/server/config_validation/async_client.cc b/source/server/config_validation/async_client.cc index be388769f8383..7015ab179a297 100644 --- a/source/server/config_validation/async_client.cc +++ b/source/server/config_validation/async_client.cc @@ -3,7 +3,8 @@ namespace Envoy { namespace Http { -ValidationAsyncClient::ValidationAsyncClient(Api::Api& api) : dispatcher_(api) {} 
+ValidationAsyncClient::ValidationAsyncClient(Api::Api& api, Event::TimeSystem& time_system) + : dispatcher_(api, time_system) {} AsyncClient::Request* ValidationAsyncClient::send(MessagePtr&&, Callbacks&, const RequestOptions&) { return nullptr; diff --git a/source/server/config_validation/async_client.h b/source/server/config_validation/async_client.h index 026fd01882767..83f909e696d8f 100644 --- a/source/server/config_validation/async_client.h +++ b/source/server/config_validation/async_client.h @@ -19,7 +19,7 @@ namespace Http { */ class ValidationAsyncClient : public AsyncClient { public: - ValidationAsyncClient(Api::Api& api); + ValidationAsyncClient(Api::Api& api, Event::TimeSystem& time_system); // Http::AsyncClient AsyncClient::Request* send(MessagePtr&& request, Callbacks& callbacks, diff --git a/source/server/config_validation/cluster_manager.cc b/source/server/config_validation/cluster_manager.cc index 328921df74804..56817278b2b7f 100644 --- a/source/server/config_validation/cluster_manager.cc +++ b/source/server/config_validation/cluster_manager.cc @@ -9,7 +9,7 @@ ClusterManagerPtr ValidationClusterManagerFactory::clusterManagerFromProto( const envoy::config::bootstrap::v2::Bootstrap& bootstrap) { return std::make_unique( bootstrap, *this, stats_, tls_, runtime_, random_, local_info_, log_manager_, - main_thread_dispatcher_, admin_, api_, http_context_); + main_thread_dispatcher_, admin_, api_, http_context_, time_system_); } CdsApiPtr @@ -26,10 +26,11 @@ ValidationClusterManager::ValidationClusterManager( Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime, Runtime::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, AccessLog::AccessLogManager& log_manager, Event::Dispatcher& main_thread_dispatcher, - Server::Admin& admin, Api::Api& api, Http::Context& http_context) + Server::Admin& admin, Api::Api& api, Http::Context& http_context, + Event::TimeSystem& time_system) : ClusterManagerImpl(bootstrap, factory, stats, 
tls, runtime, random, local_info, log_manager, main_thread_dispatcher, admin, api, http_context), - async_client_(api) {} + async_client_(api, time_system) {} Http::ConnectionPool::Instance* ValidationClusterManager::httpConnPoolForCluster(const std::string&, ResourcePriority, diff --git a/source/server/config_validation/cluster_manager.h b/source/server/config_validation/cluster_manager.h index 4317dfe9fd86a..10863efe1cd18 100644 --- a/source/server/config_validation/cluster_manager.h +++ b/source/server/config_validation/cluster_manager.h @@ -19,6 +19,20 @@ class ValidationClusterManagerFactory : public ProdClusterManagerFactory { public: using ProdClusterManagerFactory::ProdClusterManagerFactory; + explicit ValidationClusterManagerFactory( + Server::Admin& admin, Runtime::Loader& runtime, Stats::Store& stats, + ThreadLocal::Instance& tls, Runtime::RandomGenerator& random, + Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager, + Event::Dispatcher& main_thread_dispatcher, const LocalInfo::LocalInfo& local_info, + Secret::SecretManager& secret_manager, Api::Api& api, Http::Context& http_context, + AccessLog::AccessLogManager& log_manager, Singleton::Manager& singleton_manager, + Event::TimeSystem& time_system) + : ProdClusterManagerFactory(admin, runtime, stats, tls, random, dns_resolver, + ssl_context_manager, main_thread_dispatcher, local_info, + secret_manager, api, http_context, log_manager, + singleton_manager), + time_system_(time_system) {} + ClusterManagerPtr clusterManagerFromProto(const envoy::config::bootstrap::v2::Bootstrap& bootstrap) override; @@ -26,6 +40,9 @@ class ValidationClusterManagerFactory : public ProdClusterManagerFactory { // unconditionally. 
CdsApiPtr createCds(const envoy::api::v2::core::ConfigSource& cds_config, ClusterManager& cm) override; + +private: + Event::TimeSystem& time_system_; }; /** @@ -38,7 +55,8 @@ class ValidationClusterManager : public ClusterManagerImpl { ThreadLocal::Instance& tls, Runtime::Loader& runtime, Runtime::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, AccessLog::AccessLogManager& log_manager, Event::Dispatcher& dispatcher, - Server::Admin& admin, Api::Api& api, Http::Context& http_context); + Server::Admin& admin, Api::Api& api, Http::Context& http_context, + Event::TimeSystem& time_system); Http::ConnectionPool::Instance* httpConnPoolForCluster(const std::string&, ResourcePriority, Http::Protocol, diff --git a/source/server/config_validation/dispatcher.h b/source/server/config_validation/dispatcher.h index 8024c9e2591f4..ecd443c991df9 100644 --- a/source/server/config_validation/dispatcher.h +++ b/source/server/config_validation/dispatcher.h @@ -16,7 +16,8 @@ namespace Event { */ class ValidationDispatcher : public DispatcherImpl { public: - ValidationDispatcher(Api::Api& api) : DispatcherImpl(api) {} + ValidationDispatcher(Api::Api& api, Event::TimeSystem& time_system) + : DispatcherImpl(api, time_system) {} Network::ClientConnectionPtr createClientConnection(Network::Address::InstanceConstSharedPtr, diff --git a/source/server/config_validation/server.cc b/source/server/config_validation/server.cc index 82481e8a7cadc..8c0b41e66f69d 100644 --- a/source/server/config_validation/server.cc +++ b/source/server/config_validation/server.cc @@ -18,14 +18,15 @@ namespace Envoy { namespace Server { bool validateConfig(const Options& options, Network::Address::InstanceConstSharedPtr local_address, - ComponentFactory& component_factory, Thread::ThreadFactory& thread_factory) { + ComponentFactory& component_factory, Thread::ThreadFactory& thread_factory, + Filesystem::Instance& file_system) { Thread::MutexBasicLockable access_log_lock; Stats::IsolatedStoreImpl 
stats_store; try { Event::RealTimeSystem time_system; ValidationInstance server(options, time_system, local_address, stats_store, access_log_lock, - component_factory, thread_factory); + component_factory, thread_factory, file_system); std::cout << "configuration '" << options.configPath() << "' OK" << std::endl; server.shutdown(); return true; @@ -39,13 +40,15 @@ ValidationInstance::ValidationInstance(const Options& options, Event::TimeSystem Stats::IsolatedStoreImpl& store, Thread::BasicLockable& access_log_lock, ComponentFactory& component_factory, - Thread::ThreadFactory& thread_factory) + Thread::ThreadFactory& thread_factory, + Filesystem::Instance& file_system) : options_(options), stats_store_(store), - api_(new Api::ValidationImpl(options.fileFlushIntervalMsec(), thread_factory, store, - time_system)), + api_(new Api::ValidationImpl(thread_factory, store, time_system, file_system)), dispatcher_(api_->allocateDispatcher()), singleton_manager_(new Singleton::ManagerImpl(api_->threadFactory().currentThreadId())), - access_log_manager_(*api_, *dispatcher_, access_log_lock), mutex_tracer_(nullptr) { + access_log_manager_(options.fileFlushIntervalMsec(), *api_, *dispatcher_, access_log_lock, + store), + mutex_tracer_(nullptr), time_system_(time_system) { try { initialize(options, local_address, component_factory); } catch (const EnvoyException& e) { @@ -83,20 +86,19 @@ void ValidationInstance::initialize(const Options& options, Configuration::InitialImpl initial_config(bootstrap); overload_manager_ = std::make_unique(dispatcher(), stats(), threadLocal(), bootstrap.overload_manager(), *api_); - listener_manager_ = std::make_unique(*this, *this, *this); + listener_manager_ = std::make_unique(*this, *this, *this, false); thread_local_.registerThread(*dispatcher_, true); runtime_loader_ = component_factory.createRuntime(*this, initial_config); secret_manager_ = std::make_unique(); ssl_context_manager_ = - std::make_unique(api_->timeSystem()); + 
std::make_unique(api_->timeSource()); cluster_manager_factory_ = std::make_unique( admin(), runtime(), stats(), threadLocal(), random(), dnsResolver(), sslContextManager(), dispatcher(), localInfo(), *secret_manager_, *api_, http_context_, accessLogManager(), - singletonManager()); + singletonManager(), time_system_); config_.initialize(bootstrap, *this, *cluster_manager_factory_); http_context_.setTracer(config_.httpTracer()); - clusterManager().setInitializedCb( - [this]() -> void { init_manager_.initialize([]() -> void {}); }); + clusterManager().setInitializedCb([this]() -> void { init_manager_.initialize(init_watcher_); }); } void ValidationInstance::shutdown() { diff --git a/source/server/config_validation/server.h b/source/server/config_validation/server.h index 052d747f92fa4..b492102c86d71 100644 --- a/source/server/config_validation/server.h +++ b/source/server/config_validation/server.h @@ -35,7 +35,8 @@ namespace Server { * the config is valid, false if invalid. */ bool validateConfig(const Options& options, Network::Address::InstanceConstSharedPtr local_address, - ComponentFactory& component_factory, Thread::ThreadFactory& thread_factory); + ComponentFactory& component_factory, Thread::ThreadFactory& thread_factory, + Filesystem::Instance& file_system); /** * ValidationInstance does the bulk of the work for config-validation runs of Envoy. 
It implements @@ -52,12 +53,14 @@ bool validateConfig(const Options& options, Network::Address::InstanceConstShare class ValidationInstance : Logger::Loggable, public Instance, public ListenerComponentFactory, + public ServerLifecycleNotifier, public WorkerFactory { public: ValidationInstance(const Options& options, Event::TimeSystem& time_system, Network::Address::InstanceConstSharedPtr local_address, Stats::IsolatedStoreImpl& store, Thread::BasicLockable& access_log_lock, - ComponentFactory& component_factory, Thread::ThreadFactory& thread_factory); + ComponentFactory& component_factory, Thread::ThreadFactory& thread_factory, + Filesystem::Instance& file_system); // Server::Instance Admin& admin() override { return admin_; } @@ -72,9 +75,9 @@ class ValidationInstance : Logger::Loggable, DrainManager& drainManager() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } AccessLog::AccessLogManager& accessLogManager() override { return access_log_manager_; } void failHealthcheck(bool) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - void getParentStats(HotRestart::GetParentStatsInfo&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } HotRestart& hotRestart() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } Init::Manager& initManager() override { return init_manager_; } + ServerLifecycleNotifier& lifecycleNotifier() override { return *this; } ListenerManager& listenerManager() override { return *listener_manager_; } Secret::SecretManager& secretManager() override { return *secret_manager_; } Runtime::RandomGenerator& random() override { return random_generator_; } @@ -92,7 +95,7 @@ class ValidationInstance : Logger::Loggable, Http::Context& httpContext() override { return http_context_; } ThreadLocal::Instance& threadLocal() override { return thread_local_; } const LocalInfo::LocalInfo& localInfo() override { return *local_info_; } - Event::TimeSystem& timeSystem() override { return api_->timeSystem(); } + TimeSource& timeSource() override { return api_->timeSource(); } 
Envoy::MutexTracer* mutexTracer() override { return mutex_tracer_; } std::chrono::milliseconds statsFlushInterval() const override { @@ -135,10 +138,25 @@ class ValidationInstance : Logger::Loggable, return nullptr; } + // ServerLifecycleNotifier + void registerCallback(Stage, StageCallback) override {} + void registerCallback(Stage, StageCallbackWithCompletion) override {} + private: void initialize(const Options& options, Network::Address::InstanceConstSharedPtr local_address, ComponentFactory& component_factory); + // init_manager_ must come before any member that participates in initialization, and destructed + // only after referencing members are gone, since initialization continuation can potentially + // occur at any point during member lifetime. + Init::ManagerImpl init_manager_{"Validation server"}; + Init::WatcherImpl init_watcher_{"(no-op)", []() {}}; + // secret_manager_ must come before listener_manager_, config_ and dispatcher_, and destructed + // only after these members can no longer reference it, since: + // - There may be active filter chains referencing it in listener_manager_. + // - There may be active clusters referencing it in config_.cluster_manager_. + // - There may be active connections referencing it. + std::unique_ptr secret_manager_; const Options& options_; Stats::IsolatedStoreImpl& stats_store_; ThreadLocal::InstanceImpl thread_local_; @@ -153,14 +171,11 @@ class ValidationInstance : Logger::Loggable, LocalInfo::LocalInfoPtr local_info_; AccessLog::AccessLogManagerImpl access_log_manager_; std::unique_ptr cluster_manager_factory_; - InitManagerImpl init_manager_; - // secret_manager_ must come before listener_manager_, since there may be active filter chains - // referencing it, so need to destruct these first. 
- std::unique_ptr secret_manager_; std::unique_ptr listener_manager_; std::unique_ptr overload_manager_; MutexTracer* mutex_tracer_; Http::ContextImpl http_context_; + Event::TimeSystem& time_system_; }; } // namespace Server diff --git a/source/server/configuration_impl.cc b/source/server/configuration_impl.cc index 3b52df0f3697e..96fd283011a6a 100644 --- a/source/server/configuration_impl.cc +++ b/source/server/configuration_impl.cc @@ -56,11 +56,6 @@ void MainImpl::initialize(const envoy::config::bootstrap::v2::Bootstrap& bootstr ENVOY_LOG(info, "loading {} cluster(s)", bootstrap.static_resources().clusters().size()); cluster_manager_ = cluster_manager_factory.clusterManagerFromProto(bootstrap); - // TODO(ramaraochavali): remove this dependency on extension when rate limit service config is - // deprecated and removed from bootstrap. For now, just call in to extensions to register the rate - // limit service config, so that extensions can build rate limit client. - ratelimit_client_factory_ = Envoy::Extensions::Filters::Common::RateLimit::rateLimitClientFactory( - server, cluster_manager_->grpcAsyncClientManager(), bootstrap); const auto& listeners = bootstrap.static_resources().listeners(); ENVOY_LOG(info, "loading {} listener(s)", listeners.size()); for (ssize_t i = 0; i < listeners.size(); i++) { @@ -132,11 +127,12 @@ InitialImpl::InitialImpl(const envoy::config::bootstrap::v2::Bootstrap& bootstra flags_path_ = bootstrap.flags_path(); } - if (bootstrap.has_runtime()) { - runtime_ = std::make_unique(); - runtime_->symlink_root_ = bootstrap.runtime().symlink_root(); - runtime_->subdirectory_ = bootstrap.runtime().subdirectory(); - runtime_->override_subdirectory_ = bootstrap.runtime().override_subdirectory(); + base_runtime_ = bootstrap.runtime().base(); + if (!bootstrap.runtime().symlink_root().empty()) { + disk_runtime_ = std::make_unique(); + disk_runtime_->symlink_root_ = bootstrap.runtime().symlink_root(); + disk_runtime_->subdirectory_ = 
bootstrap.runtime().subdirectory(); + disk_runtime_->override_subdirectory_ = bootstrap.runtime().override_subdirectory(); } } diff --git a/source/server/configuration_impl.h b/source/server/configuration_impl.h index a8906515d9b53..57f49ecadafc0 100644 --- a/source/server/configuration_impl.h +++ b/source/server/configuration_impl.h @@ -22,8 +22,6 @@ #include "common/network/resolver_impl.h" #include "common/network/utility.h" -#include "extensions/filters/common/ratelimit/ratelimit_registration.h" - namespace Envoy { namespace Server { namespace Configuration { @@ -129,7 +127,6 @@ class MainImpl : Logger::Loggable, public Main { std::chrono::milliseconds watchdog_megamiss_timeout_; std::chrono::milliseconds watchdog_kill_timeout_; std::chrono::milliseconds watchdog_multikill_timeout_; - Extensions::Filters::Common::RateLimit::ClientFactoryPtr ratelimit_client_factory_; }; /** @@ -142,7 +139,8 @@ class InitialImpl : public Initial { // Server::Configuration::Initial Admin& admin() override { return admin_; } absl::optional flagsPath() override { return flags_path_; } - Runtime* runtime() override { return runtime_.get(); } + const ProtobufWkt::Struct& baseRuntime() override { return base_runtime_; } + DiskRuntime* diskRuntime() override { return disk_runtime_.get(); } private: struct AdminImpl : public Admin { @@ -156,8 +154,8 @@ class InitialImpl : public Initial { Network::Address::InstanceConstSharedPtr address_; }; - struct RuntimeImpl : public Runtime { - // Server::Configuration::Runtime + struct DiskRuntimeImpl : public DiskRuntime { + // Server::Configuration::DiskRuntime const std::string& symlinkRoot() override { return symlink_root_; } const std::string& subdirectory() override { return subdirectory_; } const std::string& overrideSubdirectory() override { return override_subdirectory_; } @@ -169,7 +167,8 @@ class InitialImpl : public Initial { AdminImpl admin_; absl::optional flags_path_; - std::unique_ptr runtime_; + ProtobufWkt::Struct base_runtime_; 
+ std::unique_ptr disk_runtime_; }; } // namespace Configuration diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index e46a68972b588..855cdaadb7e3e 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -188,7 +188,8 @@ void ConnectionHandlerImpl::ActiveSocket::continueFilterChain(bool success) { // Hands off connections redirected by iptables to the listener associated with the // original destination address. Pass 'hand_off_restored_destination_connections' as false to // prevent further redirection. - new_listener->onAccept(std::move(socket_), false); + new_listener->onAccept(std::move(socket_), + false /* hand_off_restored_destination_connections */); } else { // Set default transport protocol if none of the listener filters did it. if (socket_->detectedTransportProtocol().empty()) { @@ -238,7 +239,6 @@ void ConnectionHandlerImpl::ActiveListener::newConnection(Network::ConnectionSoc Network::ConnectionPtr new_connection = parent_.dispatcher_.createServerConnection(std::move(socket), std::move(transport_socket)); new_connection->setBufferLimits(config_.perConnectionBufferLimitBytes()); - new_connection->setWriteFilterOrder(config_.reverseWriteFilterOrder()); const bool empty_filter_chain = !config_.filterChainFactory().createNetworkFilterChain( *new_connection, filter_chain->networkFilterFactories()); @@ -259,7 +259,7 @@ void ConnectionHandlerImpl::ActiveListener::onNewConnection( // If the connection is already closed, we can just let this connection immediately die. 
if (new_connection->state() != Network::Connection::State::Closed) { ActiveConnectionPtr active_connection( - new ActiveConnection(*this, std::move(new_connection), parent_.dispatcher_.timeSystem())); + new ActiveConnection(*this, std::move(new_connection), parent_.dispatcher_.timeSource())); active_connection->moveIntoList(std::move(active_connection), connections_); parent_.num_connections_++; } @@ -267,9 +267,9 @@ void ConnectionHandlerImpl::ActiveListener::onNewConnection( ConnectionHandlerImpl::ActiveConnection::ActiveConnection(ActiveListener& listener, Network::ConnectionPtr&& new_connection, - Event::TimeSystem& time_system) + TimeSource& time_source) : listener_(listener), connection_(std::move(new_connection)), - conn_length_(new Stats::Timespan(listener_.stats_.downstream_cx_length_ms_, time_system)) { + conn_length_(new Stats::Timespan(listener_.stats_.downstream_cx_length_ms_, time_source)) { // We just universally set no delay on connections. Theoretically we might at some point want // to make this configurable. 
connection_->noDelay(true); diff --git a/source/server/connection_handler_impl.h b/source/server/connection_handler_impl.h index 0b402139ca66b..8de050a63375e 100644 --- a/source/server/connection_handler_impl.h +++ b/source/server/connection_handler_impl.h @@ -116,7 +116,7 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, NonCopyable { public Event::DeferredDeletable, public Network::ConnectionCallbacks { ActiveConnection(ActiveListener& listener, Network::ConnectionPtr&& new_connection, - Event::TimeSystem& time_system); + TimeSource& time_system); ~ActiveConnection(); // Network::ConnectionCallbacks diff --git a/source/server/drain_manager_impl.cc b/source/server/drain_manager_impl.cc index e7ca1852f57c8..852452da10ab0 100644 --- a/source/server/drain_manager_impl.cc +++ b/source/server/drain_manager_impl.cc @@ -61,7 +61,7 @@ void DrainManagerImpl::startParentShutdownSequence() { parent_shutdown_timer_ = server_.dispatcher().createTimer([this]() -> void { // Shut down the parent now. It should have already been draining. 
ENVOY_LOG(info, "shutting down parent after drain"); - server_.hotRestart().terminateParent(); + server_.hotRestart().sendParentTerminateRequest(); }); parent_shutdown_timer_->enableTimer(std::chrono::duration_cast( diff --git a/source/server/guarddog_impl.cc b/source/server/guarddog_impl.cc index 6c0d4a7e54007..67b55ca008401 100644 --- a/source/server/guarddog_impl.cc +++ b/source/server/guarddog_impl.cc @@ -17,10 +17,10 @@ namespace Envoy { namespace Server { GuardDogImpl::GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuration::Main& config, - Api::Api& api) - : time_system_(api.timeSystem()), miss_timeout_(config.wdMissTimeout()), - megamiss_timeout_(config.wdMegaMissTimeout()), kill_timeout_(config.wdKillTimeout()), - multi_kill_timeout_(config.wdMultiKillTimeout()), + Api::Api& api, std::unique_ptr&& test_interlock) + : test_interlock_hook_(std::move(test_interlock)), time_source_(api.timeSource()), + miss_timeout_(config.wdMissTimeout()), megamiss_timeout_(config.wdMegaMissTimeout()), + kill_timeout_(config.wdKillTimeout()), multi_kill_timeout_(config.wdMultiKillTimeout()), loop_interval_([&]() -> std::chrono::milliseconds { // The loop interval is simply the minimum of all specified intervals, // but we must account for the 0=disabled case. 
This lambda takes care @@ -32,15 +32,28 @@ GuardDogImpl::GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuratio }()), watchdog_miss_counter_(stats_scope.counter("server.watchdog_miss")), watchdog_megamiss_counter_(stats_scope.counter("server.watchdog_mega_miss")), - run_thread_(true) { + dispatcher_(api.allocateDispatcher()), + loop_timer_(dispatcher_->createTimer([this]() { step(); })), run_thread_(true) { start(api); } +GuardDogImpl::GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuration::Main& config, + Api::Api& api) + : GuardDogImpl(stats_scope, config, api, std::make_unique()) {} + GuardDogImpl::~GuardDogImpl() { stop(); } -void GuardDogImpl::threadRoutine() { - do { - const auto now = time_system_.monotonicTime(); +void GuardDogImpl::step() { + { + Thread::LockGuard guard(mutex_); + if (!run_thread_) { + return; + } + } + + const auto now = time_source_.monotonicTime(); + + { bool seen_one_multi_timeout(false); Thread::LockGuard guard(wd_lock_); for (auto& watched_dog : watched_dogs_) { @@ -79,17 +92,25 @@ void GuardDogImpl::threadRoutine() { } } } - } while (waitOrDetectStop()); + } + + { + Thread::LockGuard guard(mutex_); + test_interlock_hook_->signalFromImpl(now); + if (run_thread_) { + loop_timer_->enableTimer(loop_interval_); + } + } } WatchDogSharedPtr GuardDogImpl::createWatchDog(Thread::ThreadIdPtr&& thread_id) { // Timer started by WatchDog will try to fire at 1/2 of the interval of the // minimum timeout specified. loop_interval_ is const so all shared state - // accessed out of the locked section below is const (time_system_ has no + // accessed out of the locked section below is const (time_source_ has no // state). 
auto wd_interval = loop_interval_ / 2; WatchDogSharedPtr new_watchdog = - std::make_shared(std::move(thread_id), time_system_, wd_interval); + std::make_shared(std::move(thread_id), time_source_, wd_interval); WatchedDog watched_dog; watched_dog.dog_ = new_watchdog; { @@ -111,41 +132,19 @@ void GuardDogImpl::stopWatching(WatchDogSharedPtr wd) { } } -bool GuardDogImpl::waitOrDetectStop() { - force_checked_event_.notifyAll(); - Thread::LockGuard guard(exit_lock_); - // Spurious wakeups are OK without explicit handling. We'll just check - // earlier than strictly required for that round. - - // Preferably, we should be calling - // time_system_.waitFor(exit_lock_, exit_event_, loop_interval_); - // here, but that makes GuardDogMissTest.* very flaky. The reason that - // directly calling condvar waitFor works is that it doesn't advance - // simulated time, which the test is carefully controlling. - // - // One alternative approach that would be easier to test is to use a private - // dispatcher and a TimerCB to execute the loop body of threadRoutine(). In - // this manner, the same dynamics would occur in production, with added - // overhead from libevent, But then the unit-test would purely control the - // advancement of time, and thus be more robust. Another variation would be - // to run this watchdog on the main-thread dispatcher, though such an approach - // could not detect when the main-thread was stuck. 
- exit_event_.waitFor(exit_lock_, loop_interval_); // NO_CHECK_FORMAT(real_time) - - return run_thread_; -} - void GuardDogImpl::start(Api::Api& api) { - run_thread_ = true; - thread_ = api.threadFactory().createThread([this]() -> void { threadRoutine(); }); + Thread::LockGuard guard(mutex_); + thread_ = api.threadFactory().createThread( + [this]() -> void { dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit); }); + loop_timer_->enableTimer(std::chrono::milliseconds(0)); } void GuardDogImpl::stop() { { - Thread::LockGuard guard(exit_lock_); + Thread::LockGuard guard(mutex_); run_thread_ = false; - exit_event_.notifyAll(); } + dispatcher_->exit(); if (thread_) { thread_->join(); thread_.reset(); diff --git a/source/server/guarddog_impl.h b/source/server/guarddog_impl.h index a1b228f966937..7f07ba898ce71 100644 --- a/source/server/guarddog_impl.h +++ b/source/server/guarddog_impl.h @@ -32,13 +32,40 @@ namespace Server { */ class GuardDogImpl : public GuardDog { public: + /** + * Defines a test interlock hook to enable tests to synchronize the guard-dog + * execution so they can probe current counter values. The default + * implementation that runs in production has empty methods, which are + * overridden in the implementation used during tests. + */ + class TestInterlockHook { + public: + virtual ~TestInterlockHook() = default; + + /** + * Called from GuardDogImpl to indicate that it has evaluated all watch-dogs + * up to a particular point in time. + */ + virtual void signalFromImpl(MonotonicTime) {} + + /** + * Called from GuardDog tests to block until the implementation has reached + * the desired point in time. + */ + virtual void waitFromTest(Thread::MutexBasicLockable&, MonotonicTime) {} + }; + /** * @param stats_scope Statistics scope to write watchdog_miss and * watchdog_mega_miss events into. * @param config Configuration object. + * @param api API object. + * @param test_interlock a hook for enabling interlock with unit tests. 
* * See the configuration documentation for details on the timeout settings. */ + GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuration::Main& config, Api::Api& api, + std::unique_ptr&& test_interlock); GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuration::Main& config, Api::Api& api); ~GuardDogImpl(); @@ -46,10 +73,17 @@ class GuardDogImpl : public GuardDog { * Exposed for testing purposes only (but harmless to call): */ int loopIntervalForTest() const { return loop_interval_.count(); } + + /** + * Test hook to force a step() to catch up with the current simulated + * time. This is inlined so that it does not need to be present in the + * production binary. + */ void forceCheckForTest() { - exit_event_.notifyAll(); - Thread::LockGuard guard(exit_lock_); - force_checked_event_.wait(exit_lock_); + Thread::LockGuard guard(mutex_); + MonotonicTime now = time_source_.monotonicTime(); + loop_timer_->enableTimer(std::chrono::milliseconds(0)); + test_interlock_hook_->waitFromTest(mutex_, now); } // Server::GuardDog @@ -57,12 +91,8 @@ class GuardDogImpl : public GuardDog { void stopWatching(WatchDogSharedPtr wd) override; private: - void threadRoutine(); - /** - * @return True if we should continue, false if signalled to stop. - */ - bool waitOrDetectStop(); - void start(Api::Api& api) EXCLUSIVE_LOCKS_REQUIRED(exit_lock_); + void start(Api::Api& api); + void step(); void stop(); // Per the C++ standard it is OK to use these in ctor initializer as long as // it is after kill and multikill timeout values are initialized. 
@@ -76,7 +106,8 @@ class GuardDogImpl : public GuardDog { bool megamiss_alerted_{}; }; - Event::TimeSystem& time_system_; + std::unique_ptr test_interlock_hook_; + TimeSource& time_source_; const std::chrono::milliseconds miss_timeout_; const std::chrono::milliseconds megamiss_timeout_; const std::chrono::milliseconds kill_timeout_; @@ -87,10 +118,10 @@ class GuardDogImpl : public GuardDog { std::vector watched_dogs_ GUARDED_BY(wd_lock_); Thread::MutexBasicLockable wd_lock_; Thread::ThreadPtr thread_; - Thread::MutexBasicLockable exit_lock_; - Thread::CondVar exit_event_; - bool run_thread_ GUARDED_BY(exit_lock_); - Thread::CondVar force_checked_event_; + Event::DispatcherPtr dispatcher_; + Event::TimerPtr loop_timer_; + Thread::MutexBasicLockable mutex_; + bool run_thread_ GUARDED_BY(mutex_); }; } // namespace Server diff --git a/source/server/hot_restart.proto b/source/server/hot_restart.proto new file mode 100644 index 0000000000000..7d17a3800edf3 --- /dev/null +++ b/source/server/hot_restart.proto @@ -0,0 +1,66 @@ +syntax = "proto3"; + +package envoy; + +message HotRestartMessage { + // Child->parent requests + message Request { + message PassListenSocket { + string address = 1; + } + message ShutdownAdmin { + } + message Stats { + } + message DrainListeners { + } + message Terminate { + } + oneof request { + PassListenSocket pass_listen_socket = 1; + ShutdownAdmin shutdown_admin = 2; + Stats stats = 3; + DrainListeners drain_listeners = 4; + Terminate terminate = 5; + } + } + + // Parent->child replies + message Reply { + message PassListenSocket { + int32 fd = 1; + } + message ShutdownAdmin { + uint64 original_start_time_unix_seconds = 1; + } + message Stats { + // Values for server_stats, which don't fit with the "combination logic" approach. + uint64 memory_allocated = 1; + uint64 num_connections = 2; + + // Keys are fully qualified stat names. + // + // The amount added to the counter since the last time a message included the counter in this + // map. 
(The first time a counter is included in this map, it's the amount added since the + // final latch() before hot restart began). + map counter_deltas = 3; + // The parent's current values for various gauges in its stats store. + map gauges = 4; + } + oneof reply { + // When this oneof is of the PassListenSocketReply type, there is a special + // implied meaning: the recvmsg that got this proto has control data to make + // the passing of the fd work, so make use of CMSG_SPACE etc. + PassListenSocket pass_listen_socket = 1; + ShutdownAdmin shutdown_admin = 2; + Stats stats = 3; + } + } + + oneof requestreply { + Request request = 1; + Reply reply = 2; + } + + bool didnt_recognize_your_last_message = 3; +} diff --git a/source/server/hot_restart_impl.cc b/source/server/hot_restart_impl.cc index 334ec322ecf13..0a511bd741e1b 100644 --- a/source/server/hot_restart_impl.cc +++ b/source/server/hot_restart_impl.cc @@ -15,36 +15,18 @@ #include "envoy/server/options.h" #include "common/api/os_sys_calls_impl.h" +#include "common/api/os_sys_calls_impl_hot_restart.h" #include "common/common/fmt.h" #include "common/common/lock_guard.h" -#include "common/common/utility.h" -#include "common/network/utility.h" -#include "common/stats/raw_stat_data.h" -#include "common/stats/stats_options_impl.h" #include "absl/strings/string_view.h" namespace Envoy { namespace Server { -// Increment this whenever there is a shared memory / RPC change that will prevent a hot restart -// from working. Operations code can then cope with this and do a full restart. 
-const uint64_t SharedMemory::VERSION = 10; - -static BlockMemoryHashSetOptions blockMemHashOptions(uint64_t max_stats) { - BlockMemoryHashSetOptions hash_set_options; - hash_set_options.capacity = max_stats; - - // https://stackoverflow.com/questions/3980117/hash-table-why-size-should-be-prime - hash_set_options.num_slots = Primes::findPrimeLargerThan(hash_set_options.capacity / 2); - return hash_set_options; -} - -SharedMemory& SharedMemory::initialize(uint64_t stats_set_size, const Options& options) { +SharedMemory* attachSharedMemory(const Options& options) { Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); - - const uint64_t entry_size = Stats::RawStatData::structSizeWithOptions(options.statsOptions()); - const uint64_t total_size = sizeof(SharedMemory) + stats_set_size; + Api::HotRestartOsSysCalls& hot_restart_os_sys_calls = Api::HotRestartOsSysCallsSingleton::get(); int flags = O_RDWR; const std::string shmem_name = fmt::format("/envoy_shared_memory_{}", options.baseId()); @@ -53,62 +35,55 @@ SharedMemory& SharedMemory::initialize(uint64_t stats_set_size, const Options& o // If we are meant to be first, attempt to unlink a previous shared memory instance. If this // is a clean restart this should then allow the shm_open() call below to succeed. - os_sys_calls.shmUnlink(shmem_name.c_str()); + hot_restart_os_sys_calls.shmUnlink(shmem_name.c_str()); } const Api::SysCallIntResult result = - os_sys_calls.shmOpen(shmem_name.c_str(), flags, S_IRUSR | S_IWUSR); + hot_restart_os_sys_calls.shmOpen(shmem_name.c_str(), flags, S_IRUSR | S_IWUSR); if (result.rc_ == -1) { PANIC(fmt::format("cannot open shared memory region {} check user permissions. 
Error: {}", shmem_name, strerror(result.errno_))); } if (options.restartEpoch() == 0) { - const Api::SysCallIntResult truncateRes = os_sys_calls.ftruncate(result.rc_, total_size); + const Api::SysCallIntResult truncateRes = + os_sys_calls.ftruncate(result.rc_, sizeof(SharedMemory)); RELEASE_ASSERT(truncateRes.rc_ != -1, ""); } - const Api::SysCallPtrResult mmapRes = - os_sys_calls.mmap(nullptr, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, result.rc_, 0); + const Api::SysCallPtrResult mmapRes = os_sys_calls.mmap( + nullptr, sizeof(SharedMemory), PROT_READ | PROT_WRITE, MAP_SHARED, result.rc_, 0); SharedMemory* shmem = reinterpret_cast(mmapRes.rc_); RELEASE_ASSERT(shmem != MAP_FAILED, ""); RELEASE_ASSERT((reinterpret_cast(shmem) % alignof(decltype(shmem))) == 0, ""); if (options.restartEpoch() == 0) { - shmem->size_ = total_size; - shmem->version_ = VERSION; - shmem->max_stats_ = options.maxStats(); - shmem->entry_size_ = entry_size; - shmem->initializeMutex(shmem->log_lock_); - shmem->initializeMutex(shmem->access_log_lock_); - shmem->initializeMutex(shmem->stat_lock_); - shmem->initializeMutex(shmem->init_lock_); + shmem->size_ = sizeof(SharedMemory); + shmem->version_ = HOT_RESTART_VERSION; + initializeMutex(shmem->log_lock_); + initializeMutex(shmem->access_log_lock_); } else { - RELEASE_ASSERT(shmem->size_ == total_size, ""); - RELEASE_ASSERT(shmem->version_ == VERSION, ""); - RELEASE_ASSERT(shmem->max_stats_ == options.maxStats(), ""); - RELEASE_ASSERT(shmem->entry_size_ == entry_size, ""); + RELEASE_ASSERT(shmem->size_ == sizeof(SharedMemory), + "Hot restart SharedMemory size mismatch! You must have hot restarted into a " + "not-hot-restart-compatible new version of Envoy."); + RELEASE_ASSERT(shmem->version_ == HOT_RESTART_VERSION, + "Hot restart version mismatch! You must have hot restarted into a " + "not-hot-restart-compatible new version of Envoy."); } - // Stats::RawStatData must be naturally aligned for atomics to work properly. 
- RELEASE_ASSERT( - (reinterpret_cast(shmem->stats_set_data_) % alignof(Stats::RawStatDataSet)) == 0, - ""); - // Here we catch the case where a new Envoy starts up when the current Envoy has not yet fully // initialized. The startup logic is quite complicated, and it's not worth trying to handle this // in a finer way. This will cause the startup to fail with an error code early, without // affecting any currently running processes. The process runner should try again later with some // back off and with the same hot restart epoch number. - uint64_t old_flags = shmem->flags_.fetch_or(Flags::INITIALIZING); - if (old_flags & Flags::INITIALIZING) { + uint64_t old_flags = shmem->flags_.fetch_or(SHMEM_FLAGS_INITIALIZING); + if (old_flags & SHMEM_FLAGS_INITIALIZING) { throw EnvoyException("previous envoy process is still initializing"); } - - return *shmem; + return shmem; } -void SharedMemory::initializeMutex(pthread_mutex_t& mutex) { +void initializeMutex(pthread_mutex_t& mutex) { pthread_mutexattr_t attribute; pthread_mutexattr_init(&attribute); pthread_mutexattr_setpshared(&attribute, PTHREAD_PROCESS_SHARED); @@ -116,368 +91,56 @@ void SharedMemory::initializeMutex(pthread_mutex_t& mutex) { pthread_mutex_init(&mutex, &attribute); } -std::string SharedMemory::version(uint64_t max_num_stats, - const Stats::StatsOptions& stats_options) { - return fmt::format("{}.{}.{}.{}", VERSION, sizeof(SharedMemory), max_num_stats, - stats_options.maxNameLength()); -} - HotRestartImpl::HotRestartImpl(const Options& options) - : options_(options), stats_set_options_(blockMemHashOptions(options.maxStats())), - shmem_(SharedMemory::initialize( - Stats::RawStatDataSet::numBytes(stats_set_options_, options_.statsOptions()), options_)), - log_lock_(shmem_.log_lock_), access_log_lock_(shmem_.access_log_lock_), - stat_lock_(shmem_.stat_lock_), init_lock_(shmem_.init_lock_) { - { - // We must hold the stat lock when attaching to an existing memory segment - // because it might be actively 
written to while we sanityCheck it. - Thread::LockGuard lock(stat_lock_); - stats_set_ = - std::make_unique(stats_set_options_, options.restartEpoch() == 0, - shmem_.stats_set_data_, options_.statsOptions()); - } - stats_allocator_ = std::make_unique(stat_lock_, *stats_set_, - options_.statsOptions()); - my_domain_socket_ = bindDomainSocket(options.restartEpoch()); - child_address_ = createDomainSocketAddress((options.restartEpoch() + 1)); - initDomainSocketAddress(&parent_address_); - if (options.restartEpoch() != 0) { - parent_address_ = createDomainSocketAddress((options.restartEpoch() + -1)); - } - + : as_child_(HotRestartingChild(options.baseId(), options.restartEpoch())), + as_parent_(HotRestartingParent(options.baseId(), options.restartEpoch())), + shmem_(attachSharedMemory(options)), log_lock_(shmem_->log_lock_), + access_log_lock_(shmem_->access_log_lock_) { // If our parent ever goes away just terminate us so that we don't have to rely on ops/launching // logic killing the entire process tree. We should never exist without our parent. int rc = prctl(PR_SET_PDEATHSIG, SIGTERM); RELEASE_ASSERT(rc != -1, ""); } -int HotRestartImpl::bindDomainSocket(uint64_t id) { - Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); - // This actually creates the socket and binds it. We use the socket in datagram mode so we can - // easily read single messages. 
- int fd = socket(AF_UNIX, SOCK_DGRAM | SOCK_NONBLOCK, 0); - sockaddr_un address = createDomainSocketAddress(id); - Api::SysCallIntResult result = - os_sys_calls.bind(fd, reinterpret_cast(&address), sizeof(address)); - if (result.rc_ != 0) { - throw EnvoyException( - fmt::format("unable to bind domain socket with id={} (see --base-id option)", id)); - } - - return fd; -} - -void HotRestartImpl::initDomainSocketAddress(sockaddr_un* address) { - memset(address, 0, sizeof(*address)); - address->sun_family = AF_UNIX; -} - -sockaddr_un HotRestartImpl::createDomainSocketAddress(uint64_t id) { - // Right now we only allow a maximum of 3 concurrent envoy processes to be running. When the third - // starts up it will kill the oldest parent. - const uint64_t MAX_CONCURRENT_PROCESSES = 3; - id = id % MAX_CONCURRENT_PROCESSES; - - // This creates an anonymous domain socket name (where the first byte of the name of \0). - sockaddr_un address; - initDomainSocketAddress(&address); - StringUtil::strlcpy(&address.sun_path[1], - fmt::format("envoy_domain_socket_{}", options_.baseId() + id).c_str(), - sizeof(address.sun_path) - 1); - address.sun_path[0] = 0; - return address; -} - void HotRestartImpl::drainParentListeners() { - if (options_.restartEpoch() > 0) { - // No reply expected. - RpcBase rpc(RpcMessageType::DrainListenersRequest); - sendMessage(parent_address_, rpc); - } - + as_child_.drainParentListeners(); // At this point we are initialized and a new Envoy can startup if needed. 
- shmem_.flags_ &= ~SharedMemory::Flags::INITIALIZING; + shmem_->flags_ &= ~SHMEM_FLAGS_INITIALIZING; } int HotRestartImpl::duplicateParentListenSocket(const std::string& address) { - if (options_.restartEpoch() == 0 || parent_terminated_) { - return -1; - } - - RpcGetListenSocketRequest rpc; - ASSERT(address.length() < sizeof(rpc.address_)); - StringUtil::strlcpy(rpc.address_, address.c_str(), sizeof(rpc.address_)); - sendMessage(parent_address_, rpc); - RpcGetListenSocketReply* reply = - receiveTypedRpc(); - return reply->fd_; -} - -void HotRestartImpl::getParentStats(GetParentStatsInfo& info) { - // There exists a race condition during hot restart involving fetching parent stats. It looks like - // this: - // 1) There currently exist 2 Envoy processes (draining has not completed): P0 and P1. - // 2) New process (P2) comes up and passes the INITIALIZING check. - // 3) P2 proceeds to the parent admin shutdown phase. - // 4) This races with P1 fetching parent stats from P0. - // 5) Calling receiveTypedRpc() below picks up the wrong message. - // - // There are not any great solutions to this problem. We could potentially guard this using flags, - // but this is a legitimate race condition even under normal restart conditions, so exiting P2 - // with an error is not great. We could also rework all of this code so that P0<->P1 and P1<->P2 - // communication occur over different socket pairs. This could work, but is a large change. We - // could also potentially use connection oriented sockets and accept connections from our child, - // and connect to our parent, but again, this becomes complicated. - // - // Instead, we guard this condition with a lock. However, to avoid deadlock, we must tryLock() - // in this path, since this call runs in the same thread as the event loop that is receiving - // messages. If tryLock() fails it is sufficient to not return any parent stats. 
- Thread::TryLockGuard lock(init_lock_); - memset(&info, 0, sizeof(info)); - if (options_.restartEpoch() == 0 || parent_terminated_ || !lock.tryLock()) { - return; - } - - RpcBase rpc(RpcMessageType::GetStatsRequest); - sendMessage(parent_address_, rpc); - RpcGetStatsReply* reply = receiveTypedRpc(); - info.memory_allocated_ = reply->memory_allocated_; - info.num_connections_ = reply->num_connections_; + return as_child_.duplicateParentListenSocket(address); } void HotRestartImpl::initialize(Event::Dispatcher& dispatcher, Server::Instance& server) { - socket_event_ = - dispatcher.createFileEvent(my_domain_socket_, - [this](uint32_t events) -> void { - ASSERT(events == Event::FileReadyType::Read); - onSocketEvent(); - }, - Event::FileTriggerType::Edge, Event::FileReadyType::Read); - server_ = &server; -} - -HotRestartImpl::RpcBase* HotRestartImpl::receiveRpc(bool block) { - // By default the domain socket is non blocking. If we need to block, make it blocking first. - if (block) { - int rc = fcntl(my_domain_socket_, F_SETFL, 0); - RELEASE_ASSERT(rc != -1, ""); - } - - iovec iov[1]; - iov[0].iov_base = &rpc_buffer_[0]; - iov[0].iov_len = rpc_buffer_.size(); - - // We always setup to receive an FD even though most messages do not pass one. - uint8_t control_buffer[CMSG_SPACE(sizeof(int))]; - memset(control_buffer, 0, CMSG_SPACE(sizeof(int))); - - msghdr message; - memset(&message, 0, sizeof(message)); - message.msg_iov = iov; - message.msg_iovlen = 1; - message.msg_control = control_buffer; - message.msg_controllen = CMSG_SPACE(sizeof(int)); - - int rc = recvmsg(my_domain_socket_, &message, 0); - if (!block && rc == -1 && errno == EAGAIN) { - return nullptr; - } - - RELEASE_ASSERT(rc != -1, ""); - RELEASE_ASSERT(message.msg_flags == 0, ""); - - // Turn non-blocking back on if we made it blocking. 
- if (block) { - int rc = fcntl(my_domain_socket_, F_SETFL, O_NONBLOCK); - RELEASE_ASSERT(rc != -1, ""); - } - - RpcBase* rpc = reinterpret_cast(&rpc_buffer_[0]); - RELEASE_ASSERT(static_cast(rc) == rpc->length_, ""); - - // We should only get control data in a GetListenSocketReply. If that's the case, pull the - // cloned fd out of the control data and stick it into the RPC so that higher level code does - // need to deal with any of this. - for (cmsghdr* cmsg = CMSG_FIRSTHDR(&message); cmsg != nullptr; - cmsg = CMSG_NXTHDR(&message, cmsg)) { - - if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS && - rpc->type_ == RpcMessageType::GetListenSocketReply) { - - reinterpret_cast(rpc)->fd_ = - *reinterpret_cast(CMSG_DATA(cmsg)); - } else { - RELEASE_ASSERT(false, ""); - } - } - - return rpc; -} - -void HotRestartImpl::sendMessage(sockaddr_un& address, RpcBase& rpc) { - iovec iov[1]; - iov[0].iov_base = &rpc; - iov[0].iov_len = rpc.length_; - - msghdr message; - memset(&message, 0, sizeof(message)); - message.msg_name = &address; - message.msg_namelen = sizeof(address); - message.msg_iov = iov; - message.msg_iovlen = 1; - int rc = sendmsg(my_domain_socket_, &message, 0); - RELEASE_ASSERT(rc != -1, ""); -} - -void HotRestartImpl::onGetListenSocket(RpcGetListenSocketRequest& rpc) { - RpcGetListenSocketReply reply; - reply.fd_ = -1; - - Network::Address::InstanceConstSharedPtr addr = - Network::Utility::resolveUrl(std::string(rpc.address_)); - for (const auto& listener : server_->listenerManager().listeners()) { - if (*listener.get().socket().localAddress() == *addr) { - reply.fd_ = listener.get().socket().ioHandle().fd(); - break; - } - } - - if (reply.fd_ == -1) { - // In this case there is no fd to duplicate so we just send a normal message. 
- sendMessage(child_address_, reply); - } else { - iovec iov[1]; - iov[0].iov_base = &reply; - iov[0].iov_len = reply.length_; - - uint8_t control_buffer[CMSG_SPACE(sizeof(int))]; - memset(control_buffer, 0, CMSG_SPACE(sizeof(int))); - - msghdr message; - memset(&message, 0, sizeof(message)); - message.msg_name = &child_address_; - message.msg_namelen = sizeof(child_address_); - message.msg_iov = iov; - message.msg_iovlen = 1; - message.msg_control = control_buffer; - message.msg_controllen = CMSG_SPACE(sizeof(int)); - - cmsghdr* control_message = CMSG_FIRSTHDR(&message); - control_message->cmsg_level = SOL_SOCKET; - control_message->cmsg_type = SCM_RIGHTS; - control_message->cmsg_len = CMSG_LEN(sizeof(int)); - *reinterpret_cast(CMSG_DATA(control_message)) = reply.fd_; - - int rc = sendmsg(my_domain_socket_, &message, 0); - RELEASE_ASSERT(rc != -1, ""); - } + as_parent_.initialize(dispatcher, server); } -void HotRestartImpl::onSocketEvent() { - while (true) { - RpcBase* base_message = receiveRpc(false); - if (!base_message) { - return; - } - - switch (base_message->type_) { - case RpcMessageType::ShutdownAdminRequest: { - server_->shutdownAdmin(); - RpcShutdownAdminReply rpc; - rpc.original_start_time_ = server_->startTimeFirstEpoch(); - sendMessage(child_address_, rpc); - break; - } - - case RpcMessageType::GetListenSocketRequest: { - RpcGetListenSocketRequest* message = - reinterpret_cast(base_message); - onGetListenSocket(*message); - break; - } - - case RpcMessageType::GetStatsRequest: { - GetParentStatsInfo info; - server_->getParentStats(info); - RpcGetStatsReply rpc; - rpc.memory_allocated_ = info.memory_allocated_; - rpc.num_connections_ = info.num_connections_; - sendMessage(child_address_, rpc); - break; - } - - case RpcMessageType::DrainListenersRequest: { - server_->drainListeners(); - break; - } - - case RpcMessageType::TerminateRequest: { - ENVOY_LOG(info, "shutting down due to child request"); - kill(getpid(), SIGTERM); - break; - } - - default: { - 
RpcBase rpc(RpcMessageType::UnknownRequestReply); - sendMessage(child_address_, rpc); - break; - } - } - } +void HotRestartImpl::sendParentAdminShutdownRequest(time_t& original_start_time) { + as_child_.sendParentAdminShutdownRequest(original_start_time); } -void HotRestartImpl::shutdownParentAdmin(ShutdownParentAdminInfo& info) { - // See large comment in getParentStats() on why this operation is locked. - Thread::LockGuard lock(init_lock_); - if (options_.restartEpoch() == 0) { - return; - } - - RpcBase rpc(RpcMessageType::ShutdownAdminRequest); - sendMessage(parent_address_, rpc); - RpcShutdownAdminReply* reply = - receiveTypedRpc(); - info.original_start_time_ = reply->original_start_time_; -} +void HotRestartImpl::sendParentTerminateRequest() { as_child_.sendParentTerminateRequest(); } -void HotRestartImpl::terminateParent() { - if (options_.restartEpoch() == 0 || parent_terminated_) { - return; +HotRestart::ServerStatsFromParent +HotRestartImpl::mergeParentStatsIfAny(Stats::StoreRoot& stats_store) { + std::unique_ptr wrapper_msg = as_child_.getParentStats(); + ServerStatsFromParent response; + // getParentStats() will happily and cleanly return nullptr if we have no parent. + if (wrapper_msg) { + as_child_.mergeParentStats(stats_store, wrapper_msg->reply().stats()); + response.parent_memory_allocated_ = wrapper_msg->reply().stats().memory_allocated(); + response.parent_connections_ = wrapper_msg->reply().stats().num_connections(); } - - RpcBase rpc(RpcMessageType::TerminateRequest); - sendMessage(parent_address_, rpc); - parent_terminated_ = true; -} - -void HotRestartImpl::shutdown() { socket_event_.reset(); } - -std::string HotRestartImpl::version() { - Thread::LockGuard lock(stat_lock_); - return versionHelper(shmem_.maxStats(), options_.statsOptions(), *stats_set_); + return response; } -// Called from envoy --hot-restart-version -- needs to instantiate a RawStatDataSet so it -// can generate the version string. 
-std::string HotRestartImpl::hotRestartVersion(uint64_t max_num_stats, uint64_t max_stat_name_len) { - Stats::StatsOptionsImpl stats_options; - stats_options.max_obj_name_length_ = max_stat_name_len - stats_options.maxStatSuffixLength(); - - const BlockMemoryHashSetOptions hash_set_options = blockMemHashOptions(max_num_stats); - const uint64_t bytes = Stats::RawStatDataSet::numBytes(hash_set_options, stats_options); - std::unique_ptr mem_buffer_for_dry_run_(new uint8_t[bytes]); +void HotRestartImpl::shutdown() { as_parent_.shutdown(); } - Stats::RawStatDataSet stats_set(hash_set_options, true /* init */, mem_buffer_for_dry_run_.get(), - stats_options); - - return versionHelper(max_num_stats, stats_options, stats_set); -} +std::string HotRestartImpl::version() { return hotRestartVersion(); } -std::string HotRestartImpl::versionHelper(uint64_t max_num_stats, - const Stats::StatsOptions& stats_options, - Stats::RawStatDataSet& stats_set) { - return SharedMemory::version(max_num_stats, stats_options) + "." + - stats_set.version(stats_options); +std::string HotRestartImpl::hotRestartVersion() { + return fmt::format("{}.{}", HOT_RESTART_VERSION, sizeof(SharedMemory)); } } // namespace Server diff --git a/source/server/hot_restart_impl.h b/source/server/hot_restart_impl.h index 5d432c42ad441..0244c20f69274 100644 --- a/source/server/hot_restart_impl.h +++ b/source/server/hot_restart_impl.h @@ -10,63 +10,43 @@ #include "envoy/common/platform.h" #include "envoy/server/hot_restart.h" -#include "envoy/server/options.h" -#include "envoy/stats/stats_options.h" #include "common/common/assert.h" -#include "common/stats/raw_stat_data.h" +#include "common/stats/heap_stat_data.h" + +#include "server/hot_restarting_child.h" +#include "server/hot_restarting_parent.h" namespace Envoy { namespace Server { +// Increment this whenever there is a shared memory / RPC change that will prevent a hot restart +// from working. Operations code can then cope with this and do a full restart. 
+const uint64_t HOT_RESTART_VERSION = 11; + /** * Shared memory segment. This structure is laid directly into shared memory and is used amongst * all running envoy processes. */ -class SharedMemory { -public: - static void configure(uint64_t max_num_stats, uint64_t max_stat_name_len); - static std::string version(uint64_t max_num_stats, const Stats::StatsOptions& stats_options); - - // Made public for testing. - static const uint64_t VERSION; - - int64_t maxStats() const { return max_stats_; } - -private: - struct Flags { - static const uint64_t INITIALIZING = 0x1; - }; - - // Due to the flexible-array-length of stats_set_data_, c-style allocation - // and initialization are necessary. - SharedMemory() = delete; - ~SharedMemory() = delete; - - /** - * Initialize the shared memory segment, depending on whether we should be the first running - * envoy, or a host restarted envoy process. - */ - static SharedMemory& initialize(uint64_t stats_set_size, const Options& options); - - /** - * Initialize a pthread mutex for process shared locking. - */ - void initializeMutex(pthread_mutex_t& mutex); - +struct SharedMemory { uint64_t size_; uint64_t version_; - uint64_t max_stats_; - uint64_t entry_size_; - std::atomic flags_; pthread_mutex_t log_lock_; pthread_mutex_t access_log_lock_; - pthread_mutex_t stat_lock_; - pthread_mutex_t init_lock_; - alignas(BlockMemoryHashSet) uint8_t stats_set_data_[]; - - friend class HotRestartImpl; + std::atomic flags_; }; +static const uint64_t SHMEM_FLAGS_INITIALIZING = 0x1; + +/** + * Initialize the shared memory segment, depending on whether we are the first running + * envoy, or a hot restarted envoy process. + */ +SharedMemory* attachSharedMemory(const Options& options); + +/** + * Initialize a pthread mutex for process shared locking. + */ +void initializeMutex(pthread_mutex_t& mutex); /** * Implementation of Thread::BasicLockable that operates on a process shared pthread mutex. 
@@ -110,118 +90,39 @@ class ProcessSharedMutex : public Thread::BasicLockable { }; /** - * Implementation of HotRestart built for Linux. + * Implementation of HotRestart built for Linux. Most of the "protocol" type logic is split out into + * HotRestarting{Base,Parent,Child}. This class ties all that to shared memory and version logic. */ -class HotRestartImpl : public HotRestart, Logger::Loggable { +class HotRestartImpl : public HotRestart { public: HotRestartImpl(const Options& options); // Server::HotRestart void drainParentListeners() override; int duplicateParentListenSocket(const std::string& address) override; - void getParentStats(GetParentStatsInfo& info) override; void initialize(Event::Dispatcher& dispatcher, Server::Instance& server) override; - void shutdownParentAdmin(ShutdownParentAdminInfo& info) override; - void terminateParent() override; + void sendParentAdminShutdownRequest(time_t& original_start_time) override; + void sendParentTerminateRequest() override; + ServerStatsFromParent mergeParentStatsIfAny(Stats::StoreRoot& stats_store) override; void shutdown() override; std::string version() override; Thread::BasicLockable& logLock() override { return log_lock_; } Thread::BasicLockable& accessLogLock() override { return access_log_lock_; } - Stats::RawStatDataAllocator& statsAllocator() override { return *stats_allocator_; } /** * envoy --hot_restart_version doesn't initialize Envoy, but computes the version string * based on the configured options. 
*/ - static std::string hotRestartVersion(uint64_t max_num_stats, uint64_t max_stat_name_len); + static std::string hotRestartVersion(); private: - enum class RpcMessageType { - DrainListenersRequest = 1, - GetListenSocketRequest = 2, - GetListenSocketReply = 3, - ShutdownAdminRequest = 4, - ShutdownAdminReply = 5, - TerminateRequest = 6, - UnknownRequestReply = 7, - GetStatsRequest = 8, - GetStatsReply = 9 - }; - - PACKED_STRUCT(struct RpcBase { - RpcBase(RpcMessageType type, uint64_t length = sizeof(RpcBase)) - : type_(type), length_(length) {} - - RpcMessageType type_; - uint64_t length_; - }); - - PACKED_STRUCT(struct RpcGetListenSocketRequest - : public RpcBase { - RpcGetListenSocketRequest() - : RpcBase(RpcMessageType::GetListenSocketRequest, sizeof(*this)) {} - - char address_[256]{0}; - }); - - PACKED_STRUCT(struct RpcGetListenSocketReply - : public RpcBase { - RpcGetListenSocketReply() - : RpcBase(RpcMessageType::GetListenSocketReply, sizeof(*this)) {} - - int fd_{0}; - }); - - PACKED_STRUCT(struct RpcShutdownAdminReply - : public RpcBase { - RpcShutdownAdminReply() - : RpcBase(RpcMessageType::ShutdownAdminReply, sizeof(*this)) {} - - uint64_t original_start_time_{0}; - }); - - PACKED_STRUCT(struct RpcGetStatsReply - : public RpcBase { - RpcGetStatsReply() : RpcBase(RpcMessageType::GetStatsReply, sizeof(*this)) {} - - uint64_t memory_allocated_{0}; - uint64_t num_connections_{0}; - uint64_t unused_[16]{0}; - }); - - template rpc_class* receiveTypedRpc() { - RpcBase* base_message = receiveRpc(true); - RELEASE_ASSERT(base_message->length_ == sizeof(rpc_class), ""); - RELEASE_ASSERT(base_message->type_ == rpc_type, ""); - return reinterpret_cast(base_message); - } - - int bindDomainSocket(uint64_t id); - void initDomainSocketAddress(sockaddr_un* address); - sockaddr_un createDomainSocketAddress(uint64_t id); - void onGetListenSocket(RpcGetListenSocketRequest& rpc); - void onSocketEvent(); - RpcBase* receiveRpc(bool block); - void sendMessage(sockaddr_un& 
address, RpcBase& rpc); - static std::string versionHelper(uint64_t max_num_stats, const Stats::StatsOptions& stats_options, - Stats::RawStatDataSet& stats_set); - - const Options& options_; - BlockMemoryHashSetOptions stats_set_options_; - SharedMemory& shmem_; - std::unique_ptr stats_set_ GUARDED_BY(stat_lock_); - std::unique_ptr stats_allocator_; + HotRestartingChild as_child_; + HotRestartingParent as_parent_; + // This pointer is shared memory, and is expected to exist until process end. + // It will automatically be unmapped when the process terminates. + SharedMemory* shmem_; ProcessSharedMutex log_lock_; ProcessSharedMutex access_log_lock_; - ProcessSharedMutex stat_lock_; - ProcessSharedMutex init_lock_; - int my_domain_socket_{-1}; - sockaddr_un parent_address_; - sockaddr_un child_address_; - Event::FileEventPtr socket_event_; - std::array rpc_buffer_; - Server::Instance* server_{}; - bool parent_terminated_{}; }; } // namespace Server diff --git a/source/server/hot_restart_nop_impl.h b/source/server/hot_restart_nop_impl.h index bc31f897f29ad..010f93340ed52 100644 --- a/source/server/hot_restart_nop_impl.h +++ b/source/server/hot_restart_nop_impl.h @@ -15,25 +15,23 @@ namespace Server { */ class HotRestartNopImpl : public Server::HotRestart { public: - HotRestartNopImpl() {} - // Server::HotRestart void drainParentListeners() override {} int duplicateParentListenSocket(const std::string&) override { return -1; } - void getParentStats(GetParentStatsInfo& info) override { memset(&info, 0, sizeof(info)); } void initialize(Event::Dispatcher&, Server::Instance&) override {} - void shutdownParentAdmin(ShutdownParentAdminInfo&) override {} - void terminateParent() override {} + void sendParentAdminShutdownRequest(time_t&) override {} + void sendParentTerminateRequest() override {} + ServerStatsFromParent mergeParentStatsIfAny(Stats::StoreRoot&) override { + return ServerStatsFromParent(); + } void shutdown() override {} std::string version() override { return 
"disabled"; } Thread::BasicLockable& logLock() override { return log_lock_; } Thread::BasicLockable& accessLogLock() override { return access_log_lock_; } - Stats::StatDataAllocator& statsAllocator() override { return stats_allocator_; } private: Thread::MutexBasicLockable log_lock_; Thread::MutexBasicLockable access_log_lock_; - Stats::HeapStatDataAllocator stats_allocator_; }; } // namespace Server } // namespace Envoy diff --git a/source/server/hot_restarting_base.cc b/source/server/hot_restarting_base.cc new file mode 100644 index 0000000000000..20e0a6c0347bc --- /dev/null +++ b/source/server/hot_restarting_base.cc @@ -0,0 +1,213 @@ +#include "server/hot_restarting_base.h" + +#include "common/api/os_sys_calls_impl.h" +#include "common/common/utility.h" + +namespace Envoy { +namespace Server { + +using HotRestartMessage = envoy::HotRestartMessage; + +void HotRestartingBase::initDomainSocketAddress(sockaddr_un* address) { + memset(address, 0, sizeof(*address)); + address->sun_family = AF_UNIX; +} + +sockaddr_un HotRestartingBase::createDomainSocketAddress(uint64_t id, const std::string& role) { + // Right now we only allow a maximum of 3 concurrent envoy processes to be running. When the third + // starts up it will kill the oldest parent. + const uint64_t MAX_CONCURRENT_PROCESSES = 3; + id = id % MAX_CONCURRENT_PROCESSES; + + // This creates an anonymous domain socket name (where the first byte of the name is \0). + sockaddr_un address; + initDomainSocketAddress(&address); + StringUtil::strlcpy(&address.sun_path[1], + fmt::format("envoy_domain_socket_{}_{}", role, base_id_ + id).c_str(), + sizeof(address.sun_path) - 1); + address.sun_path[0] = 0; + return address; + } + +void HotRestartingBase::bindDomainSocket(uint64_t id, const std::string& role) { + Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); + // This actually creates the socket and binds it. We use the socket in datagram mode so we can + // easily read single messages. 
+ my_domain_socket_ = socket(AF_UNIX, SOCK_DGRAM | SOCK_NONBLOCK, 0); + sockaddr_un address = createDomainSocketAddress(id, role); + Api::SysCallIntResult result = + os_sys_calls.bind(my_domain_socket_, reinterpret_cast(&address), sizeof(address)); + if (result.rc_ != 0) { + throw EnvoyException( + fmt::format("unable to bind domain socket with id={} (see --base-id option)", id)); + } +} + +void HotRestartingBase::sendHotRestartMessage(sockaddr_un& address, + const HotRestartMessage& proto) { + const uint64_t serialized_size = proto.ByteSizeLong(); + const uint64_t total_size = sizeof(uint64_t) + serialized_size; + // Fill with uint64_t 'length' followed by the serialized HotRestartMessage. + std::vector send_buf; + send_buf.resize(total_size); + *reinterpret_cast(send_buf.data()) = htobe64(serialized_size); + RELEASE_ASSERT(proto.SerializeWithCachedSizesToArray(send_buf.data() + sizeof(uint64_t)), + "failed to serialize a HotRestartMessage"); + + RELEASE_ASSERT(fcntl(my_domain_socket_, F_SETFL, 0) != -1, + fmt::format("Set domain socket blocking failed, errno = {}", errno)); + + uint8_t* next_byte_to_send = send_buf.data(); + uint64_t sent = 0; + while (sent < total_size) { + const uint64_t cur_chunk_size = std::min(MaxSendmsgSize, total_size - sent); + iovec iov[1]; + iov[0].iov_base = next_byte_to_send; + iov[0].iov_len = cur_chunk_size; + next_byte_to_send += cur_chunk_size; + sent += cur_chunk_size; + msghdr message; + memset(&message, 0, sizeof(message)); + message.msg_name = &address; + message.msg_namelen = sizeof(address); + message.msg_iov = iov; + message.msg_iovlen = 1; + + // Control data stuff, only relevant for the fd passing done with PassListenSocketReply. 
+ uint8_t control_buffer[CMSG_SPACE(sizeof(int))]; + if (replyIsExpectedType(&proto, HotRestartMessage::Reply::kPassListenSocket) && + proto.reply().pass_listen_socket().fd() != -1) { + memset(control_buffer, 0, CMSG_SPACE(sizeof(int))); + message.msg_control = control_buffer; + message.msg_controllen = CMSG_SPACE(sizeof(int)); + cmsghdr* control_message = CMSG_FIRSTHDR(&message); + control_message->cmsg_level = SOL_SOCKET; + control_message->cmsg_type = SCM_RIGHTS; + control_message->cmsg_len = CMSG_LEN(sizeof(int)); + *reinterpret_cast(CMSG_DATA(control_message)) = proto.reply().pass_listen_socket().fd(); + ASSERT(sent == total_size, "an fd passing message was too long for one sendmsg()."); + } + + const int rc = sendmsg(my_domain_socket_, &message, 0); + RELEASE_ASSERT(rc == static_cast(cur_chunk_size), + fmt::format("hot restart sendmsg() failed: returned {}, errno {}", rc, errno)); + } + RELEASE_ASSERT(fcntl(my_domain_socket_, F_SETFL, O_NONBLOCK) != -1, + fmt::format("Set domain socket nonblocking failed, errno = {}", errno)); +} + +bool HotRestartingBase::replyIsExpectedType(const HotRestartMessage* proto, + HotRestartMessage::Reply::ReplyCase oneof_type) const { + return proto != nullptr && proto->requestreply_case() == HotRestartMessage::kReply && + proto->reply().reply_case() == oneof_type; +} + +// Pull the cloned fd, if present, out of the control data and write it into the +// PassListenSocketReply proto; the higher level code will see a listening fd that Just Works. We +// should only get control data in a PassListenSocketReply, it should only be the fd passing type, +// and there should only be one at a time. Crash on any other control data. 
+void HotRestartingBase::getPassedFdIfPresent(HotRestartMessage* out, msghdr* message) { + cmsghdr* cmsg = CMSG_FIRSTHDR(message); + if (cmsg != nullptr) { + RELEASE_ASSERT(cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS && + replyIsExpectedType(out, HotRestartMessage::Reply::kPassListenSocket), + "recvmsg() came with control data when the message's purpose was not to pass a " + "file descriptor."); + + out->mutable_reply()->mutable_pass_listen_socket()->set_fd( + *reinterpret_cast(CMSG_DATA(cmsg))); + + RELEASE_ASSERT(CMSG_NXTHDR(message, cmsg) == nullptr, + "More than one control data on a single hot restart recvmsg()."); + } +} + +// While in use, recv_buf_ is always >= MaxSendmsgSize. In between messages, it is kept empty, to be +// grown back to MaxSendmsgSize at the start of the next message. +void HotRestartingBase::initRecvBufIfNewMessage() { + if (recv_buf_.empty()) { + ASSERT(cur_msg_recvd_bytes_ == 0); + ASSERT(!expected_proto_length_.has_value()); + recv_buf_.resize(MaxSendmsgSize); + } +} + +// Must only be called when recv_buf_ contains a full proto. Returns that proto, and resets all of +// our receive-buffering state back to empty, to await a new message. +std::unique_ptr HotRestartingBase::parseProtoAndResetState() { + auto ret = std::make_unique(); + RELEASE_ASSERT( + ret->ParseFromArray(recv_buf_.data() + sizeof(uint64_t), expected_proto_length_.value()), + "failed to parse a HotRestartMessage."); + recv_buf_.resize(0); + cur_msg_recvd_bytes_ = 0; + expected_proto_length_.reset(); + return ret; +} + +std::unique_ptr HotRestartingBase::receiveHotRestartMessage(Blocking block) { + // By default the domain socket is non blocking. If we need to block, make it blocking first. 
+ if (block == Blocking::Yes) { + RELEASE_ASSERT(fcntl(my_domain_socket_, F_SETFL, 0) != -1, + fmt::format("Set domain socket blocking failed, errno = {}", errno)); + } + + initRecvBufIfNewMessage(); + + iovec iov[1]; + msghdr message; + uint8_t control_buffer[CMSG_SPACE(sizeof(int))]; + std::unique_ptr ret = nullptr; + while (!ret) { + iov[0].iov_base = recv_buf_.data() + cur_msg_recvd_bytes_; + iov[0].iov_len = MaxSendmsgSize; + + // We always setup to receive an FD even though most messages do not pass one. + memset(control_buffer, 0, CMSG_SPACE(sizeof(int))); + memset(&message, 0, sizeof(message)); + message.msg_iov = iov; + message.msg_iovlen = 1; + message.msg_control = control_buffer; + message.msg_controllen = CMSG_SPACE(sizeof(int)); + + const int recvmsg_rc = recvmsg(my_domain_socket_, &message, 0); + if (block == Blocking::No && recvmsg_rc == -1 && errno == EAGAIN) { + return nullptr; + } + RELEASE_ASSERT(recvmsg_rc != -1, fmt::format("recvmsg() returned -1, errno = {}", errno)); + RELEASE_ASSERT(message.msg_flags == 0, + fmt::format("recvmsg() left msg_flags = {}", message.msg_flags)); + cur_msg_recvd_bytes_ += recvmsg_rc; + + // If we don't already know 'length', we're at the start of a new length+protobuf message! + if (!expected_proto_length_.has_value()) { + // We are not ok with messages so fragmented that the length doesn't even come in one piece. + RELEASE_ASSERT(recvmsg_rc >= 8, "received a brokenly tiny message fragment."); + + expected_proto_length_ = be64toh(*reinterpret_cast(recv_buf_.data())); + // Expand the buffer from its default 4096 if this message is going to be longer. + if (expected_proto_length_.value() > MaxSendmsgSize - sizeof(uint64_t)) { + recv_buf_.resize(expected_proto_length_.value() + sizeof(uint64_t)); + cur_msg_recvd_bytes_ = recvmsg_rc; + } + } + // If we have received beyond the end of the current in-flight proto, then next is misaligned. 
+ RELEASE_ASSERT(cur_msg_recvd_bytes_ <= sizeof(uint64_t) + expected_proto_length_.value(), + "received a length+protobuf message not aligned to start of sendmsg()."); + + if (cur_msg_recvd_bytes_ == sizeof(uint64_t) + expected_proto_length_.value()) { + ret = parseProtoAndResetState(); + } + } + + // Turn non-blocking back on if we made it blocking. + if (block == Blocking::Yes) { + RELEASE_ASSERT(fcntl(my_domain_socket_, F_SETFL, O_NONBLOCK) != -1, + fmt::format("Set domain socket nonblocking failed, errno = {}", errno)); + } + getPassedFdIfPresent(ret.get(), &message); + return ret; +} + +} // namespace Server +} // namespace Envoy diff --git a/source/server/hot_restarting_base.h b/source/server/hot_restarting_base.h new file mode 100644 index 0000000000000..a5a0fbb264afd --- /dev/null +++ b/source/server/hot_restarting_base.h @@ -0,0 +1,91 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include + +#include "envoy/common/platform.h" +#include "envoy/server/hot_restart.h" +#include "envoy/server/options.h" + +#include "common/common/assert.h" + +namespace Envoy { +namespace Server { + +/** + * Logic shared by the implementations of both sides of the child<-->parent hot restart protocol: + * domain socket communication, and our ad hoc RPC protocol. + */ +class HotRestartingBase { +protected: + HotRestartingBase(uint64_t base_id) : base_id_(base_id) {} + + void initDomainSocketAddress(sockaddr_un* address); + sockaddr_un createDomainSocketAddress(uint64_t id, const std::string& role); + void bindDomainSocket(uint64_t id, const std::string& role); + int myDomainSocket() const { return my_domain_socket_; } + + // Protocol description: + // + // In each direction between parent<-->child, a series of pairs of: + // A uint64 'length' (bytes in network order), + // followed by 'length' bytes of a serialized HotRestartMessage. + // Each new message must start in a new sendmsg datagram, i.e. 'length' must always start at byte + // 0. 
Each sendmsg datagram can be up to 4096 bytes (including 'length' if present). When the + serialized protobuf is longer than 4096-8 bytes, and so cannot fit in just one datagram, it is + delivered by a series of datagrams. In each of these continuation datagrams, the protobuf data + starts at byte 0. + // + // There is no mechanism to explicitly pair responses to requests. However, the child initiates + // all exchanges, and blocks until a reply is received, so there is implicit pairing. + void sendHotRestartMessage(sockaddr_un& address, const envoy::HotRestartMessage& proto); + + enum class Blocking { Yes, No }; + // Receive data, possibly enough to build one of our protocol messages. + // If block is Blocking::Yes, blocks until a full protocol message is available. + // If block is Blocking::No, returns nullptr if we run out of data to receive before a full protocol + // message is available. In either case, the HotRestartingBase may end up buffering some data for + // the next protocol message, even if the function returns a protobuf. + std::unique_ptr receiveHotRestartMessage(Blocking block); + + bool replyIsExpectedType(const envoy::HotRestartMessage* proto, + envoy::HotRestartMessage::Reply::ReplyCase oneof_type) const; + +private: + void getPassedFdIfPresent(envoy::HotRestartMessage* out, msghdr* message); + std::unique_ptr parseProtoAndResetState(); + void initRecvBufIfNewMessage(); + + // An int in [0, MAX_CONCURRENT_PROCESSES). As hot restarts happen, each next process gets the + // next of 0,1,2,0,1,... + // A HotRestartingBase's domain socket's name contains its base_id_ value, and so we can use + // this value to determine which domain socket name to treat as our parent, and which to treat as + // our child. (E.g. if we are 2, 1 is parent and 0 is child). + const uint64_t base_id_; + int my_domain_socket_{-1}; + + const uint64_t MaxSendmsgSize = 4096; + + // State for the receiving half of the protocol. 
+ // + // When filled, the size in bytes that the in-flight HotRestartMessage should be. + // When empty, we're ready to start receiving a new message (starting with a uint64 'length'). + absl::optional expected_proto_length_; + // How much of the current in-flight message (including both the uint64 'length', plus the proto + // itself) we have received. Once this equals expected_proto_length_ + sizeof(uint64_t), we're + // ready to parse the HotRestartMessage. Should be set to 0 in between messages, to indicate + // readiness for a new message. + uint64_t cur_msg_recvd_bytes_{}; + // The first 8 bytes will always be the raw net-order bytes of the current value of + // expected_proto_length_. The protobuf partial data starts at byte 8. + // Should be resized to 0 in between messages, to indicate readiness for a new message. + std::vector recv_buf_; +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/hot_restarting_child.cc b/source/server/hot_restarting_child.cc new file mode 100644 index 0000000000000..1807f53903c5c --- /dev/null +++ b/source/server/hot_restarting_child.cc @@ -0,0 +1,101 @@ +#include "server/hot_restarting_child.h" + +#include "common/common/utility.h" + +namespace Envoy { +namespace Server { + +using HotRestartMessage = envoy::HotRestartMessage; + +HotRestartingChild::HotRestartingChild(int base_id, int restart_epoch) + : HotRestartingBase(base_id), restart_epoch_(restart_epoch) { + initDomainSocketAddress(&parent_address_); + if (restart_epoch_ != 0) { + parent_address_ = createDomainSocketAddress(restart_epoch_ + -1, "parent"); + } + bindDomainSocket(restart_epoch_, "child"); +} + +int HotRestartingChild::duplicateParentListenSocket(const std::string& address) { + if (restart_epoch_ == 0 || parent_terminated_) { + return -1; + } + + HotRestartMessage wrapped_request; + wrapped_request.mutable_request()->mutable_pass_listen_socket()->set_address(address); + sendHotRestartMessage(parent_address_, wrapped_request); + + 
std::unique_ptr wrapped_reply = receiveHotRestartMessage(Blocking::Yes); + if (!replyIsExpectedType(wrapped_reply.get(), HotRestartMessage::Reply::kPassListenSocket)) { + return -1; + } + return wrapped_reply->reply().pass_listen_socket().fd(); +} + +std::unique_ptr HotRestartingChild::getParentStats() { + if (restart_epoch_ == 0 || parent_terminated_) { + return nullptr; + } + + HotRestartMessage wrapped_request; + wrapped_request.mutable_request()->mutable_stats(); + sendHotRestartMessage(parent_address_, wrapped_request); + + std::unique_ptr wrapped_reply = receiveHotRestartMessage(Blocking::Yes); + RELEASE_ASSERT(replyIsExpectedType(wrapped_reply.get(), HotRestartMessage::Reply::kStats), + "Hot restart parent did not respond as expected to get stats request."); + return wrapped_reply; +} + +void HotRestartingChild::drainParentListeners() { + if (restart_epoch_ == 0 || parent_terminated_) { + return; + } + // No reply expected. + HotRestartMessage wrapped_request; + wrapped_request.mutable_request()->mutable_drain_listeners(); + sendHotRestartMessage(parent_address_, wrapped_request); +} + +void HotRestartingChild::sendParentAdminShutdownRequest(time_t& original_start_time) { + if (restart_epoch_ == 0 || parent_terminated_) { + return; + } + + HotRestartMessage wrapped_request; + wrapped_request.mutable_request()->mutable_shutdown_admin(); + sendHotRestartMessage(parent_address_, wrapped_request); + + std::unique_ptr wrapped_reply = receiveHotRestartMessage(Blocking::Yes); + RELEASE_ASSERT(replyIsExpectedType(wrapped_reply.get(), HotRestartMessage::Reply::kShutdownAdmin), + "Hot restart parent did not respond as expected to ShutdownParentAdmin."); + original_start_time = wrapped_reply->reply().shutdown_admin().original_start_time_unix_seconds(); +} + +void HotRestartingChild::sendParentTerminateRequest() { + if (restart_epoch_ == 0 || parent_terminated_) { + return; + } + HotRestartMessage wrapped_request; + 
wrapped_request.mutable_request()->mutable_terminate(); + sendHotRestartMessage(parent_address_, wrapped_request); + parent_terminated_ = true; + // Once setting parent_terminated_ == true, we can send no more hot restart RPCs, and therefore + // receive no more responses, including stats. So, now safe to forget our stat transferral state. + // + // This destruction is actually important far beyond memory efficiency. The scope-based temporary + // counter logic relies on the StatMerger getting destroyed once hot restart's stat merging is + // all done. (See stat_merger.h for details). + stat_merger_.reset(); +} + +void HotRestartingChild::mergeParentStats(Stats::Store& stats_store, + const HotRestartMessage::Reply::Stats& stats_proto) { + if (!stat_merger_) { + stat_merger_ = std::make_unique(stats_store); + } + stat_merger_->mergeStats(stats_proto.counter_deltas(), stats_proto.gauges()); +} + +} // namespace Server +} // namespace Envoy diff --git a/source/server/hot_restarting_child.h b/source/server/hot_restarting_child.h new file mode 100644 index 0000000000000..08c3cc27359f1 --- /dev/null +++ b/source/server/hot_restarting_child.h @@ -0,0 +1,33 @@ +#pragma once + +#include "common/stats/stat_merger.h" + +#include "server/hot_restarting_base.h" + +namespace Envoy { +namespace Server { + +/** + * The child half of hot restarting. Issues requests and commands to the parent. 
+ */ +class HotRestartingChild : HotRestartingBase, Logger::Loggable { +public: + HotRestartingChild(int base_id, int restart_epoch); + + int duplicateParentListenSocket(const std::string& address); + std::unique_ptr getParentStats(); + void drainParentListeners(); + void sendParentAdminShutdownRequest(time_t& original_start_time); + void sendParentTerminateRequest(); + void mergeParentStats(Stats::Store& stats_store, + const envoy::HotRestartMessage::Reply::Stats& stats_proto); + +private: + const int restart_epoch_; + bool parent_terminated_{}; + sockaddr_un parent_address_; + std::unique_ptr stat_merger_{}; +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/hot_restarting_parent.cc b/source/server/hot_restarting_parent.cc new file mode 100644 index 0000000000000..becef491b5fae --- /dev/null +++ b/source/server/hot_restarting_parent.cc @@ -0,0 +1,130 @@ +#include "server/hot_restarting_parent.h" + +#include "envoy/server/instance.h" + +#include "common/memory/stats.h" +#include "common/network/utility.h" + +namespace Envoy { +namespace Server { + +using HotRestartMessage = envoy::HotRestartMessage; + +HotRestartingParent::HotRestartingParent(int base_id, int restart_epoch) + : HotRestartingBase(base_id), restart_epoch_(restart_epoch) { + child_address_ = createDomainSocketAddress(restart_epoch_ + 1, "child"); + bindDomainSocket(restart_epoch_, "parent"); +} + +void HotRestartingParent::initialize(Event::Dispatcher& dispatcher, Server::Instance& server) { + socket_event_ = dispatcher.createFileEvent( + myDomainSocket(), + [this](uint32_t events) -> void { + ASSERT(events == Event::FileReadyType::Read); + onSocketEvent(); + }, + Event::FileTriggerType::Edge, Event::FileReadyType::Read); + internal_ = std::make_unique(&server); +} + +void HotRestartingParent::onSocketEvent() { + std::unique_ptr wrapped_request; + while ((wrapped_request = receiveHotRestartMessage(Blocking::No))) { + if (wrapped_request->requestreply_case() == 
HotRestartMessage::kReply) { + ENVOY_LOG(error, "child sent us a HotRestartMessage reply (we want requests); ignoring."); + HotRestartMessage wrapped_reply; + wrapped_reply.set_didnt_recognize_your_last_message(true); + sendHotRestartMessage(child_address_, wrapped_reply); + continue; + } + switch (wrapped_request->request().request_case()) { + case HotRestartMessage::Request::kShutdownAdmin: { + sendHotRestartMessage(child_address_, internal_->shutdownAdmin()); + break; + } + + case HotRestartMessage::Request::kPassListenSocket: { + sendHotRestartMessage(child_address_, + internal_->getListenSocketsForChild(wrapped_request->request())); + break; + } + + case HotRestartMessage::Request::kStats: { + HotRestartMessage wrapped_reply; + internal_->exportStatsToChild(wrapped_reply.mutable_reply()->mutable_stats()); + sendHotRestartMessage(child_address_, wrapped_reply); + break; + } + + case HotRestartMessage::Request::kDrainListeners: { + internal_->drainListeners(); + break; + } + + case HotRestartMessage::Request::kTerminate: { + ENVOY_LOG(info, "shutting down due to child request"); + kill(getpid(), SIGTERM); + break; + } + + default: { + ENVOY_LOG(error, "child sent us an unfamiliar type of HotRestartMessage; ignoring."); + HotRestartMessage wrapped_reply; + wrapped_reply.set_didnt_recognize_your_last_message(true); + sendHotRestartMessage(child_address_, wrapped_reply); + break; + } + } + } +} + +void HotRestartingParent::shutdown() { socket_event_.reset(); } + +HotRestartMessage HotRestartingParent::Internal::shutdownAdmin() { + server_->shutdownAdmin(); + HotRestartMessage wrapped_reply; + wrapped_reply.mutable_reply()->mutable_shutdown_admin()->set_original_start_time_unix_seconds( + server_->startTimeFirstEpoch()); + return wrapped_reply; +} + +HotRestartMessage +HotRestartingParent::Internal::getListenSocketsForChild(const HotRestartMessage::Request& request) { + HotRestartMessage wrapped_reply; + 
wrapped_reply.mutable_reply()->mutable_pass_listen_socket()->set_fd(-1); + Network::Address::InstanceConstSharedPtr addr = + Network::Utility::resolveUrl(request.pass_listen_socket().address()); + for (const auto& listener : server_->listenerManager().listeners()) { + if (*listener.get().socket().localAddress() == *addr) { + wrapped_reply.mutable_reply()->mutable_pass_listen_socket()->set_fd( + listener.get().socket().ioHandle().fd()); + break; + } + } + return wrapped_reply; +} + +// TODO(fredlas) if there are enough stats for stat name length to become an issue, this current +// implementation can negate the benefit of symbolized stat names by periodically reaching the +// magnitude of memory usage that they are meant to avoid, since this map holds full-string +// names. The problem can be solved by splitting the export up over many chunks. +void HotRestartingParent::Internal::exportStatsToChild(HotRestartMessage::Reply::Stats* stats) { + for (const auto& gauge : server_->stats().gauges()) { + (*stats->mutable_gauges())[gauge->name()] = gauge->value(); + } + for (const auto& counter : server_->stats().counters()) { + // The hot restart parent is expected to have stopped its normal stat exporting (and so + // latching) by the time it begins exporting to the hot restart child. 
+ uint64_t latched_value = counter->latch(); + if (latched_value > 0) { + (*stats->mutable_counter_deltas())[counter->name()] = latched_value; + } + } + stats->set_memory_allocated(Memory::Stats::totalCurrentlyAllocated()); + stats->set_num_connections(server_->listenerManager().numConnections()); +} + +void HotRestartingParent::Internal::drainListeners() { server_->drainListeners(); } + +} // namespace Server +} // namespace Envoy diff --git a/source/server/hot_restarting_parent.h b/source/server/hot_restarting_parent.h new file mode 100644 index 0000000000000..4afef96a14a6b --- /dev/null +++ b/source/server/hot_restarting_parent.h @@ -0,0 +1,49 @@ +#pragma once + +#include "common/common/hash.h" + +#include "server/hot_restarting_base.h" + +namespace Envoy { +namespace Server { + +/** + * The parent half of hot restarting. Listens for requests and commands from the child. + * This outer class only handles evented socket I/O. The actual hot restart logic lives in + * HotRestartingParent::Internal. + */ +class HotRestartingParent : HotRestartingBase, Logger::Loggable { +public: + HotRestartingParent(int base_id, int restart_epoch); + void initialize(Event::Dispatcher& dispatcher, Server::Instance& server); + void shutdown(); + + // The hot restarting parent's hot restart logic. Each function is meant to be called to fulfill a + // request from the child for that action. + class Internal { + public: + explicit Internal(Server::Instance* server) : server_(server) {} + // Return value is the response to return to the child. + envoy::HotRestartMessage shutdownAdmin(); + // Return value is the response to return to the child. + envoy::HotRestartMessage + getListenSocketsForChild(const envoy::HotRestartMessage::Request& request); + // 'stats' is a field in the reply protobuf to be sent to the child, which we should populate. 
+ void exportStatsToChild(envoy::HotRestartMessage::Reply::Stats* stats); + void drainListeners(); + + private: + Server::Instance* const server_{}; + }; + +private: + void onSocketEvent(); + + const int restart_epoch_; + sockaddr_un child_address_; + Event::FileEventPtr socket_event_; + std::unique_ptr internal_; +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/http/admin.cc b/source/server/http/admin.cc index 4d60e0ed7aa85..d893b8be8c1eb 100644 --- a/source/server/http/admin.cc +++ b/source/server/http/admin.cc @@ -182,6 +182,14 @@ void setHealthFlag(Upstream::Host::HealthFlag flag, const Upstream::Host& host, health_status.set_failed_active_degraded_check( host.healthFlagGet(Upstream::Host::HealthFlag::DEGRADED_ACTIVE_HC)); break; + case Upstream::Host::HealthFlag::PENDING_DYNAMIC_REMOVAL: + health_status.set_pending_dynamic_removal( + host.healthFlagGet(Upstream::Host::HealthFlag::PENDING_DYNAMIC_REMOVAL)); + break; + case Upstream::Host::HealthFlag::PENDING_ACTIVE_HC: + health_status.set_pending_active_hc( + host.healthFlagGet(Upstream::Host::HealthFlag::PENDING_ACTIVE_HC)); + break; } } } // namespace @@ -514,6 +522,53 @@ Http::Code AdminImpl::handlerCpuProfiler(absl::string_view url, Http::HeaderMap& return Http::Code::OK; } +Http::Code AdminImpl::handlerHeapProfiler(absl::string_view url, Http::HeaderMap&, + Buffer::Instance& response, AdminStream&) { + if (!Profiler::Heap::profilerEnabled()) { + response.add("The current build does not support heap profiler"); + return Http::Code::NotImplemented; + } + + Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); + if (query_params.size() != 1 || query_params.begin()->first != "enable" || + (query_params.begin()->second != "y" && query_params.begin()->second != "n")) { + response.add("?enable=\n"); + return Http::Code::BadRequest; + } + + Http::Code res = Http::Code::OK; + bool enable = query_params.begin()->second == "y"; + if (enable) { + if 
(Profiler::Heap::isProfilerStarted()) { + response.add("Fail to start heap profiler: already started"); + res = Http::Code::BadRequest; + } else if (!Profiler::Heap::startProfiler(profile_path_)) { + // GCOVR_EXCL_START + // TODO(silentdai) remove the GCOVR when startProfiler is better implemented + response.add("Fail to start the heap profiler"); + res = Http::Code::InternalServerError; + // GCOVR_EXCL_END + } else { + response.add("Starting heap profiler"); + res = Http::Code::OK; + } + } else { + // !enable + if (!Profiler::Heap::isProfilerStarted()) { + response.add("Fail to stop heap profiler: not started"); + res = Http::Code::BadRequest; + } else { + Profiler::Heap::stopProfiler(); + response.add( + fmt::format("Heap profiler stopped and data written to {}. See " + "http://goog-perftools.sourceforge.net/doc/heap_profiler.html for details.", + profile_path_)); + res = Http::Code::OK; + } + } + return res; +} + Http::Code AdminImpl::handlerHealthcheckFail(absl::string_view, Http::HeaderMap&, Buffer::Instance& response, AdminStream&) { server_.failHealthcheck(true); @@ -539,7 +594,7 @@ Http::Code AdminImpl::handlerLogging(absl::string_view url, Http::HeaderMap&, Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); Http::Code rc = Http::Code::OK; - if (query_params.size() > 0 && !changeLogLevel(query_params)) { + if (!query_params.empty() && !changeLogLevel(query_params)) { response.add("usage: /logging?= (change single level)\n"); response.add("usage: /logging?level= (change all levels)\n"); response.add("levels: "); @@ -592,7 +647,7 @@ Http::Code AdminImpl::handlerServerInfo(absl::string_view, Http::HeaderMap& head server_info.set_version(VersionInfo::version()); switch (server_.initManager().state()) { - case Init::Manager::State::NotInitialized: + case Init::Manager::State::Uninitialized: server_info.set_state(envoy::admin::v2alpha::ServerInfo::PRE_INITIALIZING); break; case Init::Manager::State::Initializing: @@ -773,7 +828,7 @@ 
uint64_t PrometheusStatsFormatter::statsAsPrometheus( response.add(fmt::format("{0}_bucket{{{1}le=\"+Inf\"}} {2}\n", metric_name, hist_tags, stats.sampleCount())); - response.add(fmt::format("{0}_sum{{{1}}} {2}\n", metric_name, tags, stats.sampleSum())); + response.add(fmt::format("{0}_sum{{{1}}} {2:.32g}\n", metric_name, tags, stats.sampleSum())); response.add(fmt::format("{0}_count{{{1}}} {2}\n", metric_name, tags, stats.sampleCount())); } @@ -1063,7 +1118,7 @@ AdminImpl::AdminImpl(const std::string& profile_path, Server::Instance& server) stats_(Http::ConnectionManagerImpl::generateStats("http.admin.", server_.stats())), tracing_stats_( Http::ConnectionManagerImpl::generateTracingStats("http.admin.", no_op_store_)), - route_config_provider_(server.timeSystem()), + route_config_provider_(server.timeSource()), // TODO(jsedgwick) add /runtime_reset endpoint that removes all admin-set values handlers_{ {"/", "Admin home page", MAKE_ADMIN_HANDLER(handlerAdminHome), false, false}, @@ -1076,6 +1131,8 @@ AdminImpl::AdminImpl(const std::string& profile_path, Server::Instance& server) MAKE_ADMIN_HANDLER(handlerContention), false, false}, {"/cpuprofiler", "enable/disable the CPU profiler", MAKE_ADMIN_HANDLER(handlerCpuProfiler), false, true}, + {"/heapprofiler", "enable/disable the heap profiler", + MAKE_ADMIN_HANDLER(handlerHeapProfiler), false, true}, {"/healthcheck/fail", "cause the server to fail health checks", MAKE_ADMIN_HANDLER(handlerHealthcheckFail), false, true}, {"/healthcheck/ok", "cause the server to pass health checks", @@ -1103,7 +1160,7 @@ AdminImpl::AdminImpl(const std::string& profile_path, Server::Instance& server) {"/runtime_modify", "modify runtime values", MAKE_ADMIN_HANDLER(handlerRuntimeModify), false, true}, }, - date_provider_(server.dispatcher().timeSystem()), + date_provider_(server.dispatcher().timeSource()), admin_filter_chain_(std::make_shared()) {} Http::ServerConnectionPtr AdminImpl::createCodec(Network::Connection& connection, @@ -1120,7 
+1177,7 @@ bool AdminImpl::createNetworkFilterChain(Network::Connection& connection, // the envoy is overloaded. connection.addReadFilter(Network::ReadFilterSharedPtr{new Http::ConnectionManagerImpl( *this, server_.drainManager(), server_.random(), server_.httpContext(), server_.runtime(), - server_.localInfo(), server_.clusterManager(), nullptr, server_.timeSystem())}); + server_.localInfo(), server_.clusterManager(), nullptr, server_.timeSource())}); return true; } @@ -1148,8 +1205,8 @@ Http::Code AdminImpl::runCallback(absl::string_view path_and_query, if (method != Http::Headers::get().MethodValues.Post) { ENVOY_LOG(error, "admin path \"{}\" mutates state, method={} rather than POST", handler.prefix_, method); - code = Http::Code::BadRequest; - response.add("Invalid request; POST required"); + code = Http::Code::MethodNotAllowed; + response.add(fmt::format("Method {} not allowed, POST required.", method)); break; } } diff --git a/source/server/http/admin.h b/source/server/http/admin.h index 8587c9ec52df9..3eeafd6ded7b5 100644 --- a/source/server/http/admin.h +++ b/source/server/http/admin.h @@ -97,7 +97,6 @@ class AdminImpl : public Admin, Http::DateProvider& dateProvider() override { return date_provider_; } std::chrono::milliseconds drainTimeout() override { return std::chrono::milliseconds(100); } Http::FilterChainFactory& filterFactory() override { return *this; } - bool reverseEncodeOrder() override { return false; } bool generateRequestId() override { return false; } absl::optional idleTimeout() const override { return idle_timeout_; } uint32_t maxRequestHeadersKb() const override { return max_request_headers_kb_; } @@ -127,6 +126,7 @@ class AdminImpl : public Admin, Http::ConnectionManagerListenerStats& listenerStats() override { return listener_->stats_; } bool proxy100Continue() const override { return false; } const Http::Http1Settings& http1Settings() const override { return http1_settings_; } + bool shouldNormalizePath() const override { return 
true; } Http::Code request(absl::string_view path_and_query, absl::string_view method, Http::HeaderMap& response_headers, std::string& body) override; void closeSocket(); @@ -154,6 +154,7 @@ class AdminImpl : public Admin, Router::ConfigConstSharedPtr config() override { return config_; } absl::optional configInfo() const override { return {}; } SystemTime lastUpdated() const override { return time_source_.systemTime(); } + void onConfigUpdate() override {} Router::ConfigConstSharedPtr config_; TimeSource& time_source_; @@ -210,6 +211,9 @@ class AdminImpl : public Admin, Buffer::Instance& response, AdminStream&); Http::Code handlerCpuProfiler(absl::string_view path_and_query, Http::HeaderMap& response_headers, Buffer::Instance& response, AdminStream&); + Http::Code handlerHeapProfiler(absl::string_view path_and_query, + Http::HeaderMap& response_headers, Buffer::Instance& response, + AdminStream&); Http::Code handlerHealthcheckFail(absl::string_view path_and_query, Http::HeaderMap& response_headers, Buffer::Instance& response, AdminStream&); @@ -268,7 +272,6 @@ class AdminImpl : public Admin, Stats::Scope& listenerScope() override { return *scope_; } uint64_t listenerTag() const override { return 0; } const std::string& name() const override { return name_; } - bool reverseWriteFilterOrder() const override { return false; } AdminImpl& parent_; const std::string name_; diff --git a/source/server/init_manager_impl.cc b/source/server/init_manager_impl.cc deleted file mode 100644 index cbf83677a2f30..0000000000000 --- a/source/server/init_manager_impl.cc +++ /dev/null @@ -1,48 +0,0 @@ -#include "server/init_manager_impl.h" - -#include - -#include "common/common/assert.h" - -namespace Envoy { -namespace Server { - -void InitManagerImpl::initialize(std::function callback) { - ASSERT(state_ == State::NotInitialized); - if (targets_.empty()) { - callback(); - state_ = State::Initialized; - } else { - callback_ = callback; - state_ = State::Initializing; - // 
Target::initialize(...) method can modify the list to remove the item currently - // being initialized, so we increment the iterator before calling initialize. - for (auto iter = targets_.begin(); iter != targets_.end();) { - Init::Target* target = *iter; - ++iter; - initializeTarget(*target); - } - } -} - -void InitManagerImpl::initializeTarget(Init::Target& target) { - target.initialize([this, &target]() -> void { - ASSERT(std::find(targets_.begin(), targets_.end(), &target) != targets_.end()); - targets_.remove(&target); - if (targets_.empty()) { - state_ = State::Initialized; - callback_(); - } - }); -} - -void InitManagerImpl::registerTarget(Init::Target& target) { - ASSERT(state_ != State::Initialized); - targets_.push_back(&target); - if (state_ == State::Initializing) { - initializeTarget(target); - } -} - -} // namespace Server -} // namespace Envoy diff --git a/source/server/init_manager_impl.h b/source/server/init_manager_impl.h deleted file mode 100644 index 8e3d67d455b4b..0000000000000 --- a/source/server/init_manager_impl.h +++ /dev/null @@ -1,31 +0,0 @@ -#pragma once - -#include - -#include "envoy/init/init.h" - -namespace Envoy { -namespace Server { - -/** - * Implementation of Init::Manager for use during post cluster manager init / pre listening. - * TODO(JimmyCYJ): Move InitManagerImpl into a new subdirectory in source/ called init/. 
- */ -class InitManagerImpl : public Init::Manager { -public: - void initialize(std::function callback); - - // Init::Manager - void registerTarget(Init::Target& target) override; - State state() const override { return state_; } - -private: - void initializeTarget(Init::Target& target); - - std::list targets_; - State state_{State::NotInitialized}; - std::function callback_; -}; - -} // namespace Server -} // namespace Envoy diff --git a/source/server/lds_api.cc b/source/server/lds_api.cc index 9f9d1134ecacb..92c4ec6824278 100644 --- a/source/server/lds_api.cc +++ b/source/server/lds_api.cc @@ -20,33 +20,34 @@ LdsApiImpl::LdsApiImpl(const envoy::api::v2::core::ConfigSource& lds_config, Runtime::RandomGenerator& random, Init::Manager& init_manager, const LocalInfo::LocalInfo& local_info, Stats::Scope& scope, ListenerManager& lm, Api::Api& api) - : listener_manager_(lm), scope_(scope.createScope("listener_manager.lds.")), cm_(cm) { - subscription_ = - Envoy::Config::SubscriptionFactory::subscriptionFromConfigSource( - lds_config, local_info, dispatcher, cm, random, *scope_, - "envoy.api.v2.ListenerDiscoveryService.FetchListeners", - "envoy.api.v2.ListenerDiscoveryService.StreamListeners", api); + : listener_manager_(lm), scope_(scope.createScope("listener_manager.lds.")), cm_(cm), + init_target_("LDS", [this]() { subscription_->start({}, *this); }) { + subscription_ = Envoy::Config::SubscriptionFactory::subscriptionFromConfigSource( + lds_config, local_info, dispatcher, cm, random, *scope_, + "envoy.api.v2.ListenerDiscoveryService.FetchListeners", + "envoy.api.v2.ListenerDiscoveryService.StreamListeners", + Grpc::Common::typeUrl(envoy::api::v2::Listener().GetDescriptor()->full_name()), api); Config::Utility::checkLocalInfo("lds", local_info); - init_manager.registerTarget(*this); + init_manager.add(init_target_); } -void LdsApiImpl::initialize(std::function callback) { - initialize_callback_ = callback; - subscription_->start({}, *this); -} - -void 
LdsApiImpl::onConfigUpdate(const ResourceVector& resources, const std::string& version_info) { +void LdsApiImpl::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + const std::string& version_info) { cm_.adsMux().pause(Config::TypeUrl::get().RouteConfiguration); Cleanup rds_resume([this] { cm_.adsMux().resume(Config::TypeUrl::get().RouteConfiguration); }); + + std::vector listeners; + for (const auto& listener_blob : resources) { + listeners.push_back(MessageUtil::anyConvert(listener_blob)); + MessageUtil::validate(listeners.back()); + } + std::vector exception_msgs; std::unordered_set listener_names; - for (const auto& listener : resources) { + for (const auto& listener : listeners) { if (!listener_names.insert(listener.name()).second) { throw EnvoyException(fmt::format("duplicate listener {} found", listener.name())); } } - for (const auto& listener : resources) { - MessageUtil::validate(listener); - } // We need to keep track of which listeners we might need to remove. std::unordered_map> listeners_to_remove; @@ -57,7 +58,7 @@ void LdsApiImpl::onConfigUpdate(const ResourceVector& resources, const std::stri for (const auto& listener : listener_manager_.listeners()) { listeners_to_remove.emplace(listener.get().name(), listener); } - for (const auto& listener : resources) { + for (const auto& listener : listeners) { listeners_to_remove.erase(listener.name()); } for (const auto& listener : listeners_to_remove) { @@ -66,8 +67,8 @@ void LdsApiImpl::onConfigUpdate(const ResourceVector& resources, const std::stri } } - for (const auto& listener : resources) { - const std::string listener_name = listener.name(); + for (const auto& listener : listeners) { + const std::string& listener_name = listener.name(); try { if (listener_manager_.addOrUpdateListener(listener, version_info, true)) { ENVOY_LOG(info, "lds: add/update listener '{}'", listener_name); @@ -75,26 +76,22 @@ void LdsApiImpl::onConfigUpdate(const ResourceVector& resources, const std::stri 
ENVOY_LOG(debug, "lds: add/update listener '{}' skipped", listener_name); } } catch (const EnvoyException& e) { - throw EnvoyException( - fmt::format("Error adding/updating listener {}: {}", listener_name, e.what())); + exception_msgs.push_back(fmt::format("{}: {}", listener_name, e.what())); } } version_info_ = version_info; - runInitializeCallbackIfAny(); + init_target_.ready(); + if (!exception_msgs.empty()) { + throw EnvoyException(fmt::format("Error adding/updating listener(s) {}", + StringUtil::join(exception_msgs, ", "))); + } } void LdsApiImpl::onConfigUpdateFailed(const EnvoyException*) { // We need to allow server startup to continue, even if we have a bad // config. - runInitializeCallbackIfAny(); -} - -void LdsApiImpl::runInitializeCallbackIfAny() { - if (initialize_callback_) { - initialize_callback_(); - initialize_callback_ = nullptr; - } + init_target_.ready(); } } // namespace Server diff --git a/source/server/lds_api.h b/source/server/lds_api.h index fefea2e171564..859d26a641b71 100644 --- a/source/server/lds_api.h +++ b/source/server/lds_api.h @@ -5,11 +5,12 @@ #include "envoy/api/api.h" #include "envoy/api/v2/lds.pb.h" #include "envoy/config/subscription.h" -#include "envoy/init/init.h" +#include "envoy/init/manager.h" #include "envoy/server/listener_manager.h" #include "envoy/stats/scope.h" #include "common/common/logger.h" +#include "common/init/target_impl.h" namespace Envoy { namespace Server { @@ -18,8 +19,7 @@ namespace Server { * LDS API implementation that fetches via Subscription. 
*/ class LdsApiImpl : public LdsApi, - public Init::Target, - Config::SubscriptionCallbacks, + Config::SubscriptionCallbacks, Logger::Loggable { public: LdsApiImpl(const envoy::api::v2::core::ConfigSource& lds_config, Upstream::ClusterManager& cm, @@ -30,25 +30,26 @@ class LdsApiImpl : public LdsApi, // Server::LdsApi std::string versionInfo() const override { return version_info_; } - // Init::Target - void initialize(std::function callback) override; - // Config::SubscriptionCallbacks - void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) override; + // TODO(fredlas) deduplicate + void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + const std::string& version_info) override; + void onConfigUpdate(const Protobuf::RepeatedPtrField&, + const Protobuf::RepeatedPtrField&, const std::string&) override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } void onConfigUpdateFailed(const EnvoyException* e) override; std::string resourceName(const ProtobufWkt::Any& resource) override { return MessageUtil::anyConvert(resource).name(); } private: - void runInitializeCallbackIfAny(); - - std::unique_ptr> subscription_; + std::unique_ptr subscription_; std::string version_info_; ListenerManager& listener_manager_; Stats::ScopePtr scope_; Upstream::ClusterManager& cm_; - std::function initialize_callback_; + Init::TargetImpl init_target_; }; } // namespace Server diff --git a/source/server/test_hooks.h b/source/server/listener_hooks.h similarity index 64% rename from source/server/test_hooks.h rename to source/server/listener_hooks.h index 4f0eaec1ed815..6293a80dacd09 100644 --- a/source/server/test_hooks.h +++ b/source/server/listener_hooks.h @@ -8,9 +8,9 @@ namespace Envoy { * Hooks in the server to allow for integration testing. The real server just uses an empty * implementation defined below. 
*/ -class TestHooks { +class ListenerHooks { public: - virtual ~TestHooks() {} + virtual ~ListenerHooks() {} /** * Called when a worker has added a listener and it is listening. @@ -21,16 +21,22 @@ class TestHooks { * Called when a worker has removed a listener and it is no longer listening. */ virtual void onWorkerListenerRemoved() PURE; + + /** + * Called when the Runtime::ScopedLoaderSingleton is created by the server. + */ + virtual void onRuntimeCreated() PURE; }; /** - * Empty implementation of TestHooks. + * Empty implementation of ListenerHooks. */ -class DefaultTestHooks : public TestHooks { +class DefaultListenerHooks : public ListenerHooks { public: - // TestHooks + // ListenerHooks void onWorkerListenerAdded() override {} void onWorkerListenerRemoved() override {} + void onRuntimeCreated() override {} }; } // namespace Envoy diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index cd2ae8202e600..c877132033da5 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -51,7 +51,7 @@ std::vector ProdListenerComponentFactory::createNetwor std::vector ret; for (ssize_t i = 0; i < filters.size(); i++) { const auto& proto_config = filters[i]; - const ProtobufTypes::String string_name = proto_config.name(); + const std::string& string_name = proto_config.name(); ENVOY_LOG(debug, " filter #{}:", i); ENVOY_LOG(debug, " name: {}", string_name); const Json::ObjectSharedPtr filter_config = @@ -81,7 +81,7 @@ ProdListenerComponentFactory::createListenerFilterFactoryList_( std::vector ret; for (ssize_t i = 0; i < filters.size(); i++) { const auto& proto_config = filters[i]; - const ProtobufTypes::String string_name = proto_config.name(); + const std::string& string_name = proto_config.name(); ENVOY_LOG(debug, " filter #{}:", i); ENVOY_LOG(debug, " name: {}", string_name); const Json::ObjectSharedPtr filter_config = @@ -118,8 +118,8 @@ Network::SocketSharedPtr 
ProdListenerComponentFactory::createListenSocket( } const std::string addr = fmt::format("unix://{}", address->asString()); const int fd = server_.hotRestart().duplicateParentListenSocket(addr); - Network::IoHandlePtr io_handle = std::make_unique(fd); - if (io_handle->fd() != -1) { + Network::IoHandlePtr io_handle = std::make_unique(fd); + if (io_handle->isOpen()) { ENVOY_LOG(debug, "obtained socket for address {} from parent", addr); return std::make_shared(std::move(io_handle), address); } @@ -133,7 +133,7 @@ Network::SocketSharedPtr ProdListenerComponentFactory::createListenSocket( const int fd = server_.hotRestart().duplicateParentListenSocket(addr); if (fd != -1) { ENVOY_LOG(debug, "obtained socket for address {} from parent", addr); - Network::IoHandlePtr io_handle = std::make_unique(fd); + Network::IoHandlePtr io_handle = std::make_unique(fd); if (socket_type == Network::Address::SocketType::Stream) { return std::make_shared(std::move(io_handle), address, options); } else { @@ -165,10 +165,11 @@ ListenerImpl::ListenerImpl(const envoy::api::v2::Listener& config, const std::st PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, use_original_dst, false)), per_connection_buffer_limit_bytes_( PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, per_connection_buffer_limit_bytes, 1024 * 1024)), - listener_tag_(parent_.factory_.nextListenerTag()), name_(name), - reverse_write_filter_order_( - PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, bugfix_reverse_write_filter_order, true)), - modifiable_(modifiable), workers_started_(workers_started), hash_(hash), + listener_tag_(parent_.factory_.nextListenerTag()), name_(name), modifiable_(modifiable), + workers_started_(workers_started), hash_(hash), + dynamic_init_manager_(fmt::format("Listener {}", name)), + init_watcher_(std::make_unique( + "ListenerImpl", [this] { parent_.onListenerWarmed(*this); })), local_drain_manager_(parent.factory_.createDrainManager(config.drain_type())), config_(config), version_info_(version_info), listener_filters_timeout_( 
@@ -184,7 +185,7 @@ ListenerImpl::ListenerImpl(const envoy::api::v2::Listener& config, const std::st config.tcp_fast_open_queue_length().value())); } - if (config.socket_options().size() > 0) { + if (!config.socket_options().empty()) { addListenSocketOptions( Network::SocketOptionFactory::buildLiteralOptions(config.socket_options())); } @@ -219,6 +220,12 @@ ListenerImpl::ListenerImpl(const envoy::api::v2::Listener& config, const std::st for (const auto& filter_chain : config.filter_chains()) { const auto& filter_chain_match = filter_chain.filter_chain_match(); + if (!filter_chain_match.address_suffix().empty() || filter_chain_match.has_suffix_len() || + filter_chain_match.source_prefix_ranges_size() || filter_chain_match.source_ports_size()) { + throw EnvoyException(fmt::format("error adding listener '{}': contains filter chains with " + "unimplemented fields", + address_->asString())); + } if (filter_chains.find(filter_chain_match) != filter_chains.end()) { throw EnvoyException(fmt::format("error adding listener '{}': multiple filter chains with " "the same matching rules are defined", @@ -318,9 +325,9 @@ ListenerImpl::~ListenerImpl() { // The filter factories may have pending initialize actions (like in the case of RDS). Those // actions will fire in the destructor to avoid blocking initial server startup. If we are using // a local init manager we should block the notification from trying to move us from warming to - // active. This is done here explicitly by setting a boolean and then clearing the factory + // active. This is done here explicitly by resetting the watcher and then clearing the factory // vector for clarity. 
- initialize_canceled_ = true; + init_watcher_.reset(); destination_ports_map_.clear(); } @@ -419,6 +426,14 @@ void ListenerImpl::addFilterChainForSourceTypes( SourceTypesArray& source_types_array, const envoy::api::v2::listener::FilterChainMatch_ConnectionSourceType source_type, const Network::FilterChainSharedPtr& filter_chain) { + if (source_types_array[source_type] != nullptr) { + // If we got here and found already configured branch, then it means that this FilterChainMatch + // is a duplicate, and that there is some overlap in the repeated fields with already processed + // FilterChainMatches. + throw EnvoyException(fmt::format("error adding listener '{}': multiple filter chains with " + "overlapping matching rules are defined", + address_->asString())); + } source_types_array[source_type] = filter_chain; } @@ -621,13 +636,9 @@ void ListenerImpl::initialize() { last_updated_ = timeSource().systemTime(); // If workers have already started, we shift from using the global init manager to using a local // per listener init manager. See ~ListenerImpl() for why we gate the onListenerWarmed() call - // with initialize_canceled_. + // by resetting the watcher. 
if (workers_started_) { - dynamic_init_manager_.initialize([this]() -> void { - if (!initialize_canceled_) { - parent_.onListenerWarmed(*this); - } - }); + dynamic_init_manager_.initialize(*init_watcher_); } } @@ -665,10 +676,13 @@ void ListenerImpl::setSocket(const Network::SocketSharedPtr& socket) { ListenerManagerImpl::ListenerManagerImpl(Instance& server, ListenerComponentFactory& listener_factory, - WorkerFactory& worker_factory) - : server_(server), factory_(listener_factory), stats_(generateStats(server.stats())), + WorkerFactory& worker_factory, + bool enable_dispatcher_stats) + : server_(server), factory_(listener_factory), + scope_(server.stats().createScope("listener_manager.")), stats_(generateStats(*scope_)), config_tracker_entry_(server.admin().getConfigTracker().add( - "listeners", [this] { return dumpListenerConfigs(); })) { + "listeners", [this] { return dumpListenerConfigs(); })), + enable_dispatcher_stats_(enable_dispatcher_stats) { for (uint32_t i = 0; i < server.options().concurrency(); i++) { workers_.emplace_back(worker_factory.createWorker(server.overloadManager())); } @@ -712,9 +726,7 @@ ProtobufTypes::MessagePtr ListenerManagerImpl::dumpListenerConfigs() { } ListenerManagerStats ListenerManagerImpl::generateStats(Stats::Scope& scope) { - const std::string final_prefix = "listener_manager."; - return {ALL_LISTENER_MANAGER_STATS(POOL_COUNTER_PREFIX(scope, final_prefix), - POOL_GAUGE_PREFIX(scope, final_prefix))}; + return {ALL_LISTENER_MANAGER_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope))}; } bool ListenerManagerImpl::addOrUpdateListener(const envoy::api::v2::Listener& config, @@ -1000,12 +1012,16 @@ void ListenerManagerImpl::startWorkers(GuardDog& guard_dog) { ENVOY_LOG(info, "all dependencies initialized. 
starting workers"); ASSERT(!workers_started_); workers_started_ = true; + uint32_t i = 0; for (const auto& worker : workers_) { ASSERT(warming_listeners_.empty()); for (const auto& listener : active_listeners_) { addListenerToWorker(*worker, *listener); } worker->start(guard_dog); + if (enable_dispatcher_stats_) { + worker->initializeStats(*scope_, fmt::format("worker_{}.", i++)); + } } } diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h index fd19d5026eab6..767369578c995 100644 --- a/source/server/listener_manager_impl.h +++ b/source/server/listener_manager_impl.h @@ -12,10 +12,10 @@ #include "envoy/stats/scope.h" #include "common/common/logger.h" +#include "common/init/manager_impl.h" #include "common/network/cidr_range.h" #include "common/network/lc_trie.h" -#include "server/init_manager_impl.h" #include "server/lds_api.h" namespace Envoy { @@ -104,7 +104,7 @@ struct ListenerManagerStats { class ListenerManagerImpl : public ListenerManager, Logger::Loggable { public: ListenerManagerImpl(Instance& server, ListenerComponentFactory& listener_factory, - WorkerFactory& worker_factory); + WorkerFactory& worker_factory, bool enable_dispatcher_stats); void onListenerWarmed(ListenerImpl& listener); @@ -177,9 +177,11 @@ class ListenerManagerImpl : public ListenerManager, Logger::Loggable draining_listeners_; std::list workers_; bool workers_started_{}; + Stats::ScopePtr scope_; ListenerManagerStats stats_; ConfigTracker::EntryOwnerPtr config_tracker_entry_; LdsApiPtr lds_api_; + const bool enable_dispatcher_stats_{}; }; // TODO(mattklein123): Consider getting rid of pre-worker start and post-worker start code by @@ -257,7 +259,6 @@ class ListenerImpl : public Network::ListenerConfig, Stats::Scope& listenerScope() override { return *listener_scope_; } uint64_t listenerTag() const override { return listener_tag_; } const std::string& name() const override { return name_; } - bool reverseWriteFilterOrder() const override { return 
reverse_write_filter_order_; } // Server::Configuration::ListenerFactoryContext AccessLog::AccessLogManager& accessLogManager() override { @@ -281,7 +282,7 @@ class ListenerImpl : public Network::ListenerConfig, const envoy::api::v2::core::Metadata& listenerMetadata() const override { return config_.metadata(); }; - TimeSource& timeSource() override { return api().timeSystem(); } + TimeSource& timeSource() override { return api().timeSource(); } void ensureSocketOptions() { if (!listen_socket_options_) { listen_socket_options_ = @@ -298,6 +299,9 @@ class ListenerImpl : public Network::ListenerConfig, } const Network::ListenerConfig& listenerConfig() const override { return *this; } Api::Api& api() override { return parent_.server_.api(); } + ServerLifecycleNotifier& lifecycleNotifier() override { + return parent_.server_.lifecycleNotifier(); + } // Network::DrainDecision bool drainClose() const override; @@ -398,12 +402,17 @@ class ListenerImpl : public Network::ListenerConfig, const uint32_t per_connection_buffer_limit_bytes_; const uint64_t listener_tag_; const std::string name_; - const bool reverse_write_filter_order_; const bool modifiable_; const bool workers_started_; const uint64_t hash_; - InitManagerImpl dynamic_init_manager_; - bool initialize_canceled_{}; + + // This init manager is populated with targets from the filter chain factories, namely + // RdsRouteConfigSubscription::init_target_, so the listener can wait for route configs. + Init::ManagerImpl dynamic_init_manager_; + + // This init watcher, if available, notifies the "parent" listener manager when listener + // initialization is complete. It may be reset to cancel interest. 
+ std::unique_ptr init_watcher_; std::vector listener_filter_factories_; DrainManagerPtr local_drain_manager_; bool saw_listener_create_failure_{}; diff --git a/source/server/options_impl.cc b/source/server/options_impl.cc index 62971fa3e6a35..ef62769408778 100644 --- a/source/server/options_impl.cc +++ b/source/server/options_impl.cc @@ -11,26 +11,12 @@ #include "common/common/version.h" #include "common/protobuf/utility.h" +#include "server/options_impl_platform.h" + #include "absl/strings/str_split.h" #include "spdlog/spdlog.h" #include "tclap/CmdLine.h" -// Can be overridden at compile time -#ifndef ENVOY_DEFAULT_MAX_STATS -#define ENVOY_DEFAULT_MAX_STATS 16384 -#endif - -// Can be overridden at compile time -// See comment in common/stat/stat_impl.h for rationale behind -// this constant. -#ifndef ENVOY_DEFAULT_MAX_OBJ_NAME_LENGTH -#define ENVOY_DEFAULT_MAX_OBJ_NAME_LENGTH 60 -#endif - -#if ENVOY_DEFAULT_MAX_OBJ_NAME_LENGTH < 60 -#error "ENVOY_DEFAULT_MAX_OBJ_NAME_LENGTH must be >= 60" -#endif - namespace Envoy { OptionsImpl::OptionsImpl(int argc, const char* const* argv, const HotRestartVersionCb& hot_restart_version_cb, @@ -104,19 +90,21 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv, "traffic normally) or 'validate' (validate configs and exit).", false, "serve", "string", cmd); TCLAP::ValueArg max_stats("", "max-stats", - "Maximum number of stats gauges and counters " - "that can be allocated in shared memory.", - false, ENVOY_DEFAULT_MAX_STATS, "uint64_t", cmd); + "Deprecated and unused; please do not specify.", false, 123, + "uint64_t", cmd); TCLAP::ValueArg max_obj_name_len("", "max-obj-name-len", - "Maximum name length for a field in the config " - "(applies to listener name, route config name and" - " the cluster name)", - false, ENVOY_DEFAULT_MAX_OBJ_NAME_LENGTH, "uint64_t", - cmd); + "Deprecated and unused; please do not specify.", false, + 123, "uint64_t", cmd); TCLAP::SwitchArg disable_hot_restart("", "disable-hot-restart", "Disable 
hot restart functionality", cmd, false); TCLAP::SwitchArg enable_mutex_tracing( "", "enable-mutex-tracing", "Enable mutex contention tracing functionality", cmd, false); + TCLAP::SwitchArg cpuset_threads( + "", "cpuset-threads", "Get the default # of worker threads from cpuset size", cmd, false); + + TCLAP::ValueArg use_libevent_buffer("", "use-libevent-buffers", + "Use the original libevent buffer implementation", + false, true, "bool", cmd); cmd.setExceptionHandling(false); try { @@ -136,24 +124,13 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv, throw NoServingException(); } - auto check_numeric_arg = [](bool is_error, uint64_t value, absl::string_view pattern) { - if (is_error) { - const std::string message = fmt::format(std::string(pattern), value); - throw MalformedArgvException(message); - } - }; - check_numeric_arg(max_obj_name_len.getValue() < 60, max_obj_name_len.getValue(), - "error: the 'max-obj-name-len' value specified ({}) is less than the minimum " - "value of 60"); - check_numeric_arg(max_stats.getValue() > 100 * 1000 * 1000, max_stats.getValue(), - "error: the 'max-stats' value specified ({}) is more than the maximum value " - "of 100M"); - // TODO(jmarantz): should we also multiply these to bound the total amount of memory? - hot_restart_disabled_ = disable_hot_restart.getValue(); mutex_tracing_enabled_ = enable_mutex_tracing.getValue(); + libevent_buffer_enabled_ = use_libevent_buffer.getValue(); + cpuset_threads_ = cpuset_threads.getValue(); + log_level_ = default_log_level; for (size_t i = 0; i < ARRAY_SIZE(spdlog::level::level_string_views); i++) { if (log_level.getValue() == spdlog::level::level_string_views[i]) { @@ -188,7 +165,20 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv, // For base ID, scale what the user inputs by 10 so that we have spread for domain sockets. 
base_id_ = base_id.getValue() * 10; - concurrency_ = std::max(1U, concurrency.getValue()); + + if (!concurrency.isSet() && cpuset_threads_) { + // The 'concurrency' command line option wasn't set but the 'cpuset-threads' + // option was set. Use the number of CPUs assigned to the process cpuset, if + // that can be known. + concurrency_ = OptionsImplPlatform::getCpuCount(); + } else { + if (concurrency.isSet() && cpuset_threads_ && cpuset_threads.isSet()) { + ENVOY_LOG(warn, "Both --concurrency and --cpuset-threads options are set; not applying " + "--cpuset-threads."); + } + concurrency_ = std::max(1U, concurrency.getValue()); + } + config_path_ = config_path.getValue(); config_yaml_ = config_yaml.getValue(); allow_unknown_fields_ = allow_unknown_fields.getValue(); @@ -204,12 +194,9 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv, file_flush_interval_msec_ = std::chrono::milliseconds(file_flush_interval_msec.getValue()); drain_time_ = std::chrono::seconds(drain_time_s.getValue()); parent_shutdown_time_ = std::chrono::seconds(parent_shutdown_time_s.getValue()); - max_stats_ = max_stats.getValue(); - stats_options_.max_obj_name_length_ = max_obj_name_len.getValue(); if (hot_restart_version_option.getValue()) { - std::cerr << hot_restart_version_cb(max_stats.getValue(), stats_options_.maxNameLength(), - !hot_restart_disabled_); + std::cerr << hot_restart_version_cb(!hot_restart_disabled_); throw NoServingException(); } } @@ -287,10 +274,9 @@ Server::CommandLineOptionsPtr OptionsImpl::toCommandLineOptions() const { Protobuf::util::TimeUtil::SecondsToDuration(parentShutdownTime().count())); command_line_options->mutable_drain_time()->MergeFrom( Protobuf::util::TimeUtil::SecondsToDuration(drainTime().count())); - command_line_options->set_max_stats(maxStats()); - command_line_options->set_max_obj_name_len(statsOptions().maxObjNameLength()); command_line_options->set_disable_hot_restart(hotRestartDisabled()); 
command_line_options->set_enable_mutex_tracing(mutexTracingEnabled()); + command_line_options->set_cpuset_threads(cpusetThreadsEnabled()); command_line_options->set_restart_epoch(restartEpoch()); return command_line_options; } @@ -302,7 +288,7 @@ OptionsImpl::OptionsImpl(const std::string& service_cluster, const std::string& log_format_(Logger::Logger::DEFAULT_LOG_FORMAT), restart_epoch_(0u), service_cluster_(service_cluster), service_node_(service_node), service_zone_(service_zone), file_flush_interval_msec_(10000), drain_time_(600), parent_shutdown_time_(900), - mode_(Server::Mode::Serve), max_stats_(ENVOY_DEFAULT_MAX_STATS), hot_restart_disabled_(false), - signal_handling_enabled_(true), mutex_tracing_enabled_(false) {} + mode_(Server::Mode::Serve), hot_restart_disabled_(false), signal_handling_enabled_(true), + mutex_tracing_enabled_(false), cpuset_threads_(false), libevent_buffer_enabled_(false) {} } // namespace Envoy diff --git a/source/server/options_impl.h b/source/server/options_impl.h index cd0ec6a9d180f..2441d4893e649 100644 --- a/source/server/options_impl.h +++ b/source/server/options_impl.h @@ -6,9 +6,8 @@ #include "envoy/common/exception.h" #include "envoy/server/options.h" -#include "envoy/stats/stats_options.h" -#include "common/stats/stats_options_impl.h" +#include "common/common/logger.h" #include "spdlog/spdlog.h" @@ -16,12 +15,12 @@ namespace Envoy { /** * Implementation of Server::Options. */ -class OptionsImpl : public Server::Options { +class OptionsImpl : public Server::Options, protected Logger::Loggable { public: /** - * Parameters are max_num_stats, max_stat_name_len, hot_restart_enabled + * Parameters are max_stat_name_len, hot_restart_enabled */ - typedef std::function HotRestartVersionCb; + typedef std::function HotRestartVersionCb; /** * @throw NoServingException if Envoy has already done everything specified by the argv (e.g. 
@@ -65,14 +64,13 @@ class OptionsImpl : public Server::Options { } void setServiceNodeName(const std::string& service_node) { service_node_ = service_node; } void setServiceZone(const std::string& service_zone) { service_zone_ = service_zone; } - void setMaxStats(uint64_t max_stats) { max_stats_ = max_stats; } - void setStatsOptions(Stats::StatsOptionsImpl stats_options) { stats_options_ = stats_options; } void setHotRestartDisabled(bool hot_restart_disabled) { hot_restart_disabled_ = hot_restart_disabled; } void setSignalHandling(bool signal_handling_enabled) { signal_handling_enabled_ = signal_handling_enabled; } + void setCpusetThreads(bool cpuset_threads_enabled) { cpuset_threads_ = cpuset_threads_enabled; } // Server::Options uint64_t baseId() const override { return base_id_; } @@ -100,13 +98,13 @@ class OptionsImpl : public Server::Options { const std::string& serviceClusterName() const override { return service_cluster_; } const std::string& serviceNodeName() const override { return service_node_; } const std::string& serviceZone() const override { return service_zone_; } - uint64_t maxStats() const override { return max_stats_; } - const Stats::StatsOptions& statsOptions() const override { return stats_options_; } bool hotRestartDisabled() const override { return hot_restart_disabled_; } bool signalHandlingEnabled() const override { return signal_handling_enabled_; } bool mutexTracingEnabled() const override { return mutex_tracing_enabled_; } + bool libeventBufferEnabled() const override { return libevent_buffer_enabled_; } virtual Server::CommandLineOptionsPtr toCommandLineOptions() const override; void parseComponentLogLevels(const std::string& component_log_levels); + bool cpusetThreadsEnabled() const override { return cpuset_threads_; } uint32_t count() const; private: @@ -132,11 +130,11 @@ class OptionsImpl : public Server::Options { std::chrono::seconds drain_time_; std::chrono::seconds parent_shutdown_time_; Server::Mode mode_; - uint64_t 
max_stats_; - Stats::StatsOptionsImpl stats_options_; bool hot_restart_disabled_; bool signal_handling_enabled_; bool mutex_tracing_enabled_; + bool cpuset_threads_; + bool libevent_buffer_enabled_; uint32_t count_; }; diff --git a/source/server/options_impl_platform.h b/source/server/options_impl_platform.h new file mode 100644 index 0000000000000..7d628d3699025 --- /dev/null +++ b/source/server/options_impl_platform.h @@ -0,0 +1,12 @@ +#pragma once + +#include + +#include "common/common/logger.h" + +namespace Envoy { +class OptionsImplPlatform : protected Logger::Loggable { +public: + static uint32_t getCpuCount(); +}; +} // namespace Envoy diff --git a/source/server/options_impl_platform_default.cc b/source/server/options_impl_platform_default.cc new file mode 100644 index 0000000000000..3b4cbbe3118e9 --- /dev/null +++ b/source/server/options_impl_platform_default.cc @@ -0,0 +1,14 @@ +#include + +#include "common/common/logger.h" + +#include "server/options_impl_platform.h" + +namespace Envoy { + +uint32_t OptionsImplPlatform::getCpuCount() { + ENVOY_LOG(warn, "CPU number provided by HW thread count (instead of cpuset)."); + return std::thread::hardware_concurrency(); +} + +} // namespace Envoy diff --git a/source/server/options_impl_platform_linux.cc b/source/server/options_impl_platform_linux.cc new file mode 100644 index 0000000000000..069c68ab83da1 --- /dev/null +++ b/source/server/options_impl_platform_linux.cc @@ -0,0 +1,46 @@ +#if !defined(__linux__) +#error "Linux platform file is part of non-Linux build." 
+#endif + +#include "server/options_impl_platform_linux.h" + +#include + +#include + +#include "common/api/os_sys_calls_impl_linux.h" + +#include "server/options_impl_platform.h" + +namespace Envoy { + +uint32_t OptionsImplPlatformLinux::getCpuAffinityCount(unsigned int hw_threads) { + unsigned int threads = 0; + pid_t pid = getpid(); + cpu_set_t mask; + auto& linux_os_syscalls = Api::LinuxOsSysCallsSingleton::get(); + + CPU_ZERO(&mask); + const Api::SysCallIntResult result = + linux_os_syscalls.sched_getaffinity(pid, sizeof(cpu_set_t), &mask); + if (result.rc_ == -1) { + // Fall back to number of hardware threads. + return hw_threads; + } + + threads = CPU_COUNT(&mask); + + // Sanity check. + if (threads > 0 && threads <= hw_threads) { + return threads; + } + + return hw_threads; +} + +uint32_t OptionsImplPlatform::getCpuCount() { + unsigned int hw_threads = std::max(1U, std::thread::hardware_concurrency()); + return OptionsImplPlatformLinux::getCpuAffinityCount(hw_threads); +} + +} // namespace Envoy diff --git a/source/server/options_impl_platform_linux.h b/source/server/options_impl_platform_linux.h new file mode 100644 index 0000000000000..dfdb0c7efae0b --- /dev/null +++ b/source/server/options_impl_platform_linux.h @@ -0,0 +1,15 @@ +#pragma once + +#if !defined(__linux__) +#error "Linux platform file is part of non-Linux build." 
+#endif + +#include +#include + +namespace Envoy { +class OptionsImplPlatformLinux { +public: + static uint32_t getCpuAffinityCount(unsigned int hw_threads); +}; +} // namespace Envoy diff --git a/source/server/overload_manager_impl.cc b/source/server/overload_manager_impl.cc index 37289afafec8d..364f739bfaa1a 100644 --- a/source/server/overload_manager_impl.cc +++ b/source/server/overload_manager_impl.cc @@ -34,14 +34,14 @@ class ThresholdTriggerImpl : public OverloadAction::Trigger { }; std::string StatsName(const std::string& a, const std::string& b) { - return absl::StrCat("overload.", a, b); + return absl::StrCat("overload.", a, ".", b); } } // namespace OverloadAction::OverloadAction(const envoy::config::overload::v2alpha::OverloadAction& config, Stats::Scope& stats_scope) - : active_gauge_(stats_scope.gauge(StatsName(config.name(), ".active"))) { + : active_gauge_(stats_scope.gauge(StatsName(config.name(), "active"))) { for (const auto& trigger_config : config.triggers()) { TriggerPtr trigger; @@ -162,18 +162,19 @@ void OverloadManagerImpl::stop() { resources_.clear(); } -void OverloadManagerImpl::registerForAction(const std::string& action, +bool OverloadManagerImpl::registerForAction(const std::string& action, Event::Dispatcher& dispatcher, OverloadActionCb callback) { ASSERT(!started_); if (actions_.find(action) == actions_.end()) { - ENVOY_LOG(debug, "No overload action configured for {}.", action); - return; + ENVOY_LOG(debug, "No overload action is configured for {}.", action); + return false; } action_to_callbacks_.emplace(std::piecewise_construct, std::forward_as_tuple(action), std::forward_as_tuple(dispatcher, callback)); + return true; } ThreadLocalOverloadState& OverloadManagerImpl::getThreadLocalOverloadState() { @@ -191,7 +192,7 @@ void OverloadManagerImpl::updateResourcePressure(const std::string& resource, do const bool is_active = action_it->second.isActive(); const auto state = is_active ? 
OverloadActionState::Active : OverloadActionState::Inactive; - ENVOY_LOG(info, "Overload action {} has become {}", action, + ENVOY_LOG(info, "Overload action {} became {}", action, is_active ? "active" : "inactive"); tls_->runOnAllThreads([this, action, state] { tls_->getTyped().setState(action, state); @@ -209,9 +210,9 @@ void OverloadManagerImpl::updateResourcePressure(const std::string& resource, do OverloadManagerImpl::Resource::Resource(const std::string& name, ResourceMonitorPtr monitor, OverloadManagerImpl& manager, Stats::Scope& stats_scope) : name_(name), monitor_(std::move(monitor)), manager_(manager), pending_update_(false), - pressure_gauge_(stats_scope.gauge(StatsName(name, ".pressure"))), - failed_updates_counter_(stats_scope.counter(StatsName(name, ".failed_updates"))), - skipped_updates_counter_(stats_scope.counter(StatsName(name, ".skipped_updates"))) {} + pressure_gauge_(stats_scope.gauge(StatsName(name, "pressure"))), + failed_updates_counter_(stats_scope.counter(StatsName(name, "failed_updates"))), + skipped_updates_counter_(stats_scope.counter(StatsName(name, "skipped_updates"))) {} void OverloadManagerImpl::Resource::update() { if (!pending_update_) { diff --git a/source/server/overload_manager_impl.h b/source/server/overload_manager_impl.h index fb180ccec05c0..9b7293ed231c9 100644 --- a/source/server/overload_manager_impl.h +++ b/source/server/overload_manager_impl.h @@ -58,7 +58,7 @@ class OverloadManagerImpl : Logger::Loggable, public OverloadM // Server::OverloadManager void start() override; - void registerForAction(const std::string& action, Event::Dispatcher& dispatcher, + bool registerForAction(const std::string& action, Event::Dispatcher& dispatcher, OverloadActionCb callback) override; ThreadLocalOverloadState& getThreadLocalOverloadState() override; diff --git a/source/server/proto_descriptors.cc b/source/server/proto_descriptors.cc index fd12f9e0a9f5f..608299149b5a0 100644 --- a/source/server/proto_descriptors.cc +++ 
b/source/server/proto_descriptors.cc @@ -4,10 +4,8 @@ #include "envoy/api/v2/eds.pb.h" #include "envoy/api/v2/lds.pb.h" #include "envoy/api/v2/rds.pb.h" -#include "envoy/service/accesslog/v2/als.pb.h" #include "envoy/service/discovery/v2/ads.pb.h" #include "envoy/service/discovery/v2/hds.pb.h" -#include "envoy/service/metrics/v2/metrics_service.pb.h" #include "envoy/service/ratelimit/v2/rls.pb.h" #include "common/common/fmt.h" @@ -30,8 +28,6 @@ bool validateProtoDescriptors() { "envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources", "envoy.service.discovery.v2.HealthDiscoveryService.FetchHealthCheck", "envoy.service.discovery.v2.HealthDiscoveryService.StreamHealthCheck", - "envoy.service.accesslog.v2.AccessLogService.StreamAccessLogs", - "envoy.service.metrics.v2.MetricsService.StreamMetrics", "envoy.service.ratelimit.v2.RateLimitService.ShouldRateLimit", }; diff --git a/source/server/server.cc b/source/server/server.cc index e2a2bb36426c4..97d4ce8d4a55a 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -2,6 +2,7 @@ #include +#include #include #include #include @@ -21,6 +22,7 @@ #include "common/api/api_impl.h" #include "common/api/os_sys_calls_impl.h" +#include "common/buffer/buffer_impl.h" #include "common/common/mutex_tracer_impl.h" #include "common/common/utility.h" #include "common/common/version.h" @@ -41,34 +43,36 @@ #include "server/configuration_impl.h" #include "server/connection_handler_impl.h" #include "server/guarddog_impl.h" -#include "server/test_hooks.h" +#include "server/listener_hooks.h" #include "server/wasm_config_impl.h" namespace Envoy { namespace Server { InstanceImpl::InstanceImpl(const Options& options, Event::TimeSystem& time_system, - Network::Address::InstanceConstSharedPtr local_address, TestHooks& hooks, - HotRestart& restarter, Stats::StoreRoot& store, + Network::Address::InstanceConstSharedPtr local_address, + ListenerHooks& hooks, HotRestart& restarter, Stats::StoreRoot& store, 
Thread::BasicLockable& access_log_lock, ComponentFactory& component_factory, Runtime::RandomGeneratorPtr&& random_generator, - ThreadLocal::Instance& tls, Thread::ThreadFactory& thread_factory) - : shutdown_(false), options_(options), time_system_(time_system), restarter_(restarter), + ThreadLocal::Instance& tls, Thread::ThreadFactory& thread_factory, + Filesystem::Instance& file_system) + : secret_manager_(std::make_unique()), shutdown_(false), + options_(options), time_source_(time_system), restarter_(restarter), start_time_(time(nullptr)), original_start_time_(start_time_), stats_store_(store), - thread_local_(tls), - api_(new Api::Impl(options.fileFlushIntervalMsec(), thread_factory, store, time_system)), - secret_manager_(std::make_unique()), + thread_local_(tls), api_(new Api::Impl(thread_factory, store, time_system, file_system)), dispatcher_(api_->allocateDispatcher()), singleton_manager_(new Singleton::ManagerImpl(api_->threadFactory().currentThreadId())), handler_(new ConnectionHandlerImpl(ENVOY_LOGGER(), *dispatcher_)), random_generator_(std::move(random_generator)), listener_component_factory_(*this), worker_factory_(thread_local_, *api_, hooks), dns_resolver_(dispatcher_->createDnsResolver({})), - access_log_manager_(*api_, *dispatcher_, access_log_lock), terminated_(false), + access_log_manager_(options.fileFlushIntervalMsec(), *api_, *dispatcher_, access_log_lock, + store), + terminated_(false), mutex_tracer_(options.mutexTracingEnabled() ? 
&Envoy::MutexTracerImpl::getOrCreateTracer() - : nullptr) { - + : nullptr), + main_thread_id_(std::this_thread::get_id()) { try { if (!options.logPath().empty()) { try { @@ -82,7 +86,7 @@ InstanceImpl::InstanceImpl(const Options& options, Event::TimeSystem& time_syste restarter_.initialize(*dispatcher_, *this); drain_manager_ = component_factory.createDrainManager(*this); - initialize(options, local_address, component_factory); + initialize(options, std::move(local_address), component_factory, hooks); } catch (const EnvoyException& e) { ENVOY_LOG(critical, "error initializing configuration '{}': {}", options.configPath(), e.what()); @@ -146,14 +150,16 @@ void InstanceImpl::flushStats() { // A shutdown initiated before this callback may prevent this from being called as per // the semantics documented in ThreadLocal's runOnAllThreads method. stats_store_.mergeHistograms([this]() -> void { - HotRestart::GetParentStatsInfo info; - restarter_.getParentStats(info); + // mergeParentStatsIfAny() does nothing and returns a struct of 0s if there is no parent. 
+ HotRestart::ServerStatsFromParent parent_stats = restarter_.mergeParentStatsIfAny(stats_store_); + server_stats_->uptime_.set(time(nullptr) - original_start_time_); server_stats_->memory_allocated_.set(Memory::Stats::totalCurrentlyAllocated() + - info.memory_allocated_); + parent_stats.parent_memory_allocated_); server_stats_->memory_heap_size_.set(Memory::Stats::totalCurrentlyReserved()); - server_stats_->parent_connections_.set(info.num_connections_); - server_stats_->total_connections_.set(numConnections() + info.num_connections_); + server_stats_->parent_connections_.set(parent_stats.parent_connections_); + server_stats_->total_connections_.set(listener_manager_->numConnections() + + parent_stats.parent_connections_); server_stats_->days_until_first_cert_expiring_.set( sslContextManager().daysUntilFirstCertExpires()); InstanceUtil::flushMetricsToSinks(config_.statsSinks(), stats_store_.source()); @@ -164,11 +170,6 @@ void InstanceImpl::flushStats() { }); } -void InstanceImpl::getParentStats(HotRestart::GetParentStatsInfo& info) { - info.memory_allocated_ = Memory::Stats::totalCurrentlyAllocated(); - info.num_connections_ = numConnections(); -} - bool InstanceImpl::healthCheckFailed() { return server_stats_->live_.value() == 0; } InstanceUtil::BootstrapVersion @@ -198,7 +199,7 @@ InstanceUtil::loadBootstrapConfig(envoy::config::bootstrap::v2::Bootstrap& boots void InstanceImpl::initialize(const Options& options, Network::Address::InstanceConstSharedPtr local_address, - ComponentFactory& component_factory) { + ComponentFactory& component_factory, ListenerHooks& hooks) { ENVOY_LOG(info, "initializing epoch {} (hot restart version={})", options.restartEpoch(), restarter_.version()); @@ -227,9 +228,15 @@ void InstanceImpl::initialize(const Options& options, ENVOY_LOG(info, " wasm: {}", Registry::FactoryRegistry::allFactoryNames()); + // Enable the selected buffer implementation (old libevent evbuffer version or new native + // version) early in the initialization, 
before any buffers can be created. + Buffer::OwnedImpl::useOldImpl(options.libeventBufferEnabled()); + ENVOY_LOG(info, "buffer implementation: {}", + Buffer::OwnedImpl().usesOldImpl() ? "old (libevent)" : "new"); + // Handle configuration that needs to take place prior to the main configuration load. - InstanceUtil::loadBootstrapConfig(bootstrap_, options, api()); - bootstrap_config_update_time_ = time_system_.systemTime(); + InstanceUtil::loadBootstrapConfig(bootstrap_, options, *api_); + bootstrap_config_update_time_ = time_source_.systemTime(); // Needs to happen as early as possible in the instantiation to preempt the objects that require // stats. @@ -247,10 +254,10 @@ void InstanceImpl::initialize(const Options& options, assert_action_registration_ = Assert::setDebugAssertionFailureRecordAction( [this]() { server_stats_->debug_assertion_failures_.inc(); }); - failHealthcheck(false); + InstanceImpl::failHealthcheck(false); uint64_t version_int; - if (!StringUtil::atoul(VersionInfo::revision().substr(0, 6).c_str(), version_int, 16)) { + if (!StringUtil::atoull(VersionInfo::revision().substr(0, 6).c_str(), version_int, 16)) { throw EnvoyException("compiled GIT SHA is invalid. Invalid build."); } @@ -258,15 +265,13 @@ void InstanceImpl::initialize(const Options& options, bootstrap_.mutable_node()->set_build_version(VersionInfo::version()); local_info_ = std::make_unique( - bootstrap_.node(), local_address, options.serviceZone(), options.serviceClusterName(), - options.serviceNodeName()); + bootstrap_.node(), std::move(local_address), options.serviceZone(), + options.serviceClusterName(), options.serviceNodeName()); Configuration::InitialImpl initial_config(bootstrap_); - HotRestart::ShutdownParentAdminInfo info; - info.original_start_time_ = original_start_time_; - restarter_.shutdownParentAdmin(info); - original_start_time_ = info.original_start_time_; + // Learn original_start_time_ if our parent is still around to inform us of it. 
+ restarter_.sendParentAdminShutdownRequest(original_start_time_); admin_ = std::make_unique(initial_config.admin().profilePath(), *this); if (initial_config.admin().address()) { if (initial_config.admin().accessLogPath().empty()) { @@ -288,12 +293,15 @@ void InstanceImpl::initialize(const Options& options, loadServerFlags(initial_config.flagsPath()); // Initialize the overload manager early so other modules can register for actions. - overload_manager_ = std::make_unique(dispatcher(), stats(), threadLocal(), - bootstrap_.overload_manager(), api()); + overload_manager_ = std::make_unique( + *dispatcher_, stats_store_, thread_local_, bootstrap_.overload_manager(), *api_); + + heap_shrinker_ = + std::make_unique(*dispatcher_, *overload_manager_, stats_store_); // Workers get created first so they register for thread local updates. - listener_manager_ = - std::make_unique(*this, listener_component_factory_, worker_factory_); + listener_manager_ = std::make_unique( + *this, listener_component_factory_, worker_factory_, bootstrap_.enable_dispatcher_stats()); // The main thread is also registered for thread local updates so that code that does not care // whether it runs on the main thread or on workers can still use TLS. @@ -302,18 +310,25 @@ void InstanceImpl::initialize(const Options& options, // We can now initialize stats for threading. stats_store_.initializeThreading(*dispatcher_, thread_local_); + // It's now safe to start writing stats from the main thread's dispatcher. + if (bootstrap_.enable_dispatcher_stats()) { + dispatcher_->initializeStats(stats_store_, "server."); + } + // Runtime gets initialized before the main configuration since during main configuration // load things may grab a reference to the loader for later use. 
- runtime_loader_ = component_factory.createRuntime(*this, initial_config); + runtime_singleton_ = std::make_unique( + component_factory.createRuntime(*this, initial_config)); + hooks.onRuntimeCreated(); // Once we have runtime we can initialize the SSL context manager. ssl_context_manager_ = - std::make_unique(time_system_); + std::make_unique(time_source_); cluster_manager_factory_ = std::make_unique( - admin(), runtime(), stats(), threadLocal(), random(), dnsResolver(), sslContextManager(), - dispatcher(), localInfo(), secretManager(), api(), http_context_, accessLogManager(), - singletonManager()); + *admin_, Runtime::LoaderSingleton::get(), stats_store_, thread_local_, *random_generator_, + dns_resolver_, *ssl_context_manager_, *dispatcher_, *local_info_, *secret_manager_, *api_, + http_context_, access_log_manager_, *singleton_manager_); // Optional Wasm services. These must be initialied afer threading but before the main // configuration which many reference wasm vms. @@ -351,14 +366,15 @@ void InstanceImpl::initialize(const Options& options, if (bootstrap_.has_hds_config()) { const auto& hds_config = bootstrap_.hds_config(); async_client_manager_ = std::make_unique( - clusterManager(), thread_local_, time_system_, api()); + *config_.clusterManager(), thread_local_, time_source_, *api_); hds_delegate_ = std::make_unique( - stats(), - Config::Utility::factoryForGrpcApiConfigSource(*async_client_manager_, hds_config, stats()) + stats_store_, + Config::Utility::factoryForGrpcApiConfigSource(*async_client_manager_, hds_config, + stats_store_) ->create(), - dispatcher(), runtime(), stats(), sslContextManager(), random(), info_factory_, - access_log_manager_, clusterManager(), localInfo(), admin(), singletonManager(), - threadLocal(), api()); + *dispatcher_, Runtime::LoaderSingleton::get(), stats_store_, *ssl_context_manager_, + *random_generator_, info_factory_, access_log_manager_, *config_.clusterManager(), + *local_info_, *admin_, *singleton_manager_, 
thread_local_, *api_); } for (Stats::SinkPtr& sink : config_.statsSinks()) { @@ -372,7 +388,7 @@ void InstanceImpl::initialize(const Options& options, // GuardDog (deadlock detection) object and thread setup before workers are // started and before our own run() loop runs. - guard_dog_ = std::make_unique(stats_store_, config_, api()); + guard_dog_ = std::make_unique(stats_store_, config_, *api_); } void InstanceImpl::startWorkers() { @@ -386,21 +402,24 @@ void InstanceImpl::startWorkers() { Runtime::LoaderPtr InstanceUtil::createRuntime(Instance& server, Server::Configuration::Initial& config) { - if (config.runtime()) { - ENVOY_LOG(info, "runtime symlink: {}", config.runtime()->symlinkRoot()); - ENVOY_LOG(info, "runtime subdirectory: {}", config.runtime()->subdirectory()); + if (!config.baseRuntime().fields().empty()) { + ENVOY_LOG(info, "non-empty base runtime layer specified in bootstrap"); + } + if (config.diskRuntime()) { + ENVOY_LOG(info, "disk runtime symlink: {}", config.diskRuntime()->symlinkRoot()); + ENVOY_LOG(info, "disk runtime subdirectory: {}", config.diskRuntime()->subdirectory()); std::string override_subdirectory = - config.runtime()->overrideSubdirectory() + "/" + server.localInfo().clusterName(); - ENVOY_LOG(info, "runtime override subdirectory: {}", override_subdirectory); + config.diskRuntime()->overrideSubdirectory() + "/" + server.localInfo().clusterName(); + ENVOY_LOG(info, "disk runtime override subdirectory: {}", override_subdirectory); return std::make_unique( - server.dispatcher(), server.threadLocal(), config.runtime()->symlinkRoot(), - config.runtime()->subdirectory(), override_subdirectory, server.stats(), server.random(), - server.api()); + server.dispatcher(), server.threadLocal(), config.baseRuntime(), + config.diskRuntime()->symlinkRoot(), config.diskRuntime()->subdirectory(), + override_subdirectory, server.stats(), server.random(), server.api()); } else { - return std::make_unique(server.random(), server.stats(), - 
server.threadLocal()); + return std::make_unique(config.baseRuntime(), server.random(), + server.stats(), server.threadLocal()); } } @@ -412,17 +431,19 @@ void InstanceImpl::loadServerFlags(const absl::optional& flags_path ENVOY_LOG(info, "server flags path: {}", flags_path.value()); if (api_->fileSystem().fileExists(flags_path.value() + "/drain")) { ENVOY_LOG(info, "starting server in drain mode"); - failHealthcheck(true); + InstanceImpl::failHealthcheck(true); } } -uint64_t InstanceImpl::numConnections() { return listener_manager_->numConnections(); } - RunHelper::RunHelper(Instance& instance, const Options& options, Event::Dispatcher& dispatcher, Upstream::ClusterManager& cm, AccessLog::AccessLogManager& access_log_manager, - InitManagerImpl& init_manager, OverloadManager& overload_manager, - std::function workers_start_cb) { - + Init::Manager& init_manager, OverloadManager& overload_manager, + std::function workers_start_cb) + : init_watcher_("RunHelper", [&instance, workers_start_cb]() { + if (!instance.isShutdown()) { + workers_start_cb(); + } + }) { // Setup signals. if (options.signalHandlingEnabled()) { sigterm_ = dispatcher.listenForSignal(SIGTERM, [&instance]() { @@ -453,7 +474,7 @@ RunHelper::RunHelper(Instance& instance, const Options& options, Event::Dispatch // this can fire immediately if all clusters have already initialized. Also note that we need // to guard against shutdown at two different levels since SIGTERM can come in once the run loop // starts. - cm.setInitializedCb([&instance, &init_manager, &cm, workers_start_cb]() { + cm.setInitializedCb([&instance, &init_manager, &cm, this]() { if (instance.isShutdown()) { return; } @@ -464,16 +485,7 @@ RunHelper::RunHelper(Instance& instance, const Options& options, Event::Dispatch cm.adsMux().pause(Config::TypeUrl::get().RouteConfiguration); ENVOY_LOG(info, "all clusters initialized. 
initializing init manager"); - - // Note: the lambda below should not capture "this" since the RunHelper object may - // have been destructed by the time it gets executed. - init_manager.initialize([&instance, workers_start_cb]() { - if (instance.isShutdown()) { - return; - } - - workers_start_cb(); - }); + init_manager.initialize(init_watcher_); // Now that we're execute all the init callbacks we can resume RDS // as we've subscribed to all the statically defined RDS resources. @@ -482,23 +494,22 @@ RunHelper::RunHelper(Instance& instance, const Options& options, Event::Dispatch } void InstanceImpl::run() { - // We need the RunHelper to be available to call from InstanceImpl::shutdown() below, so - // we save it as a member variable. - run_helper_ = std::make_unique(*this, options_, *dispatcher_, clusterManager(), - access_log_manager_, init_manager_, overloadManager(), - [this]() -> void { startWorkers(); }); + // RunHelper exists primarily to facilitate testing of how we respond to early shutdown during + // startup (see RunHelperTest in server_test.cc). + auto run_helper = RunHelper(*this, options_, *dispatcher_, clusterManager(), access_log_manager_, + init_manager_, overloadManager(), [this] { startWorkers(); }); // Run the main dispatch loop waiting to exit. 
ENVOY_LOG(info, "starting main dispatch loop"); auto watchdog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId()); watchdog->startWatchdog(*dispatcher_); + dispatcher_->post([this] { notifyCallbacksForStage(Stage::Startup); }); dispatcher_->run(Event::Dispatcher::RunType::Block); ENVOY_LOG(info, "main dispatch loop exited"); guard_dog_->stopWatching(watchdog); watchdog.reset(); terminate(); - run_helper_.reset(); } void InstanceImpl::terminate() { @@ -537,12 +548,13 @@ void InstanceImpl::terminate() { ENVOY_FLUSH_LOG(); } -Runtime::Loader& InstanceImpl::runtime() { return *runtime_loader_; } +Runtime::Loader& InstanceImpl::runtime() { return Runtime::LoaderSingleton::get(); } void InstanceImpl::shutdown() { + ENVOY_LOG(info, "shutting down server instance"); shutdown_ = true; - restarter_.terminateParent(); - dispatcher_->exit(); + restarter_.sendParentTerminateRequest(); + notifyCallbacksForStage(Stage::ShutdownExit, [this] { dispatcher_->exit(); }); } void InstanceImpl::shutdownAdmin() { @@ -554,8 +566,47 @@ void InstanceImpl::shutdownAdmin() { handler_->stopListeners(); admin_->closeSocket(); + // If we still have a parent, it should be terminated now that we have a child. 
ENVOY_LOG(warn, "terminating parent process"); - restarter_.terminateParent(); + restarter_.sendParentTerminateRequest(); +} + +void InstanceImpl::registerCallback(Stage stage, StageCallback callback) { + stage_callbacks_[stage].push_back(callback); +} + +void InstanceImpl::registerCallback(Stage stage, StageCallbackWithCompletion callback) { + ASSERT(stage == Stage::ShutdownExit); + stage_completable_callbacks_[stage].push_back(callback); +} + +void InstanceImpl::notifyCallbacksForStage(Stage stage, Event::PostCb completion_cb) { + ASSERT(std::this_thread::get_id() == main_thread_id_); + auto it = stage_callbacks_.find(stage); + if (it != stage_callbacks_.end()) { + for (const StageCallback& callback : it->second) { + callback(); + } + } + + auto it2 = stage_completable_callbacks_.find(stage); + if (it2 != stage_completable_callbacks_.end()) { + ASSERT(!it2->second.empty()); + // Wrap completion_cb so that it only gets invoked when all callbacks for this stage + // have finished their work. 
+ auto completion_cb_count = std::make_shared(it2->second.size()); + Event::PostCb wrapped_cb = [this, completion_cb, completion_cb_count] { + ASSERT(std::this_thread::get_id() == main_thread_id_); + if (--*completion_cb_count == 0) { + completion_cb(); + } + }; + for (const StageCallbackWithCompletion& callback : it2->second) { + callback(wrapped_cb); + } + } else { + completion_cb(); + } } ProtobufTypes::MessagePtr InstanceImpl::dumpBootstrapConfig() { diff --git a/source/server/server.h b/source/server/server.h index 11f7ba46a86d2..24c7269810308 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -22,21 +22,22 @@ #include "common/common/logger_delegates.h" #include "common/grpc/async_client_manager_impl.h" #include "common/http/context_impl.h" +#include "common/init/manager_impl.h" +#include "common/memory/heap_shrinker.h" #include "common/runtime/runtime_impl.h" #include "common/secret/secret_manager_impl.h" #include "common/upstream/health_discovery_service.h" #include "server/configuration_impl.h" #include "server/http/admin.h" -#include "server/init_manager_impl.h" +#include "server/listener_hooks.h" #include "server/listener_manager_impl.h" #include "server/overload_manager_impl.h" -#include "server/test_hooks.h" #include "server/worker_impl.h" -#include "extensions/filters/common/ratelimit/ratelimit_registration.h" #include "extensions/transport_sockets/tls/context_manager_impl.h" +#include "absl/container/flat_hash_map.h" #include "absl/types/optional.h" namespace Envoy { @@ -123,10 +124,11 @@ class RunHelper : Logger::Loggable { public: RunHelper(Instance& instance, const Options& options, Event::Dispatcher& dispatcher, Upstream::ClusterManager& cm, AccessLog::AccessLogManager& access_log_manager, - InitManagerImpl& init_manager, OverloadManager& overload_manager, + Init::Manager& init_manager, OverloadManager& overload_manager, std::function workers_start_cb); private: + Init::WatcherImpl init_watcher_; Event::SignalEventPtr sigterm_; 
Event::SignalEventPtr sigint_; Event::SignalEventPtr sig_usr_1_; @@ -136,17 +138,19 @@ class RunHelper : Logger::Loggable { /** * This is the actual full standalone server which stitches together various common components. */ -class InstanceImpl : Logger::Loggable, public Instance { +class InstanceImpl : Logger::Loggable, + public Instance, + public ServerLifecycleNotifier { public: /** * @throw EnvoyException if initialization fails. */ InstanceImpl(const Options& options, Event::TimeSystem& time_system, - Network::Address::InstanceConstSharedPtr local_address, TestHooks& hooks, + Network::Address::InstanceConstSharedPtr local_address, ListenerHooks& hooks, HotRestart& restarter, Stats::StoreRoot& store, Thread::BasicLockable& access_log_lock, ComponentFactory& component_factory, Runtime::RandomGeneratorPtr&& random_generator, ThreadLocal::Instance& tls, - Thread::ThreadFactory& thread_factory); + Thread::ThreadFactory& thread_factory, Filesystem::Instance& file_system); ~InstanceImpl() override; @@ -163,9 +167,9 @@ class InstanceImpl : Logger::Loggable, public Instance { DrainManager& drainManager() override { return *drain_manager_; } AccessLog::AccessLogManager& accessLogManager() override { return access_log_manager_; } void failHealthcheck(bool fail) override; - void getParentStats(HotRestart::GetParentStatsInfo& info) override; HotRestart& hotRestart() override { return restarter_; } Init::Manager& initManager() override { return init_manager_; } + ServerLifecycleNotifier& lifecycleNotifier() override { return *this; } ListenerManager& listenerManager() override { return *listener_manager_; } Secret::SecretManager& secretManager() override { return *secret_manager_; } Envoy::MutexTracer* mutexTracer() override { return mutex_tracer_; } @@ -184,25 +188,40 @@ class InstanceImpl : Logger::Loggable, public Instance { Http::Context& httpContext() override { return http_context_; } ThreadLocal::Instance& threadLocal() override { return thread_local_; } const 
LocalInfo::LocalInfo& localInfo() override { return *local_info_; } - Event::TimeSystem& timeSystem() override { return time_system_; } + TimeSource& timeSource() override { return time_source_; } std::chrono::milliseconds statsFlushInterval() const override { return config_.statsFlushInterval(); } + // ServerLifecycleNotifier + void registerCallback(Stage stage, StageCallback callback) override; + void registerCallback(Stage stage, StageCallbackWithCompletion callback) override; + private: ProtobufTypes::MessagePtr dumpBootstrapConfig(); void flushStats(); void initialize(const Options& options, Network::Address::InstanceConstSharedPtr local_address, - ComponentFactory& component_factory); + ComponentFactory& component_factory, ListenerHooks& hooks); void loadServerFlags(const absl::optional& flags_path); - uint64_t numConnections(); void startWorkers(); void terminate(); + void notifyCallbacksForStage( + Stage stage, Event::PostCb completion_cb = [] {}); + // init_manager_ must come before any member that participates in initialization, and destructed + // only after referencing members are gone, since initialization continuation can potentially + // occur at any point during member lifetime. This init manager is populated with LdsApi targets. + Init::ManagerImpl init_manager_{"Server"}; + // secret_manager_ must come before listener_manager_, config_ and dispatcher_, and destructed + // only after these members can no longer reference it, since: + // - There may be active filter chains referencing it in listener_manager_. + // - There may be active clusters referencing it in config_.cluster_manager_. + // - There may be active connections referencing it. 
+ std::unique_ptr secret_manager_; bool shutdown_; const Options& options_; - Event::TimeSystem& time_system_; + TimeSource& time_source_; HotRestart& restarter_; const time_t start_time_; time_t original_start_time_; @@ -211,15 +230,12 @@ class InstanceImpl : Logger::Loggable, public Instance { Assert::ActionRegistrationPtr assert_action_registration_; ThreadLocal::Instance& thread_local_; Api::ApiPtr api_; - // secret_manager_ must come before dispatcher_, since there may be active connections - // referencing it, so need to destruct these first. - std::unique_ptr secret_manager_; Event::DispatcherPtr dispatcher_; std::unique_ptr admin_; Singleton::ManagerPtr singleton_manager_; Network::ConnectionHandlerPtr handler_; Runtime::RandomGeneratorPtr random_generator_; - Runtime::LoaderPtr runtime_loader_; + std::unique_ptr runtime_singleton_; std::unique_ptr ssl_context_manager_; ProdListenerComponentFactory listener_component_factory_; ProdWorkerFactory worker_factory_; @@ -231,7 +247,6 @@ class InstanceImpl : Logger::Loggable, public Instance { DrainManagerPtr drain_manager_; AccessLog::AccessLogManagerImpl access_log_manager_; std::unique_ptr cluster_manager_factory_; - InitManagerImpl init_manager_; std::unique_ptr guard_dog_; bool terminated_; std::unique_ptr file_logger_; @@ -243,9 +258,12 @@ class InstanceImpl : Logger::Loggable, public Instance { Upstream::HdsDelegatePtr hds_delegate_; std::unique_ptr overload_manager_; std::vector> wasm_; - std::unique_ptr run_helper_; Envoy::MutexTracer* mutex_tracer_; Http::ContextImpl http_context_; + std::unique_ptr heap_shrinker_; + const std::thread::id main_thread_id_; + absl::flat_hash_map> stage_callbacks_; + absl::flat_hash_map> stage_completable_callbacks_; }; } // namespace Server diff --git a/source/server/worker_impl.cc b/source/server/worker_impl.cc index 660d1ac1cf739..bf38d2181d853 100644 --- a/source/server/worker_impl.cc +++ b/source/server/worker_impl.cc @@ -21,7 +21,7 @@ WorkerPtr 
ProdWorkerFactory::createWorker(OverloadManager& overload_manager) { overload_manager, api_)}; } -WorkerImpl::WorkerImpl(ThreadLocal::Instance& tls, TestHooks& hooks, +WorkerImpl::WorkerImpl(ThreadLocal::Instance& tls, ListenerHooks& hooks, Event::DispatcherPtr&& dispatcher, Network::ConnectionHandlerPtr handler, OverloadManager& overload_manager, Api::Api& api) : tls_(tls), hooks_(hooks), dispatcher_(std::move(dispatcher)), handler_(std::move(handler)), @@ -73,6 +73,10 @@ void WorkerImpl::start(GuardDog& guard_dog) { api_.threadFactory().createThread([this, &guard_dog]() -> void { threadRoutine(guard_dog); }); } +void WorkerImpl::initializeStats(Stats::Scope& scope, const std::string& prefix) { + dispatcher_->initializeStats(scope, prefix); +} + void WorkerImpl::stop() { // It's possible for the server to cleanly shut down while cluster initialization during startup // is happening, so we might not yet have a thread. diff --git a/source/server/worker_impl.h b/source/server/worker_impl.h index b59c7356f2134..3e56578303caf 100644 --- a/source/server/worker_impl.h +++ b/source/server/worker_impl.h @@ -12,14 +12,14 @@ #include "common/common/logger.h" -#include "server/test_hooks.h" +#include "server/listener_hooks.h" namespace Envoy { namespace Server { class ProdWorkerFactory : public WorkerFactory, Logger::Loggable { public: - ProdWorkerFactory(ThreadLocal::Instance& tls, Api::Api& api, TestHooks& hooks) + ProdWorkerFactory(ThreadLocal::Instance& tls, Api::Api& api, ListenerHooks& hooks) : tls_(tls), api_(api), hooks_(hooks) {} // Server::WorkerFactory @@ -28,7 +28,7 @@ class ProdWorkerFactory : public WorkerFactory, Logger::Loggable { public: - WorkerImpl(ThreadLocal::Instance& tls, TestHooks& hooks, Event::DispatcherPtr&& dispatcher, + WorkerImpl(ThreadLocal::Instance& tls, ListenerHooks& hooks, Event::DispatcherPtr&& dispatcher, Network::ConnectionHandlerPtr handler, OverloadManager& overload_manager, Api::Api& api); @@ -45,6 +45,7 @@ class WorkerImpl : public 
Worker, Logger::Loggable { uint64_t numConnections() override; void removeListener(Network::ListenerConfig& listener, std::function completion) override; void start(GuardDog& guard_dog) override; + void initializeStats(Stats::Scope& scope, const std::string& prefix) override; void stop() override; void stopListener(Network::ListenerConfig& listener) override; void stopListeners() override; @@ -54,7 +55,7 @@ class WorkerImpl : public Worker, Logger::Loggable { void stopAcceptingConnectionsCb(OverloadActionState state); ThreadLocal::Instance& tls_; - TestHooks& hooks_; + ListenerHooks& hooks_; Event::DispatcherPtr dispatcher_; Network::ConnectionHandlerPtr handler_; Api::Api& api_; diff --git a/support/bootstrap b/support/bootstrap index 0b1c52c007059..d53a7a7ca29e2 100755 --- a/support/bootstrap +++ b/support/bootstrap @@ -45,14 +45,14 @@ if test ! -d "${DOT_GIT_DIR}"; then fi HOOKS_DIR="${DOT_GIT_DIR}/hooks" -HOOKS_DIR_RELPATH=$(relpath "${HOOKS_DIR}" "${PWD}") +HOOKS_DIR_RELPATH=$(relpath "${HOOKS_DIR}" "$(dirname $0)") if [ ! -e "${HOOKS_DIR}/prepare-commit-msg" ]; then echo "Installing hook 'prepare-commit-msg'" - ln -s "${HOOKS_DIR_RELPATH}/support/hooks/prepare-commit-msg" "${HOOKS_DIR}/prepare-commit-msg" + ln -sf "${HOOKS_DIR_RELPATH}/hooks/prepare-commit-msg" "${HOOKS_DIR}/prepare-commit-msg" fi if [ ! -e "${HOOKS_DIR}/pre-push" ]; then echo "Installing hook 'pre-push'" - ln -s "${HOOKS_DIR_RELPATH}/support/hooks/pre-push" "${HOOKS_DIR}/pre-push" + ln -sf "${HOOKS_DIR_RELPATH}/hooks/pre-push" "${HOOKS_DIR}/pre-push" fi diff --git a/support/hooks/pre-push b/support/hooks/pre-push index d31e5ff20d714..38ef688b9bd52 100755 --- a/support/hooks/pre-push +++ b/support/hooks/pre-push @@ -57,19 +57,24 @@ do # `$CLANG_FORMAT` and `$BUILDIFY` are defined, or that the default values it # assumes for these variables correspond to real binaries on the system. If # either of these things aren't true, the check fails. 
- for i in $(git diff --name-only $RANGE --diff-filter=ACMR 2>&1); do + for i in $(git diff --name-only $RANGE --diff-filter=ACMR --ignore-submodules=all 2>&1); do echo -ne " Checking format for $i - " "$SCRIPT_DIR"/check_format.py check $i if [[ $? -ne 0 ]]; then exit 1 fi + + echo " Checking spelling for $i" + "$SCRIPT_DIR"/check_spelling_pedantic.py check $i + if [[ $? -ne 0 ]]; then + exit 1 + fi done "$SCRIPT_DIR"/format_python_tools.sh check if [[ $? -ne 0 ]]; then exit 1 fi - # Check correctness of repositories definitions. echo " Checking repositories definitions" if ! "$SCRIPT_DIR"/check_repositories.sh; then diff --git a/test/BUILD b/test/BUILD index 27e8571b3a77c..fa80bf514d36d 100644 --- a/test/BUILD +++ b/test/BUILD @@ -18,8 +18,10 @@ envoy_cc_test_library( name = "main", srcs = [ "main.cc", + "test_listener.cc", "test_runner.h", ], + hdrs = ["test_listener.h"], external_deps = [ "abseil_symbolize", ], @@ -27,7 +29,9 @@ envoy_cc_test_library( "//source/common/common:logger_lib", "//source/common/common:thread_lib", "//source/common/event:libevent_lib", - "//source/common/thread:thread_factory_singleton_lib", + "//source/common/http/http2:codec_lib", + "//source/exe:process_wide_lib", + "//test/common/runtime:utility_lib", "//test/mocks/access_log:access_log_mocks", "//test/test_common:environment_lib", "//test/test_common:global_lib", diff --git a/test/README.md b/test/README.md index 76f4c866f4f7c..3617c73719c1d 100644 --- a/test/README.md +++ b/test/README.md @@ -58,6 +58,26 @@ Example: EXPECT_THAT(response->headers(), HeaderMapEqualRef(expected_headers)); ``` +### ProtoEq, ProtoEqIgnoringField, RepeatedProtoEq + +Tests equality of protobufs, with a variant that ignores the value (including +presence) of a single named field. Another variant can be used to compare two +instances of Protobuf::RepeatedPtrField element-by-element. 
+ +Example: + +```cpp +envoy::api::v2::DeltaDiscoveryRequest expected_request; +// (not shown: set some fields of expected_request...) +EXPECT_CALL(async_stream_, sendMessage(ProtoEqIgnoringField(expected_request, "response_nonce"), false)); + +response->mutable_resources()->Add(); +response->mutable_resources()->Add(); +response->mutable_resources()->Add(); +// (not shown: do something to populate those empty added items...) +EXPECT_CALL(callbacks_, onConfigUpdate(RepeatedProtoEq(response->resources()), version)); +``` + ### IsSubsetOfHeaders and IsSupersetOfHeaders Tests that one `HeaderMap` argument contains every header in another diff --git a/test/common/access_log/BUILD b/test/common/access_log/BUILD index b8bfca1d05bff..475884944d7ff 100644 --- a/test/common/access_log/BUILD +++ b/test/common/access_log/BUILD @@ -37,6 +37,7 @@ envoy_cc_test( "//source/common/common:utility_lib", "//source/common/http:header_map_lib", "//test/mocks/http:http_mocks", + "//test/mocks/ssl:ssl_mocks", "//test/mocks/stream_info:stream_info_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:utility_lib", diff --git a/test/common/access_log/access_log_formatter_fuzz_test.cc b/test/common/access_log/access_log_formatter_fuzz_test.cc index f6bcfb4bda8c0..1df65dd1736e6 100644 --- a/test/common/access_log/access_log_formatter_fuzz_test.cc +++ b/test/common/access_log/access_log_formatter_fuzz_test.cc @@ -6,6 +6,7 @@ namespace Envoy { namespace Fuzz { +namespace { DEFINE_PROTO_FUZZER(const test::common::access_log::TestCase& input) { try { @@ -22,5 +23,6 @@ DEFINE_PROTO_FUZZER(const test::common::access_log::TestCase& input) { } } +} // namespace } // namespace Fuzz } // namespace Envoy diff --git a/test/common/access_log/access_log_formatter_test.cc b/test/common/access_log/access_log_formatter_test.cc index 38a81cd2c6d3f..1c9741ca1a5af 100644 --- a/test/common/access_log/access_log_formatter_test.cc +++ b/test/common/access_log/access_log_formatter_test.cc @@ -8,13 
+8,14 @@ #include "common/http/header_map_impl.h" #include "test/mocks/http/mocks.h" +#include "test/mocks/ssl/mocks.h" #include "test/mocks/stream_info/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/printers.h" -#include "test/test_common/test_base.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" using testing::_; using testing::Const; @@ -24,6 +25,7 @@ using testing::ReturnRef; namespace Envoy { namespace AccessLog { +namespace { TEST(AccessLogFormatUtilsTest, protocolToString) { EXPECT_EQ("HTTP/1.0", AccessLogFormatUtils::protocolToString(Http::Protocol::Http10)); @@ -123,6 +125,20 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_EQ("0", response_code_format.format(header, header, header, stream_info)); } + { + StreamInfoFormatter response_format("RESPONSE_CODE_DETAILS"); + absl::optional rc_details; + EXPECT_CALL(stream_info, responseCodeDetails()).WillRepeatedly(ReturnRef(rc_details)); + EXPECT_EQ("-", response_format.format(header, header, header, stream_info)); + } + + { + StreamInfoFormatter response_code_format("RESPONSE_CODE_DETAILS"); + absl::optional rc_details{"via_upstream"}; + EXPECT_CALL(stream_info, responseCodeDetails()).WillRepeatedly(ReturnRef(rc_details)); + EXPECT_EQ("via_upstream", response_code_format.format(header, header, header, stream_info)); + } + { StreamInfoFormatter bytes_sent_format("BYTES_SENT"); EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(1)); @@ -202,6 +218,117 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(ReturnRef(requested_server_name)); EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info)); } + { + StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_URI_SAN"); + NiceMock connection_info; + const std::vector sans{"san"}; + ON_CALL(connection_info, uriSanPeerCertificate()).WillByDefault(Return(sans)); + EXPECT_CALL(stream_info, 
downstreamSslConnection()).WillRepeatedly(Return(&connection_info)); + EXPECT_EQ("san", upstream_format.format(header, header, header, stream_info)); + } + { + StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_URI_SAN"); + NiceMock connection_info; + const std::vector sans{"san1", "san2"}; + ON_CALL(connection_info, uriSanPeerCertificate()).WillByDefault(Return(sans)); + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(&connection_info)); + EXPECT_EQ("san1,san2", upstream_format.format(header, header, header, stream_info)); + } + { + StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_URI_SAN"); + NiceMock connection_info; + ON_CALL(connection_info, uriSanPeerCertificate()) + .WillByDefault(Return(std::vector())); + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(&connection_info)); + EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info)); + } + { + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_URI_SAN"); + EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info)); + } + { + StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_URI_SAN"); + NiceMock connection_info; + const std::vector sans{"san"}; + ON_CALL(connection_info, uriSanLocalCertificate()).WillByDefault(Return(sans)); + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(&connection_info)); + EXPECT_EQ("san", upstream_format.format(header, header, header, stream_info)); + } + { + StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_URI_SAN"); + NiceMock connection_info; + const std::vector sans{"san1", "san2"}; + ON_CALL(connection_info, uriSanLocalCertificate()).WillByDefault(Return(sans)); + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(&connection_info)); + EXPECT_EQ("san1,san2", upstream_format.format(header, header, header, stream_info)); + } + { + 
StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_URI_SAN"); + NiceMock connection_info; + ON_CALL(connection_info, uriSanLocalCertificate()) + .WillByDefault(Return(std::vector())); + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(&connection_info)); + EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info)); + } + { + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_URI_SAN"); + EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info)); + } + { + StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_SUBJECT"); + NiceMock connection_info; + ON_CALL(connection_info, subjectLocalCertificate()).WillByDefault(Return("subject")); + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(&connection_info)); + EXPECT_EQ("subject", upstream_format.format(header, header, header, stream_info)); + } + { + StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_SUBJECT"); + NiceMock connection_info; + ON_CALL(connection_info, subjectLocalCertificate()).WillByDefault(Return("")); + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(&connection_info)); + EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info)); + } + { + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_SUBJECT"); + EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info)); + } + { + StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SUBJECT"); + NiceMock connection_info; + ON_CALL(connection_info, subjectPeerCertificate()).WillByDefault(Return("subject")); + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(&connection_info)); + EXPECT_EQ("subject", upstream_format.format(header, header, header, stream_info)); + } + { + StreamInfoFormatter 
upstream_format("DOWNSTREAM_PEER_SUBJECT"); + NiceMock connection_info; + ON_CALL(connection_info, subjectPeerCertificate()).WillByDefault(Return("")); + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(&connection_info)); + EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info)); + } + { + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SUBJECT"); + EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info)); + } + { + StreamInfoFormatter upstream_format("UPSTREAM_TRANSPORT_FAILURE_REASON"); + std::string upstream_transport_failure_reason = "SSL error"; + EXPECT_CALL(stream_info, upstreamTransportFailureReason()) + .WillRepeatedly(ReturnRef(upstream_transport_failure_reason)); + EXPECT_EQ("SSL error", upstream_format.format(header, header, header, stream_info)); + } + + { + StreamInfoFormatter upstream_format("UPSTREAM_TRANSPORT_FAILURE_REASON"); + std::string upstream_transport_failure_reason; + EXPECT_CALL(stream_info, upstreamTransportFailureReason()) + .WillRepeatedly(ReturnRef(upstream_transport_failure_reason)); + EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info)); + } } TEST(AccessLogFormatterTest, requestHeaderFormatter) { @@ -529,18 +656,13 @@ TEST(AccessLogFormatterTest, JsonFormatterStartTimeTest) { Http::TestHeaderMapImpl response_header; Http::TestHeaderMapImpl response_trailer; - time_t test_epoch = 1522280158; - SystemTime time = std::chrono::system_clock::from_time_t(test_epoch); + time_t expected_time_in_epoch = 1522280158; + SystemTime time = std::chrono::system_clock::from_time_t(expected_time_in_epoch); EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(time)); - // Needed to take into account the behavior in non-GMT timezones. 
- struct tm time_val; - gmtime_r(&test_epoch, &time_val); - time_t expected_time_t = mktime(&time_val); - std::unordered_map expected_json_map = { {"simple_date", "2018/03/28"}, - {"test_time", fmt::format("{}", expected_time_t)}, + {"test_time", fmt::format("{}", expected_time_in_epoch)}, {"bad_format", "bad_format"}, {"default", "2018-03-28T23:35:58.000Z"}, {"all_zeroes", "000000000.0.00.000"}}; @@ -637,18 +759,13 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { const std::string format = "%START_TIME(%Y/%m/%d)%|%START_TIME(%s)%|%START_TIME(bad_format)%|" "%START_TIME%|%START_TIME(%f.%1f.%2f.%3f)%"; - time_t test_epoch = 1522280158; - SystemTime time = std::chrono::system_clock::from_time_t(test_epoch); + time_t expected_time_in_epoch = 1522280158; + SystemTime time = std::chrono::system_clock::from_time_t(expected_time_in_epoch); EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(time)); FormatterImpl formatter(format); - // Needed to take into account the behavior in non-GMT timezones. - struct tm time_val; - gmtime_r(&test_epoch, &time_val); - time_t expected_time_t = mktime(&time_val); - EXPECT_EQ(fmt::format("2018/03/28|{}|bad_format|2018-03-28T23:35:58.000Z|000000000.0.00.000", - expected_time_t), + expected_time_in_epoch), formatter.format(request_header, response_header, response_trailer, stream_info)); } @@ -690,7 +807,7 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { { // This tests START_TIME specifier that has shorter segments when formatted, i.e. - // strftime("%%%%"") equals "%%", %1f will have 1 as its size. + // absl::FormatTime("%%%%"") equals "%%", %1f will have 1 as its size. 
const std::string format = "%START_TIME(%%%%|%%%%%f|%s%%%%%3f|%1f%%%%%s)%"; const SystemTime start_time(std::chrono::microseconds(1522796769123456)); EXPECT_CALL(stream_info, startTime()).WillOnce(Return(start_time)); @@ -732,5 +849,6 @@ TEST(AccessLogFormatterTest, ParserFailures) { } } +} // namespace } // namespace AccessLog } // namespace Envoy diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index a5c13c589c2af..3b57f66cf3230 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -20,10 +20,10 @@ #include "test/mocks/server/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/printers.h" -#include "test/test_common/test_base.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" using testing::_; using testing::NiceMock; @@ -48,9 +48,9 @@ envoy::config::filter::accesslog::v2::AccessLog parseAccessLogFromV2Yaml(const s return access_log; } -class AccessLogImplTest : public TestBase { +class AccessLogImplTest : public testing::Test { public: - AccessLogImplTest() : file_(new Filesystem::MockFile()) { + AccessLogImplTest() : file_(new MockAccessLogFile()) { ON_CALL(context_, runtime()).WillByDefault(ReturnRef(runtime_)); ON_CALL(context_, accessLogManager()).WillByDefault(ReturnRef(log_manager_)); ON_CALL(log_manager_, createAccessLog(_)).WillByDefault(Return(file_)); @@ -61,7 +61,7 @@ class AccessLogImplTest : public TestBase { Http::TestHeaderMapImpl response_headers_; Http::TestHeaderMapImpl response_trailers_; TestStreamInfo stream_info_; - std::shared_ptr file_; + std::shared_ptr file_; StringViewSaver output_; NiceMock runtime_; @@ -548,23 +548,25 @@ TEST(AccessLogFilterTest, DurationWithRuntimeKey) { Config::FilterJson::translateAccessLogFilter(*filter_object, config); DurationFilter filter(config.duration_filter(), runtime); Http::TestHeaderMapImpl request_headers{{":method", 
"GET"}, {":path", "/"}}; + Http::TestHeaderMapImpl response_headers; + Http::TestHeaderMapImpl response_trailers; TestStreamInfo stream_info; stream_info.end_time_ = stream_info.startTimeMonotonic() + std::chrono::microseconds(100000); EXPECT_CALL(runtime.snapshot_, getInteger("key", 1000000)).WillOnce(Return(1)); - EXPECT_TRUE(filter.evaluate(stream_info, request_headers)); + EXPECT_TRUE(filter.evaluate(stream_info, request_headers, response_headers, response_trailers)); EXPECT_CALL(runtime.snapshot_, getInteger("key", 1000000)).WillOnce(Return(1000)); - EXPECT_FALSE(filter.evaluate(stream_info, request_headers)); + EXPECT_FALSE(filter.evaluate(stream_info, request_headers, response_headers, response_trailers)); stream_info.end_time_ = stream_info.startTimeMonotonic() + std::chrono::microseconds(100000001000); EXPECT_CALL(runtime.snapshot_, getInteger("key", 1000000)).WillOnce(Return(100000000)); - EXPECT_TRUE(filter.evaluate(stream_info, request_headers)); + EXPECT_TRUE(filter.evaluate(stream_info, request_headers, response_headers, response_trailers)); stream_info.end_time_ = stream_info.startTimeMonotonic() + std::chrono::microseconds(10000); EXPECT_CALL(runtime.snapshot_, getInteger("key", 1000000)).WillOnce(Return(100000000)); - EXPECT_FALSE(filter.evaluate(stream_info, request_headers)); + EXPECT_FALSE(filter.evaluate(stream_info, request_headers, response_headers, response_trailers)); } TEST(AccessLogFilterTest, StatusCodeWithRuntimeKey) { @@ -583,14 +585,16 @@ TEST(AccessLogFilterTest, StatusCodeWithRuntimeKey) { StatusCodeFilter filter(config.status_code_filter(), runtime); Http::TestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/"}}; + Http::TestHeaderMapImpl response_headers; + Http::TestHeaderMapImpl response_trailers; TestStreamInfo info; info.response_code_ = 400; EXPECT_CALL(runtime.snapshot_, getInteger("key", 300)).WillOnce(Return(350)); - EXPECT_TRUE(filter.evaluate(info, request_headers)); + EXPECT_TRUE(filter.evaluate(info, 
request_headers, response_headers, response_trailers)); EXPECT_CALL(runtime.snapshot_, getInteger("key", 300)).WillOnce(Return(500)); - EXPECT_FALSE(filter.evaluate(info, request_headers)); + EXPECT_FALSE(filter.evaluate(info, request_headers, response_headers, response_trailers)); } TEST_F(AccessLogImplTest, StatusCodeLessThan) { @@ -837,11 +841,12 @@ name: envoy.file_access_log - RLSE - DC - URX + - SI config: path: /dev/null )EOF"; - static_assert(StreamInfo::ResponseFlag::LastFlag == 0x8000, + static_assert(StreamInfo::ResponseFlag::LastFlag == 0x10000, "A flag has been added. Fix this code."); std::vector all_response_flags = { @@ -861,6 +866,7 @@ name: envoy.file_access_log StreamInfo::ResponseFlag::RateLimitServiceError, StreamInfo::ResponseFlag::DownstreamConnectionTermination, StreamInfo::ResponseFlag::UpstreamRetryLimitExceeded, + StreamInfo::ResponseFlag::StreamIdleTimeout, }; InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); @@ -891,7 +897,7 @@ name: envoy.file_access_log "[\"embedded message failed validation\"] | caused by " "ResponseFlagFilterValidationError.Flags[i]: [\"value must be in list \" [\"LH\" \"UH\" " "\"UT\" \"LR\" \"UR\" \"UF\" \"UC\" \"UO\" \"NR\" \"DI\" \"FI\" \"RL\" \"UAEX\" \"RLSE\" " - "\"DC\" \"URX\"]]): " + "\"DC\" \"URX\" \"SI\"]]): " "response_flag_filter {\n flags: \"UnsupportedFlag\"\n}\n"); } @@ -914,10 +920,189 @@ name: envoy.file_access_log "[\"embedded message failed validation\"] | caused by " "ResponseFlagFilterValidationError.Flags[i]: [\"value must be in list \" [\"LH\" \"UH\" " "\"UT\" \"LR\" \"UR\" \"UF\" \"UC\" \"UO\" \"NR\" \"DI\" \"FI\" \"RL\" \"UAEX\" \"RLSE\" " - "\"DC\" \"URX\"]]): " + "\"DC\" \"URX\" \"SI\"]]): " "response_flag_filter {\n flags: \"UnsupportedFlag\"\n}\n"); } +TEST_F(AccessLogImplTest, GrpcStatusFilterValues) { + const std::string yaml_template = R"EOF( +name: envoy.file_access_log +filter: + grpc_status_filter: + statuses: + - {} +config: + path: 
/dev/null +)EOF"; + + const auto desc = envoy::config::filter::accesslog::v2::GrpcStatusFilter_Status_descriptor(); + const int grpcStatuses = static_cast(Grpc::Status::GrpcStatus::MaximumValid) + 1; + if (desc->value_count() != grpcStatuses) { + FAIL() << "Mismatch in number of gRPC statuses, GrpcStatus has " << grpcStatuses + << ", GrpcStatusFilter_Status has " << desc->value_count() << "."; + } + + for (int i = 0; i < desc->value_count(); i++) { + InstanceSharedPtr log = AccessLogFactory::fromProto( + parseAccessLogFromV2Yaml(fmt::format(yaml_template, desc->value(i)->name())), context_); + + EXPECT_CALL(*file_, write(_)); + + response_trailers_.addCopy(Http::Headers::get().GrpcStatus, std::to_string(i)); + log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); + response_trailers_.remove(Http::Headers::get().GrpcStatus); + } +} + +TEST_F(AccessLogImplTest, GrpcStatusFilterUnsupportedValue) { + const std::string yaml = R"EOF( +name: envoy.file_access_log +filter: + grpc_status_filter: + statuses: + - NOT_A_VALID_CODE +config: + path: /dev/null + )EOF"; + + EXPECT_THROW_WITH_REGEX(AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_), + EnvoyException, ".*\"NOT_A_VALID_CODE\" for type TYPE_ENUM.*"); +} + +TEST_F(AccessLogImplTest, GrpcStatusFilterBlock) { + const std::string yaml = R"EOF( +name: envoy.file_access_log +filter: + grpc_status_filter: + statuses: + - OK +config: + path: /dev/null + )EOF"; + + const InstanceSharedPtr log = + AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + + response_trailers_.addCopy(Http::Headers::get().GrpcStatus, "1"); + + EXPECT_CALL(*file_, write(_)).Times(0); + log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); +} + +TEST_F(AccessLogImplTest, GrpcStatusFilterHttpCodes) { + const std::string yaml_template = R"EOF( +name: envoy.file_access_log +filter: + grpc_status_filter: + statuses: + - {} +config: + path: /dev/null +)EOF"; + 
+ // This mapping includes UNKNOWN <-> 200 because we expect that gRPC should provide an explicit + // status code for successes. In general, the only status codes that receive an HTTP mapping are + // those enumerated below with a non-UNKNOWN mapping. See: //source/common/grpc/status.cc and + // https://github.com/grpc/grpc/blob/master/doc/http-grpc-status-mapping.md. + const std::vector> statusMapping = { + {"UNKNOWN", 200}, {"INTERNAL", 400}, {"UNAUTHENTICATED", 401}, + {"PERMISSION_DENIED", 403}, {"UNAVAILABLE", 429}, {"UNIMPLEMENTED", 404}, + {"UNAVAILABLE", 502}, {"UNAVAILABLE", 503}, {"UNAVAILABLE", 504}}; + + for (const auto& pair : statusMapping) { + stream_info_.response_code_ = pair.second; + + const InstanceSharedPtr log = AccessLogFactory::fromProto( + parseAccessLogFromV2Yaml(fmt::format(yaml_template, pair.first)), context_); + + EXPECT_CALL(*file_, write(_)); + log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); + } +} + +TEST_F(AccessLogImplTest, GrpcStatusFilterNoCode) { + const std::string yaml = R"EOF( +name: envoy.file_access_log +filter: + grpc_status_filter: + statuses: + - UNKNOWN +config: + path: /dev/null + )EOF"; + + const InstanceSharedPtr log = + AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + + EXPECT_CALL(*file_, write(_)); + log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); +} + +TEST_F(AccessLogImplTest, GrpcStatusFilterExclude) { + const std::string yaml = R"EOF( +name: envoy.file_access_log +filter: + grpc_status_filter: + exclude: true + statuses: + - OK +config: + path: /dev/null + )EOF"; + + const InstanceSharedPtr log = + AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + + for (int i = 0; i <= static_cast(Grpc::Status::GrpcStatus::MaximumValid); i++) { + EXPECT_CALL(*file_, write(_)).Times(i == 0 ? 
0 : 1); + + response_trailers_.addCopy(Http::Headers::get().GrpcStatus, std::to_string(i)); + log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); + response_trailers_.remove(Http::Headers::get().GrpcStatus); + } +} + +TEST_F(AccessLogImplTest, GrpcStatusFilterExcludeFalse) { + const std::string yaml = R"EOF( +name: envoy.file_access_log +filter: + grpc_status_filter: + exclude: false + statuses: + - OK +config: + path: /dev/null + )EOF"; + + const InstanceSharedPtr log = + AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + + response_trailers_.addCopy(Http::Headers::get().GrpcStatus, "0"); + + EXPECT_CALL(*file_, write(_)); + log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); +} + +TEST_F(AccessLogImplTest, GrpcStatusFilterHeader) { + const std::string yaml = R"EOF( +name: envoy.file_access_log +filter: + grpc_status_filter: + statuses: + - OK +config: + path: /dev/null + )EOF"; + + const InstanceSharedPtr log = + AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + + EXPECT_CALL(*file_, write(_)); + + response_headers_.addCopy(Http::Headers::get().GrpcStatus, "0"); + log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); +} + } // namespace } // namespace AccessLog } // namespace Envoy diff --git a/test/common/access_log/access_log_manager_impl_test.cc b/test/common/access_log/access_log_manager_impl_test.cc index cde2c1a0fb8fc..4681e9453a7cc 100644 --- a/test/common/access_log/access_log_manager_impl_test.cc +++ b/test/common/access_log/access_log_manager_impl_test.cc @@ -1,46 +1,381 @@ #include #include "common/access_log/access_log_manager_impl.h" +#include "common/filesystem/file_shared_impl.h" #include "common/stats/isolated_store_impl.h" #include "test/mocks/access_log/mocks.h" #include "test/mocks/api/mocks.h" #include "test/mocks/event/mocks.h" #include "test/mocks/filesystem/mocks.h" -#include 
"test/test_common/test_base.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" using testing::_; +using testing::ByMove; +using testing::NiceMock; using testing::Return; +using testing::ReturnNew; using testing::ReturnRef; +using testing::Sequence; namespace Envoy { namespace AccessLog { +namespace { -TEST(AccessLogManagerImpl, reopenAllFiles) { - Api::MockApi api; - Filesystem::MockInstance file_system; - EXPECT_CALL(api, fileSystem()).WillRepeatedly(ReturnRef(file_system)); - Event::MockDispatcher dispatcher; - Thread::MutexBasicLockable lock; - - std::shared_ptr log1(new Filesystem::MockFile()); - std::shared_ptr log2(new Filesystem::MockFile()); - AccessLogManagerImpl access_log_manager(api, dispatcher, lock); - EXPECT_CALL(file_system, createFile("foo", _, _)).WillOnce(Return(log1)); - access_log_manager.createAccessLog("foo"); - EXPECT_CALL(file_system, createFile("bar", _, _)).WillOnce(Return(log2)); - access_log_manager.createAccessLog("bar"); +class AccessLogManagerImplTest : public testing::Test { +protected: + AccessLogManagerImplTest() + : file_(new NiceMock), thread_factory_(Thread::threadFactoryForTest()), + access_log_manager_(timeout_40ms_, api_, dispatcher_, lock_, store_) { + EXPECT_CALL(file_system_, createFile("foo")) + .WillOnce(Return(ByMove(std::unique_ptr>(file_)))); + + EXPECT_CALL(api_, fileSystem()).WillRepeatedly(ReturnRef(file_system_)); + EXPECT_CALL(api_, threadFactory()).WillRepeatedly(ReturnRef(thread_factory_)); + } + + NiceMock api_; + NiceMock file_system_; + NiceMock* file_; + const std::chrono::milliseconds timeout_40ms_{40}; + Stats::IsolatedStoreImpl store_; + Thread::ThreadFactory& thread_factory_; + NiceMock dispatcher_; + Thread::MutexBasicLockable lock_; + AccessLogManagerImpl access_log_manager_; +}; + +TEST_F(AccessLogManagerImplTest, BadFile) { + EXPECT_CALL(dispatcher_, createTimer_(_)); + EXPECT_CALL(*file_, open_()).WillOnce(Return(ByMove(Filesystem::resultFailure(false, 0)))); + 
EXPECT_THROW(access_log_manager_.createAccessLog("foo"), EnvoyException); +} + +TEST_F(AccessLogManagerImplTest, flushToLogFilePeriodically) { + NiceMock* timer = new NiceMock(&dispatcher_); + + EXPECT_CALL(*file_, open_()).WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); + AccessLogFileSharedPtr log_file = access_log_manager_.createAccessLog("foo"); + + EXPECT_CALL(*timer, enableTimer(timeout_40ms_)); + EXPECT_CALL(*file_, write_(_)) + .WillOnce(Invoke([](absl::string_view data) -> Api::IoCallSizeResult { + EXPECT_EQ(0, data.compare("test")); + return Filesystem::resultSuccess(static_cast(data.length())); + })); + + log_file->write("test"); + + { + Thread::LockGuard lock(file_->write_mutex_); + while (file_->num_writes_ != 1) { + file_->write_event_.wait(file_->write_mutex_); + } + } + + EXPECT_CALL(*file_, write_(_)) + .WillOnce(Invoke([](absl::string_view data) -> Api::IoCallSizeResult { + EXPECT_EQ(0, data.compare("test2")); + return Filesystem::resultSuccess(static_cast(data.length())); + })); + + // make sure timer is re-enabled on callback call + log_file->write("test2"); + EXPECT_CALL(*timer, enableTimer(timeout_40ms_)); + timer->callback_(); + + { + Thread::LockGuard lock(file_->write_mutex_); + while (file_->num_writes_ != 2) { + file_->write_event_.wait(file_->write_mutex_); + } + } + EXPECT_CALL(*file_, close_()).WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); +} + +TEST_F(AccessLogManagerImplTest, flushToLogFileOnDemand) { + NiceMock* timer = new NiceMock(&dispatcher_); + + EXPECT_CALL(*file_, open_()).WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); + AccessLogFileSharedPtr log_file = access_log_manager_.createAccessLog("foo"); + + EXPECT_CALL(*timer, enableTimer(timeout_40ms_)); + + // The first write to a given file will start the flush thread, which can flush + // immediately (race on whether it will or not). 
So do a write and flush to + // get that state out of the way, then test that small writes don't trigger a flush. + EXPECT_CALL(*file_, write_(_)) + .WillOnce(Invoke([](absl::string_view data) -> Api::IoCallSizeResult { + return Filesystem::resultSuccess(static_cast(data.length())); + })); + log_file->write("prime-it"); + log_file->flush(); + uint32_t expected_writes = 1; + { + Thread::LockGuard lock(file_->write_mutex_); + EXPECT_EQ(expected_writes, file_->num_writes_); + } + + EXPECT_CALL(*file_, write_(_)) + .WillOnce(Invoke([](absl::string_view data) -> Api::IoCallSizeResult { + EXPECT_EQ(0, data.compare("test")); + return Filesystem::resultSuccess(static_cast(data.length())); + })); + + log_file->write("test"); + + { + Thread::LockGuard lock(file_->write_mutex_); + EXPECT_EQ(expected_writes, file_->num_writes_); + } + + log_file->flush(); + expected_writes++; + { + Thread::LockGuard lock(file_->write_mutex_); + EXPECT_EQ(expected_writes, file_->num_writes_); + } + + EXPECT_CALL(*file_, write_(_)) + .WillOnce(Invoke([](absl::string_view data) -> Api::IoCallSizeResult { + EXPECT_EQ(0, data.compare("test2")); + return Filesystem::resultSuccess(static_cast(data.length())); + })); + + // make sure timer is re-enabled on callback call + log_file->write("test2"); + EXPECT_CALL(*timer, enableTimer(timeout_40ms_)); + timer->callback_(); + expected_writes++; + + { + Thread::LockGuard lock(file_->write_mutex_); + while (file_->num_writes_ != expected_writes) { + file_->write_event_.wait(file_->write_mutex_); + } + } + EXPECT_CALL(*file_, close_()).WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); +} + +TEST_F(AccessLogManagerImplTest, reopenFile) { + NiceMock* timer = new NiceMock(&dispatcher_); + + Sequence sq; + EXPECT_CALL(*file_, open_()) + .InSequence(sq) + .WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); + AccessLogFileSharedPtr log_file = access_log_manager_.createAccessLog("foo"); + + EXPECT_CALL(*file_, write_(_)) + .InSequence(sq) + 
.WillOnce(Invoke([](absl::string_view data) -> Api::IoCallSizeResult { + EXPECT_EQ(0, data.compare("before")); + return Filesystem::resultSuccess(static_cast(data.length())); + })); + + log_file->write("before"); + timer->callback_(); + + { + Thread::LockGuard lock(file_->write_mutex_); + while (file_->num_writes_ != 1) { + file_->write_event_.wait(file_->write_mutex_); + } + } + + EXPECT_CALL(*file_, close_()) + .InSequence(sq) + .WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); + EXPECT_CALL(*file_, open_()) + .InSequence(sq) + .WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); + + EXPECT_CALL(*file_, write_(_)) + .InSequence(sq) + .WillOnce(Invoke([](absl::string_view data) -> Api::IoCallSizeResult { + EXPECT_EQ(0, data.compare("reopened")); + return Filesystem::resultSuccess(static_cast(data.length())); + })); + + EXPECT_CALL(*file_, close_()) + .InSequence(sq) + .WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); + + log_file->reopen(); + log_file->write("reopened"); + timer->callback_(); + + { + Thread::LockGuard lock(file_->write_mutex_); + while (file_->num_writes_ != 2) { + file_->write_event_.wait(file_->write_mutex_); + } + } +} + +TEST_F(AccessLogManagerImplTest, reopenThrows) { + NiceMock* timer = new NiceMock(&dispatcher_); + + EXPECT_CALL(*file_, write_(_)) + .WillRepeatedly(Invoke([](absl::string_view data) -> Api::IoCallSizeResult { + return Filesystem::resultSuccess(static_cast(data.length())); + })); + + Sequence sq; + EXPECT_CALL(*file_, open_()) + .InSequence(sq) + .WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); + + AccessLogFileSharedPtr log_file = access_log_manager_.createAccessLog("foo"); + EXPECT_CALL(*file_, close_()) + .InSequence(sq) + .WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); + EXPECT_CALL(*file_, open_()) + .InSequence(sq) + .WillOnce(Return(ByMove(Filesystem::resultFailure(false, 0)))); + + log_file->write("test write"); + timer->callback_(); + { + Thread::LockGuard 
lock(file_->write_mutex_); + while (file_->num_writes_ != 1) { + file_->write_event_.wait(file_->write_mutex_); + } + } + log_file->reopen(); + + log_file->write("this is to force reopen"); + timer->callback_(); + + { + Thread::LockGuard lock(file_->open_mutex_); + while (file_->num_opens_ != 2) { + file_->open_event_.wait(file_->open_mutex_); + } + } + + // write call should not cause any exceptions + log_file->write("random data"); + timer->callback_(); +} + +TEST_F(AccessLogManagerImplTest, bigDataChunkShouldBeFlushedWithoutTimer) { + EXPECT_CALL(*file_, open_()).WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); + AccessLogFileSharedPtr log_file = access_log_manager_.createAccessLog("foo"); + + EXPECT_CALL(*file_, write_(_)) + .WillOnce(Invoke([](absl::string_view data) -> Api::IoCallSizeResult { + EXPECT_EQ(0, data.compare("a")); + return Filesystem::resultSuccess(static_cast(data.length())); + })); + + log_file->write("a"); + + { + Thread::LockGuard lock(file_->write_mutex_); + while (file_->num_writes_ != 1) { + file_->write_event_.wait(file_->write_mutex_); + } + } + + // First write happens without waiting on thread_flush_. 
Now make a big string and it should be + // flushed even when timer is not enabled + EXPECT_CALL(*file_, write_(_)) + .WillOnce(Invoke([](absl::string_view data) -> Api::IoCallSizeResult { + std::string expected(1024 * 64 + 1, 'b'); + EXPECT_EQ(0, data.compare(expected)); + return Filesystem::resultSuccess(static_cast(data.length())); + })); + + std::string big_string(1024 * 64 + 1, 'b'); + log_file->write(big_string); + + { + Thread::LockGuard lock(file_->write_mutex_); + while (file_->num_writes_ != 2) { + file_->write_event_.wait(file_->write_mutex_); + } + } + EXPECT_CALL(*file_, close_()).WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); +} + +TEST_F(AccessLogManagerImplTest, reopenAllFiles) { + EXPECT_CALL(dispatcher_, createTimer_(_)).WillRepeatedly(ReturnNew>()); + + Sequence sq; + EXPECT_CALL(*file_, open_()) + .InSequence(sq) + .WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); + AccessLogFileSharedPtr log = access_log_manager_.createAccessLog("foo"); + + NiceMock* file2 = new NiceMock; + EXPECT_CALL(file_system_, createFile("bar")) + .WillOnce(Return(ByMove(std::unique_ptr>(file2)))); + + Sequence sq2; + EXPECT_CALL(*file2, open_()) + .InSequence(sq2) + .WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); + AccessLogFileSharedPtr log2 = access_log_manager_.createAccessLog("bar"); // Make sure that getting the access log with the same name returns the same underlying file. 
- EXPECT_EQ(log1, access_log_manager.createAccessLog("foo")); - EXPECT_EQ(log2, access_log_manager.createAccessLog("bar")); + EXPECT_EQ(log, access_log_manager_.createAccessLog("foo")); + EXPECT_EQ(log2, access_log_manager_.createAccessLog("bar")); + + // Test that reopen reopens all of the files + EXPECT_CALL(*file_, write_(_)) + .WillRepeatedly(Invoke([](absl::string_view data) -> Api::IoCallSizeResult { + return Filesystem::resultSuccess(static_cast(data.length())); + })); + + EXPECT_CALL(*file2, write_(_)) + .WillRepeatedly(Invoke([](absl::string_view data) -> Api::IoCallSizeResult { + return Filesystem::resultSuccess(static_cast(data.length())); + })); + + EXPECT_CALL(*file_, close_()) + .InSequence(sq) + .WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); + EXPECT_CALL(*file2, close_()) + .InSequence(sq2) + .WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); + + EXPECT_CALL(*file_, open_()) + .InSequence(sq) + .WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); + EXPECT_CALL(*file2, open_()) + .InSequence(sq2) + .WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); + + access_log_manager_.reopen(); + + log->write("this is to force reopen"); + log2->write("this is to force reopen"); + + { + Thread::LockGuard lock(file_->open_mutex_); + while (file_->num_opens_ != 2) { + file_->open_event_.wait(file_->open_mutex_); + } + } + + { + Thread::LockGuard lock(file2->open_mutex_); + while (file2->num_opens_ != 2) { + file2->open_event_.wait(file2->open_mutex_); + } + } - EXPECT_CALL(*log1, reopen()); - EXPECT_CALL(*log2, reopen()); - access_log_manager.reopen(); + EXPECT_CALL(*file_, close_()) + .InSequence(sq) + .WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); + EXPECT_CALL(*file2, close_()) + .InSequence(sq2) + .WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); } +} // namespace } // namespace AccessLog } // namespace Envoy diff --git a/test/common/buffer/BUILD b/test/common/buffer/BUILD index 
3e46f1b317dc9..1809fc522fb83 100644 --- a/test/common/buffer/BUILD +++ b/test/common/buffer/BUILD @@ -2,10 +2,12 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", "envoy_cc_test", "envoy_cc_test_binary", "envoy_cc_test_library", "envoy_package", + "envoy_proto_library", ) envoy_package() @@ -19,6 +21,31 @@ envoy_cc_test_library( ], ) +envoy_proto_library( + name = "buffer_fuzz_proto", + srcs = ["buffer_fuzz.proto"], +) + +envoy_cc_test_library( + name = "buffer_fuzz_lib", + srcs = ["buffer_fuzz.cc"], + hdrs = ["buffer_fuzz.h"], + deps = [ + ":buffer_fuzz_proto_cc", + "//source/common/buffer:buffer_lib", + "//source/common/common:stack_array", + "//source/common/memory:stats_lib", + "//source/common/network:io_socket_handle_lib", + ], +) + +envoy_cc_fuzz_test( + name = "buffer_fuzz_test", + srcs = ["buffer_fuzz_test.cc"], + corpus = "buffer_corpus", + deps = [":buffer_fuzz_lib"], +) + envoy_cc_test( name = "buffer_test", srcs = ["buffer_test.cc"], @@ -30,12 +57,22 @@ envoy_cc_test( ], ) +envoy_cc_fuzz_test( + name = "new_buffer_fuzz_test", + srcs = ["new_buffer_fuzz_test.cc"], + corpus = "buffer_corpus", + deps = [":buffer_fuzz_lib"], +) + envoy_cc_test( name = "owned_impl_test", srcs = ["owned_impl_test.cc"], deps = [ + ":utility_lib", "//source/common/buffer:buffer_lib", + "//source/common/network:io_socket_handle_lib", "//test/mocks/api:api_mocks", + "//test/test_common:logging_lib", "//test/test_common:threadsafe_singleton_injector_lib", ], ) @@ -44,8 +81,10 @@ envoy_cc_test( name = "watermark_buffer_test", srcs = ["watermark_buffer_test.cc"], deps = [ + ":utility_lib", "//source/common/buffer:buffer_lib", "//source/common/buffer:watermark_buffer_lib", + "//source/common/network:io_socket_handle_lib", ], ) @@ -53,6 +92,7 @@ envoy_cc_test( name = "zero_copy_input_stream_test", srcs = ["zero_copy_input_stream_test.cc"], deps = [ + ":utility_lib", "//source/common/buffer:zero_copy_input_stream_lib", ], ) diff --git 
a/test/common/buffer/buffer_corpus/basic b/test/common/buffer/buffer_corpus/basic new file mode 100644 index 0000000000000..9fd31255e2e63 --- /dev/null +++ b/test/common/buffer/buffer_corpus/basic @@ -0,0 +1,54 @@ +actions { + add_buffer_fragment: 1 +} +actions { + add_string: 3 +} +actions { + target_index: 1 + add_buffer: 0 +} +actions { + prepend_string: 5 +} +actions { + target_index: 1 + prepend_buffer: 0 +} +actions { + reserve_commit { + reserve_length: 2048 + commit_length: 765 + } +} +actions { + copy_out { + start: 7 + length: 200 + } +} +actions { + drain: 98 +} +actions { + linearize: 45 +} +actions { + target_index: 1 + move { + source_index: 0 + length: 23 + } +} +actions { + target_index: 0 + move { + source_index: 1 + } +} +actions { + read: 2789 +} +actions { + write: {} +} diff --git a/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5080353465696256 b/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5080353465696256 new file mode 100644 index 0000000000000..108599bf81f45 --- /dev/null +++ b/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5080353465696256 @@ -0,0 +1,137 @@ +actions { } actions { +} actions { add_string: 8 +} +actions { + target_index: 1 + read: 256 +} +actions { +} +actions { + drain: 0 +} +actions { +} +actions { + target_index: 1 + add_buffer: 196608 +} +actions { + target_index: 1 + add_buffer: 1 +} +actions { +} +actions { + target_index: 98 +} +actions { +} +actions { + target_index: 1 + add_buffer: 704643070 +} +actions { + add_buffer: 4294967294 +} +actions { + drain: 0 +} +actions { + prepend_buffer: 0 +} +actions { +} +actions { + target_index: 2789 +} +actions { + target_index: 2789 +} +actions { + prepend_string: 1600414817 +} +actions { + target_index: 1 +} +actions { + add_string: 2789 +} +actions { + target_index: 1 + add_buffer: 1 +} +actions { + target_index: 2789 +} +actions { + target_index: 1 + add_buffer: 196608 +} 
+actions { +} +actions { + target_index: 2789 +} +actions { + target_index: 2789 + add_buffer: 3841982464 +} +actions { + drain: 0 +} +actions { + target_index: 1 + add_buffer: 0 +} +actions { + add_string: 0 +} +actions { +} +actions { + target_index: 1 +} +actions { + target_index: 1 + add_buffer: 4294967294 +} +actions { + add_buffer: 1 +} +actions { + target_index: 2789 + add_buffer: 1 +} +actions { + target_index: 2789 + add_buffer: 1 +} +actions { + target_index: 1 + add_buffer: 0 +} +actions { + target_index: 1 +} +actions { + target_index: 229 +} +actions { +} +actions { +} +actions { + target_index: 2789 + add_buffer: 1 +} +actions { + target_index: 1 + add_buffer: 196608 +} +actions { +} +actions { + target_index: 2789 + add_buffer: 1 +} diff --git a/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5644734729551872 b/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5644734729551872 new file mode 100644 index 0000000000000..85a74bf34145c --- /dev/null +++ b/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5644734729551872 @@ -0,0 +1,77 @@ +actions { } actions { +} actions { } actions { write { } } actions { } actions { } actions { } actions { +} +actions { +} +actions { +} +actions { +} +actions { +} +actions { +} +actions { +} +actions { +} +actions { +} +actions { +} +actions { + add_string: 2097152 +} +actions { + target_index: 1 + add_buffer: 0 +} +actions { + target_index: 1 +} +actions { + add_buffer: 1 +} +actions { +} +actions { + target_index: 1 + add_buffer: 0 +} +actions { + add_buffer: 1 +} +actions { + add_buffer: 268435456 +} +actions { + add_buffer: 1 +} +actions { + target_index: 1 + add_buffer: 0 +} +actions { + target_index: 1 + add_buffer: 0 +} +actions { + add_buffer: 4 +} +actions { + add_buffer: 1 +} +actions { + add_buffer: 1 +} +actions { + add_buffer: 1 +} +actions { + target_index: 1 + add_buffer: 0 +} +actions { + target_index: 1 + 
add_buffer: 0 +} diff --git a/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5654939127250944 b/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5654939127250944 new file mode 100644 index 0000000000000..6a0f316806248 --- /dev/null +++ b/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5654939127250944 @@ -0,0 +1,78 @@ +actions { + add_string: 11927552 +} +actions { + target_index: 4 + add_buffer: 0 +} +actions { + prepend_string: 1869177088 +} +actions { +} +actions { + move { + source_index: 4294967293 + } +} +actions { +} +actions { + linearize: 8388608 +} +actions { + prepend_string: 1869177088 +} +actions { + linearize: 1 +} +actions { + copy_out { + length: 4194304 + } +} +actions { + drain: 1 +} +actions { +} +actions { + add_string: 65534 +} +actions { + target_index: 1769235297 +} +actions { + add_string: 11927552 +} +actions { + target_index: 3053453312 + add_string: 11927552 +} +actions { +} +actions { + target_index: 11927552 + drain: 1 +} +actions { + target_index: 1769235297 +} +actions { + write { + } +} +actions { + target_index: 1769235297 +} +actions { + linearize: 1 +} +actions { + add_buffer_fragment: 1 +} +actions { + copy_out { + length: 4194304 + } +} diff --git a/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5668091688648704 b/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5668091688648704 new file mode 100644 index 0000000000000..2c1dd0c5716df --- /dev/null +++ b/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5668091688648704 @@ -0,0 +1 @@ +actions { target_index: 1 read: 1 } actions { add_buffer_fragment: 1 } actions { prepend_string: 0 } actions { add_string: 4 } actions { target_index: 1 move { length: 1 } } actions { target_index: 1 add_buffer_fragment: 1 } diff --git 
a/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5716850116132864 b/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5716850116132864 new file mode 100644 index 0000000000000..250ea3ecc9b52 --- /dev/null +++ b/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5716850116132864 @@ -0,0 +1,32 @@ +actions { + add_string: 6 +} +actions { + reserve_commit { + reserve_length: 971 + commit_length: 1 + } +} +actions { + target_index: 1 + add_string: 2 +} +actions { + target_index: 1 + prepend_string: 3 +} +actions { + target_index: 1 + prepend_buffer: 0 +} +actions { + move { + source_index: 1 + } +} +actions { + target_index: 1 + move { + length: 11 + } +} diff --git a/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5744501719564288 b/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5744501719564288 new file mode 100644 index 0000000000000..8b14a84b8a43c --- /dev/null +++ b/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5744501719564288 @@ -0,0 +1 @@ +actions { } actions { read: 1 } actions { read: 997 } actions { } actions { } actions { } actions { add_buffer_fragment: 4 } actions { read: 1 } actions { read: 997 } actions { } actions { add_buffer_fragment: 1 } actions { target_index: 1 read: 1 } actions { read: 4 } actions { read: 997 } actions { add_buffer_fragment: 1 } actions { prepend_buffer: 4 } actions { prepend_string: 4 } actions { read: 1 } actions { read: 997 } actions { add_buffer_fragment: 4 } actions { read: 1 } actions { } actions { read: 2789 } actions { } actions { read: 536 } actions { write { } } actions { } diff --git a/test/common/buffer/buffer_corpus/crash-d60939b6186fa6186e0b574ac67aa6df8f1081cd b/test/common/buffer/buffer_corpus/crash-d60939b6186fa6186e0b574ac67aa6df8f1081cd new file mode 100644 index 0000000000000..6c924acd31c07 --- /dev/null +++ 
b/test/common/buffer/buffer_corpus/crash-d60939b6186fa6186e0b574ac67aa6df8f1081cd @@ -0,0 +1,53 @@ +actions { + prepend_string: 1024 +} +actions { +} +actions { + add_buffer_fragment: 1 +} +actions { + read: 2789 +} +actions { + read: 0 +} +actions { + linearize: 45 +} +actions { + reserve_commit { + reserve_length: 14 + commit_length: 1048576 + } +} +actions { + move { + source_index: 1 + } +} +actions { + reserve_commit { + reserve_length: 2048 + commit_length: 1048576 + } +} +actions { + read: 0 +} +actions { + drain: 1024 +} +actions { + move { + length: 23 + } +} +actions { + linearize: 45 +} +actions { + copy_out { + length: 5 + } +} diff --git a/test/common/buffer/buffer_corpus/crash-ed103900aec1285149aafc05102a541d9ec51363 b/test/common/buffer/buffer_corpus/crash-ed103900aec1285149aafc05102a541d9ec51363 new file mode 100644 index 0000000000000..a25b54ec0ca3e --- /dev/null +++ b/test/common/buffer/buffer_corpus/crash-ed103900aec1285149aafc05102a541d9ec51363 @@ -0,0 +1,49 @@ +actions { + add_buffer_fragment: 1 +} +actions { + add_string: 3 +} +actions { + target_index: 1 + add_buffer: 0 +} +actions { + prepend_string: 5 +} +actions { + target_index: 1 + prepend_buffer: 0 +} +actions { +} +actions { + copy_out { + start: 7 + length: 200 + } +} +actions { + drain: 98 +} +actions { + linearize: 45 +} +actions { + target_index: 1 + move { + length: 23 + } +} +actions { + move { + source_index: 1 + } +} +actions { + read: 2789 +} +actions { + write { + } +} diff --git a/test/common/buffer/buffer_fuzz.cc b/test/common/buffer/buffer_fuzz.cc new file mode 100644 index 0000000000000..0ac318f664831 --- /dev/null +++ b/test/common/buffer/buffer_fuzz.cc @@ -0,0 +1,422 @@ +#include "test/common/buffer/buffer_fuzz.h" + +#include +#include + +#include "common/buffer/buffer_impl.h" +#include "common/common/assert.h" +#include "common/common/logger.h" +#include "common/common/stack_array.h" +#include "common/memory/stats.h" +#include "common/network/io_socket_handle_impl.h" + 
+#include "gtest/gtest.h" + +// Strong assertion that applies across all compilation modes and doesn't rely +// on gtest, which only provides soft fails that don't trip oss-fuzz failures. +#define FUZZ_ASSERT(x) RELEASE_ASSERT(x, "") + +namespace Envoy { + +namespace { + +// The number of buffers tracked. Each buffer fuzzer action references one or +// more of these. We don't need a ton of buffers to capture the range of +// possible behaviors, at least two to properly model move operations, let's +// assume only 3 for now. +constexpr uint32_t BufferCount = 3; + +// These data are exogenous to the buffer, we don't need to worry about their +// deallocation, just keep them around until the fuzz run is over. +struct Context { + std::vector> fragments_; + std::vector> strings_; +}; + +// Bound the maximum allocation size. +constexpr uint32_t MaxAllocation = 2 * 1024 * 1024; + +uint32_t clampSize(uint32_t size, uint32_t max_alloc) { + return std::min(size, std::min(MaxAllocation, max_alloc)); +} + +void releaseFragmentAllocation(const void* p, size_t, const Buffer::BufferFragmentImpl*) { + ::free(const_cast(p)); +} + +// Really simple string implementation of Buffer. 
+class StringBuffer : public Buffer::Instance { +public: + void add(const void* data, uint64_t size) override { + data_ += std::string(std::string(static_cast(data), size)); + } + + void addBufferFragment(Buffer::BufferFragment& fragment) override { + add(fragment.data(), fragment.size()); + fragment.done(); + } + + void add(absl::string_view data) override { add(data.data(), data.size()); } + + void add(const Buffer::Instance& data) override { + const StringBuffer& src = dynamic_cast(data); + data_ += src.data_; + } + + void prepend(absl::string_view data) override { data_ = std::string(data) + data_; } + + void prepend(Instance& data) override { + StringBuffer& src = dynamic_cast(data); + data_ = src.data_ + data_; + src.data_.clear(); + } + + void commit(Buffer::RawSlice* iovecs, uint64_t num_iovecs) override { + FUZZ_ASSERT(num_iovecs == 1); + FUZZ_ASSERT(tmp_buf_.get() == iovecs[0].mem_); + data_ += std::string(tmp_buf_.get(), iovecs[0].len_); + } + + void copyOut(size_t start, uint64_t size, void* data) const override { + ::memcpy(data, data_.data() + start, size); + } + + void drain(uint64_t size) override { data_ = data_.substr(size); } + + uint64_t getRawSlices(Buffer::RawSlice* out, uint64_t out_size) const override { + FUZZ_ASSERT(out_size > 0); + // Sketchy, but probably will work for test purposes. + out->mem_ = const_cast(data_.data()); + out->len_ = data_.size(); + return 1; + } + + uint64_t length() const override { return data_.size(); } + + void* linearize(uint32_t /*size*/) override { + // Sketchy, but probably will work for test purposes. 
+ return const_cast(data_.data()); + } + + void move(Buffer::Instance& rhs) override { move(rhs, rhs.length()); } + + void move(Buffer::Instance& rhs, uint64_t length) override { + StringBuffer& src = dynamic_cast(rhs); + data_ += src.data_.substr(0, length); + src.data_ = src.data_.substr(length); + } + + Api::IoCallUint64Result read(Network::IoHandle& io_handle, uint64_t max_length) override { + FUZZ_ASSERT(max_length <= MaxAllocation); + Buffer::RawSlice slice{tmp_buf_.get(), MaxAllocation}; + Api::IoCallUint64Result result = io_handle.readv(max_length, &slice, 1); + FUZZ_ASSERT(result.ok() && result.rc_ > 0); + data_ += std::string(tmp_buf_.get(), result.rc_); + return result; + } + + uint64_t reserve(uint64_t length, Buffer::RawSlice* iovecs, uint64_t num_iovecs) override { + FUZZ_ASSERT(num_iovecs > 0); + FUZZ_ASSERT(length <= MaxAllocation); + iovecs[0].mem_ = tmp_buf_.get(); + iovecs[0].len_ = length; + return 1; + } + + ssize_t search(const void* data, uint64_t size, size_t start) const override { + return data_.find(std::string(static_cast(data), size), start); + } + + std::string toString() const override { return data_; } + + Api::IoCallUint64Result write(Network::IoHandle& io_handle) override { + const Buffer::RawSlice slice{const_cast(data_.data()), data_.size()}; + Api::IoCallUint64Result result = io_handle.writev(&slice, 1); + FUZZ_ASSERT(result.ok()); + data_ = data_.substr(result.rc_); + return result; + } + + std::string data_; + std::unique_ptr tmp_buf_{new char[MaxAllocation]}; +}; + +typedef std::vector> BufferList; + +// Process a single buffer operation. 
+uint32_t bufferAction(Context& ctxt, char insert_value, uint32_t max_alloc, BufferList& buffers, + const test::common::buffer::Action& action) { + const uint32_t target_index = action.target_index() % BufferCount; + Buffer::Instance& target_buffer = *buffers[target_index]; + uint32_t allocated = 0; + + switch (action.action_selector_case()) { + case test::common::buffer::Action::kAddBufferFragment: { + const uint32_t size = clampSize(action.add_buffer_fragment(), max_alloc); + allocated += size; + void* p = ::malloc(size); + FUZZ_ASSERT(p != nullptr); + ::memset(p, insert_value, size); + auto fragment = + std::make_unique(p, size, releaseFragmentAllocation); + ctxt.fragments_.emplace_back(std::move(fragment)); + const uint32_t previous_length = target_buffer.length(); + const std::string new_value{static_cast(p), size}; + target_buffer.addBufferFragment(*ctxt.fragments_.back()); + FUZZ_ASSERT(previous_length == target_buffer.search(new_value.data(), size, previous_length)); + break; + } + case test::common::buffer::Action::kAddString: { + const uint32_t size = clampSize(action.add_string(), max_alloc); + allocated += size; + auto string = std::make_unique(size, insert_value); + ctxt.strings_.emplace_back(std::move(string)); + const uint32_t previous_length = target_buffer.length(); + target_buffer.add(absl::string_view(*ctxt.strings_.back())); + FUZZ_ASSERT(previous_length == + target_buffer.search(ctxt.strings_.back()->data(), size, previous_length)); + break; + } + case test::common::buffer::Action::kAddBuffer: { + const uint32_t source_index = action.add_buffer() % BufferCount; + if (target_index == source_index) { + break; + } + Buffer::Instance& source_buffer = *buffers[source_index]; + const std::string source_contents = source_buffer.toString(); + const uint32_t previous_length = target_buffer.length(); + target_buffer.add(source_buffer); + FUZZ_ASSERT(previous_length == target_buffer.search(source_contents.data(), + source_contents.size(), 
previous_length)); + break; + } + case test::common::buffer::Action::kPrependString: { + const uint32_t size = clampSize(action.prepend_string(), max_alloc); + allocated += size; + auto string = std::make_unique(size, insert_value); + ctxt.strings_.emplace_back(std::move(string)); + target_buffer.prepend(absl::string_view(*ctxt.strings_.back())); + FUZZ_ASSERT(target_buffer.search(ctxt.strings_.back()->data(), size, 0) == 0); + break; + } + case test::common::buffer::Action::kPrependBuffer: { + const uint32_t source_index = action.prepend_buffer() % BufferCount; + if (target_index == source_index) { + break; + } + Buffer::Instance& source_buffer = *buffers[source_index]; + const std::string source_contents = source_buffer.toString(); + target_buffer.prepend(source_buffer); + FUZZ_ASSERT(target_buffer.search(source_contents.data(), source_contents.size(), 0) == 0); + break; + } + case test::common::buffer::Action::kReserveCommit: { + const uint32_t previous_length = target_buffer.length(); + const uint32_t reserve_length = clampSize(action.reserve_commit().reserve_length(), max_alloc); + allocated += reserve_length; + if (reserve_length == 0) { + break; + } + constexpr uint32_t reserve_slices = 16; + Buffer::RawSlice slices[reserve_slices]; + const uint32_t allocated_slices = target_buffer.reserve(reserve_length, slices, reserve_slices); + uint32_t allocated_length = 0; + for (uint32_t i = 0; i < allocated_slices; ++i) { + ::memset(slices[i].mem_, insert_value, slices[i].len_); + allocated_length += slices[i].len_; + } + FUZZ_ASSERT(reserve_length <= allocated_length); + const uint32_t target_length = + std::min(reserve_length, action.reserve_commit().commit_length()); + uint32_t shrink_length = allocated_length; + int32_t shrink_slice = allocated_slices - 1; + while (shrink_length > target_length) { + FUZZ_ASSERT(shrink_slice >= 0); + const uint32_t available = slices[shrink_slice].len_; + const uint32_t remainder = shrink_length - target_length; + if (available >= 
remainder) { + slices[shrink_slice].len_ -= remainder; + break; + } + shrink_length -= available; + slices[shrink_slice--].len_ = 0; + } + target_buffer.commit(slices, allocated_slices); + FUZZ_ASSERT(previous_length + target_length == target_buffer.length()); + break; + } + case test::common::buffer::Action::kCopyOut: { + const uint32_t start = + std::min(action.copy_out().start(), static_cast(target_buffer.length())); + uint8_t copy_buffer[2 * 1024 * 1024]; + const uint32_t length = + std::min(static_cast(target_buffer.length() - start), + std::min(action.copy_out().length(), static_cast(sizeof(copy_buffer)))); + target_buffer.copyOut(start, length, copy_buffer); + const std::string contents = target_buffer.toString(); + FUZZ_ASSERT(::memcmp(copy_buffer, contents.data() + start, length) == 0); + break; + } + case test::common::buffer::Action::kDrain: { + const uint32_t previous_length = target_buffer.length(); + const uint32_t drain_length = + std::min(static_cast(target_buffer.length()), action.drain()); + target_buffer.drain(drain_length); + FUZZ_ASSERT(previous_length - drain_length == target_buffer.length()); + break; + } + case test::common::buffer::Action::kLinearize: { + const uint32_t linearize_size = + std::min(static_cast(target_buffer.length()), action.linearize()); + target_buffer.linearize(linearize_size); + Buffer::RawSlice slices[1]; + const uint64_t slices_used = target_buffer.getRawSlices(slices, 1); + if (linearize_size > 0) { + FUZZ_ASSERT(slices_used >= 1); + FUZZ_ASSERT(slices[0].len_ >= linearize_size); + } + break; + } + case test::common::buffer::Action::kMove: { + const uint32_t source_index = action.move().source_index() % BufferCount; + if (target_index == source_index) { + break; + } + Buffer::Instance& source_buffer = *buffers[source_index]; + if (action.move().length() == 0) { + target_buffer.move(source_buffer); + } else { + target_buffer.move(source_buffer, std::min(static_cast(source_buffer.length()), + action.move().length())); + 
} + break; + } + case test::common::buffer::Action::kRead: { + const uint32_t max_length = clampSize(action.read(), max_alloc); + allocated += max_length; + if (max_length == 0) { + break; + } + int pipe_fds[2] = {0, 0}; + FUZZ_ASSERT(::pipe(pipe_fds) == 0); + Network::IoSocketHandleImpl io_handle(pipe_fds[0]); + FUZZ_ASSERT(::fcntl(pipe_fds[0], F_SETFL, O_NONBLOCK) == 0); + FUZZ_ASSERT(::fcntl(pipe_fds[1], F_SETFL, O_NONBLOCK) == 0); + std::string data(max_length, insert_value); + const ssize_t rc = ::write(pipe_fds[1], data.data(), max_length); + FUZZ_ASSERT(rc > 0); + const uint32_t previous_length = target_buffer.length(); + Api::IoCallUint64Result result = target_buffer.read(io_handle, max_length); + FUZZ_ASSERT(result.rc_ == static_cast(rc)); + FUZZ_ASSERT(::close(pipe_fds[1]) == 0); + FUZZ_ASSERT(previous_length == target_buffer.search(data.data(), rc, previous_length)); + break; + } + case test::common::buffer::Action::kWrite: { + int pipe_fds[2] = {0, 0}; + FUZZ_ASSERT(::pipe(pipe_fds) == 0); + Network::IoSocketHandleImpl io_handle(pipe_fds[1]); + FUZZ_ASSERT(::fcntl(pipe_fds[0], F_SETFL, O_NONBLOCK) == 0); + FUZZ_ASSERT(::fcntl(pipe_fds[1], F_SETFL, O_NONBLOCK) == 0); + uint64_t rc; + do { + const bool empty = target_buffer.length() == 0; + const std::string previous_data = target_buffer.toString(); + const auto result = target_buffer.write(io_handle); + FUZZ_ASSERT(result.ok()); + rc = result.rc_; + ENVOY_LOG_MISC(trace, "Write rc: {} errno: {}", rc, + result.err_ != nullptr ? result.err_->getErrorDetails() : "-"); + if (empty) { + FUZZ_ASSERT(rc == 0); + } else { + auto buf = std::make_unique(rc); + FUZZ_ASSERT(static_cast(::read(pipe_fds[0], buf.get(), rc)) == rc); + FUZZ_ASSERT(::memcmp(buf.get(), previous_data.data(), rc) == 0); + } + } while (rc > 0); + FUZZ_ASSERT(::close(pipe_fds[0]) == 0); + break; + } + default: + // Maybe nothing is set? 
+ break; + } + + return allocated; +} + +} // namespace + +void BufferFuzz::bufferFuzz(const test::common::buffer::BufferFuzzTestCase& input, bool old_impl) { + ENVOY_LOG_MISC(trace, "Using {} buffer implementation", old_impl ? "old" : "new"); + Buffer::OwnedImpl::useOldImpl(old_impl); + Context ctxt; + // Fuzzed buffers. + BufferList buffers; + // Shadow buffers based on StringBuffer. + BufferList linear_buffers; + for (uint32_t i = 0; i < BufferCount; ++i) { + buffers.emplace_back(new Buffer::OwnedImpl()); + linear_buffers.emplace_back(new StringBuffer()); + } + + const uint64_t initial_allocated_bytes = Memory::Stats::totalCurrentlyAllocated(); + + // Soft bound on the available memory for allocation to avoid OOMs and + // timeouts. + uint32_t available_alloc = 2 * MaxAllocation; + constexpr auto max_actions = 1024; + for (int i = 0; i < std::min(max_actions, input.actions().size()); ++i) { + const char insert_value = 'a' + i % 26; + const auto& action = input.actions(i); + const uint64_t current_allocated_bytes = Memory::Stats::totalCurrentlyAllocated(); + ENVOY_LOG_MISC(debug, "Action {}", action.DebugString()); + const uint32_t allocated = bufferAction(ctxt, insert_value, available_alloc, buffers, action); + const uint32_t linear_allocated = + bufferAction(ctxt, insert_value, available_alloc, linear_buffers, action); + FUZZ_ASSERT(allocated == linear_allocated); + FUZZ_ASSERT(allocated <= available_alloc); + available_alloc -= allocated; + // When tracing, dump everything. + for (uint32_t j = 0; j < BufferCount; ++j) { + ENVOY_LOG_MISC(trace, "Buffer at index {}", j); + ENVOY_LOG_MISC(trace, "B: {}", buffers[j]->toString()); + ENVOY_LOG_MISC(trace, "L: {}", linear_buffers[j]->toString()); + } + // Verification pass, only non-mutating methods for buffers. 
+ for (uint32_t j = 0; j < BufferCount; ++j) { + if (buffers[j]->toString() != linear_buffers[j]->toString()) { + ENVOY_LOG_MISC(debug, "Mismatched buffers at index {}", j); + ENVOY_LOG_MISC(debug, "B: {}", buffers[j]->toString()); + ENVOY_LOG_MISC(debug, "L: {}", linear_buffers[j]->toString()); + FUZZ_ASSERT(false); + } + FUZZ_ASSERT(buffers[j]->length() == linear_buffers[j]->length()); + constexpr uint32_t max_slices = 16; + Buffer::RawSlice slices[max_slices]; + buffers[j]->getRawSlices(slices, max_slices); + // This string should never appear (e.g. we don't synthesize _garbage as a + // pattern), verify that it's never found. + std::string garbage{"_garbage"}; + FUZZ_ASSERT(buffers[j]->search(garbage.data(), garbage.size(), 0) == -1); + } + ENVOY_LOG_MISC(debug, "[{} MB allocated total, {} MB since start]", + current_allocated_bytes / (1024.0 * 1024), + (current_allocated_bytes - initial_allocated_bytes) / (1024.0 * 1024)); + // We bail out if buffers get too big, otherwise we will OOM the sanitizer. + // We can't use Memory::Stats::totalCurrentlyAllocated() here as we don't + // have tcmalloc in ASAN builds, so just do a simple count. 
+ uint64_t total_length = 0; + for (const auto& buf : buffers) { + total_length += buf->length(); + } + if (total_length > 4 * MaxAllocation) { + ENVOY_LOG_MISC(debug, "Terminating early with total buffer length {} to avoid OOM", + total_length); + break; + } + } +} + +} // namespace Envoy diff --git a/test/common/buffer/buffer_fuzz.h b/test/common/buffer/buffer_fuzz.h new file mode 100644 index 0000000000000..108ac4cab45d7 --- /dev/null +++ b/test/common/buffer/buffer_fuzz.h @@ -0,0 +1,12 @@ +#pragma once + +#include "test/common/buffer/buffer_fuzz.pb.h" + +namespace Envoy { + +class BufferFuzz { +public: + static void bufferFuzz(const test::common::buffer::BufferFuzzTestCase& input, bool old_impl); +}; + +} // namespace Envoy diff --git a/test/common/buffer/buffer_fuzz.proto b/test/common/buffer/buffer_fuzz.proto new file mode 100644 index 0000000000000..bb88022b59583 --- /dev/null +++ b/test/common/buffer/buffer_fuzz.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; + +package test.common.buffer; + +import "google/protobuf/empty.proto"; + +message ReserveCommit { + uint32 reserve_length = 1; + uint32 commit_length = 2; +} + +message CopyOut { + uint32 start = 1; + uint32 length = 2; +} + +message Move { + uint32 source_index = 1; + uint32 length = 2; +} + +message Action { + uint32 target_index = 1; + oneof action_selector { + uint32 add_buffer_fragment = 2; + uint32 add_string = 3; + uint32 add_buffer = 4; + uint32 prepend_string = 5; + uint32 prepend_buffer = 6; + ReserveCommit reserve_commit = 7; + CopyOut copy_out = 8; + uint32 drain = 9; + uint32 linearize = 10; + Move move = 11; + uint32 read = 12; + google.protobuf.Empty write = 13; + } +} + +message BufferFuzzTestCase { + repeated Action actions = 1; +} diff --git a/test/common/buffer/buffer_fuzz_test.cc b/test/common/buffer/buffer_fuzz_test.cc new file mode 100644 index 0000000000000..fa23082b40965 --- /dev/null +++ b/test/common/buffer/buffer_fuzz_test.cc @@ -0,0 +1,12 @@ +#include 
"test/common/buffer/buffer_fuzz.h" +#include "test/common/buffer/buffer_fuzz.pb.h" +#include "test/fuzz/fuzz_runner.h" + +namespace Envoy { + +// Fuzz the old owned buffer implementation. +DEFINE_PROTO_FUZZER(const test::common::buffer::BufferFuzzTestCase& input) { + Envoy::BufferFuzz::bufferFuzz(input, true); +} + +} // namespace Envoy diff --git a/test/common/buffer/buffer_test.cc b/test/common/buffer/buffer_test.cc index f9574b5ecc66f..5df98389460ab 100644 --- a/test/common/buffer/buffer_test.cc +++ b/test/common/buffer/buffer_test.cc @@ -6,16 +6,277 @@ #include "test/common/buffer/utility.h" #include "test/test_common/printers.h" -#include "test/test_common/test_base.h" #include "test/test_common/utility.h" +#include "gtest/gtest.h" + namespace Envoy { namespace Buffer { namespace { -TEST(BufferHelperTest, PeekI8) { +class DummySlice : public Slice { +public: + DummySlice(const std::string& data, const std::function& deletion_callback) + : Slice(0, data.size(), data.size()), deletion_callback_(deletion_callback) { + base_ = reinterpret_cast(const_cast(data.c_str())); + } + ~DummySlice() override { + if (deletion_callback_ != nullptr) { + deletion_callback_(); + } + } + +private: + const std::function deletion_callback_; +}; + +class OwnedSliceTest : public testing::Test { +protected: + static void expectReservationSuccess(const Slice::Reservation& reservation, const Slice& slice, + uint64_t reservation_size) { + EXPECT_NE(nullptr, reservation.mem_); + EXPECT_EQ(static_cast(slice.data()) + slice.dataSize(), reservation.mem_); + EXPECT_EQ(reservation_size, reservation.len_); + EXPECT_EQ(0, slice.reservableSize()); + } + + static void expectReservationFailure(const Slice::Reservation& reservation, const Slice& slice, + uint64_t reservable_size) { + EXPECT_EQ(nullptr, reservation.mem_); + EXPECT_EQ(0, reservation.mem_); + EXPECT_EQ(reservable_size, slice.reservableSize()); + } + + static void expectCommitSuccess(bool committed, const Slice& slice, uint64_t 
data_size, + uint64_t reservable_size) { + EXPECT_TRUE(committed); + EXPECT_EQ(data_size, slice.dataSize()); + EXPECT_EQ(reservable_size, slice.reservableSize()); + } +}; + +bool sliceMatches(const SlicePtr& slice, const std::string& expected) { + return slice != nullptr && slice->dataSize() == expected.size() && + memcmp(slice->data(), expected.data(), expected.size()) == 0; +} + +TEST_F(OwnedSliceTest, Create) { + static constexpr uint64_t Sizes[] = {0, 1, 64, 4096 - sizeof(OwnedSlice), 65535}; + for (const auto size : Sizes) { + auto slice = OwnedSlice::create(size); + EXPECT_NE(nullptr, slice->data()); + EXPECT_EQ(0, slice->dataSize()); + EXPECT_LE(size, slice->reservableSize()); + } +} + +TEST_F(OwnedSliceTest, ReserveCommit) { + auto slice = OwnedSlice::create(100); + const uint64_t initial_capacity = slice->reservableSize(); + EXPECT_LE(100, initial_capacity); + + { + // Verify that a zero-byte reservation is rejected. + Slice::Reservation reservation = slice->reserve(0); + expectReservationFailure(reservation, *slice, initial_capacity); + } + + { + // Create a reservation smaller than the reservable size. + // It should reserve the exact number of bytes requested. + Slice::Reservation reservation = slice->reserve(10); + expectReservationSuccess(reservation, *slice, 10); + + // Request a second reservation while the first reservation remains uncommitted. + // This should fail. + EXPECT_EQ(0, slice->reservableSize()); + Slice::Reservation reservation2 = slice->reserve(1); + expectReservationFailure(reservation2, *slice, 0); + + // Commit the entire reserved size. + bool committed = slice->commit(reservation); + expectCommitSuccess(committed, *slice, 10, initial_capacity - 10); + + // Verify that a reservation can only be committed once. + EXPECT_FALSE(slice->commit(reservation)); + } + + { + // Request another reservation, and commit only part of it. 
+ Slice::Reservation reservation = slice->reserve(10); + expectReservationSuccess(reservation, *slice, 10); + reservation.len_ = 5; + bool committed = slice->commit(reservation); + expectCommitSuccess(committed, *slice, 15, initial_capacity - 15); + } + + { + // Request another reservation, and commit only part of it. + Slice::Reservation reservation = slice->reserve(10); + expectReservationSuccess(reservation, *slice, 10); + reservation.len_ = 5; + bool committed = slice->commit(reservation); + expectCommitSuccess(committed, *slice, 20, initial_capacity - 20); + } + + { + // Request another reservation, and commit zero bytes of it. + // This should clear the reservation. + Slice::Reservation reservation = slice->reserve(10); + expectReservationSuccess(reservation, *slice, 10); + reservation.len_ = 0; + bool committed = slice->commit(reservation); + expectCommitSuccess(committed, *slice, 20, initial_capacity - 20); + } + + { + // Try to commit a reservation from the wrong slice, and verify that the slice rejects it. + Slice::Reservation reservation = slice->reserve(10); + expectReservationSuccess(reservation, *slice, 10); + auto other_slice = OwnedSlice::create(100); + Slice::Reservation other_reservation = other_slice->reserve(10); + expectReservationSuccess(other_reservation, *other_slice, 10); + EXPECT_FALSE(slice->commit(other_reservation)); + EXPECT_FALSE(other_slice->commit(reservation)); + + // Commit the reservations to the proper slices to clear them. + reservation.len_ = 0; + bool committed = slice->commit(reservation); + EXPECT_TRUE(committed); + other_reservation.len_ = 0; + committed = other_slice->commit(other_reservation); + EXPECT_TRUE(committed); + } + + { + // Try to reserve more space than is available in the slice. 
+ uint64_t reservable_size = slice->reservableSize(); + Slice::Reservation reservation = slice->reserve(reservable_size + 1); + expectReservationSuccess(reservation, *slice, reservable_size); + bool committed = slice->commit(reservation); + expectCommitSuccess(committed, *slice, initial_capacity, 0); + } + + { + // Now that the view has no more reservable space, verify that it rejects + // subsequent reservation requests. + Slice::Reservation reservation = slice->reserve(1); + expectReservationFailure(reservation, *slice, 0); + } +} + +TEST_F(OwnedSliceTest, Drain) { + // Create a slice and commit all the available space. + auto slice = OwnedSlice::create(100); + Slice::Reservation reservation = slice->reserve(slice->reservableSize()); + bool committed = slice->commit(reservation); + EXPECT_TRUE(committed); + EXPECT_EQ(0, slice->reservableSize()); + + // Drain some data from the front of the view and verify that the data start moves accordingly. + const uint8_t* original_data = static_cast(slice->data()); + uint64_t original_size = slice->dataSize(); + slice->drain(0); + EXPECT_EQ(original_data, slice->data()); + EXPECT_EQ(original_size, slice->dataSize()); + slice->drain(10); + EXPECT_EQ(original_data + 10, slice->data()); + EXPECT_EQ(original_size - 10, slice->dataSize()); + slice->drain(50); + EXPECT_EQ(original_data + 60, slice->data()); + EXPECT_EQ(original_size - 60, slice->dataSize()); + + // Drain all the remaining data. 
+ slice->drain(slice->dataSize()); + EXPECT_EQ(0, slice->dataSize()); + EXPECT_EQ(original_size, slice->reservableSize()); +} + +TEST(UnownedSliceTest, CreateDelete) { + constexpr char input[] = "hello world"; + bool release_callback_called = false; + BufferFragmentImpl fragment( + input, sizeof(input) - 1, + [&release_callback_called](const void*, size_t, const BufferFragmentImpl*) { + release_callback_called = true; + }); + auto slice = std::make_unique(fragment); + EXPECT_EQ(11, slice->dataSize()); + EXPECT_EQ(0, slice->reservableSize()); + EXPECT_EQ(0, memcmp(slice->data(), input, slice->dataSize())); + EXPECT_FALSE(release_callback_called); + slice.reset(nullptr); + EXPECT_TRUE(release_callback_called); +} + +TEST(SliceDequeTest, CreateDelete) { + bool slice1_deleted = false; + bool slice2_deleted = false; + bool slice3_deleted = false; + + { + // Create an empty deque. + SliceDeque slices; + EXPECT_TRUE(slices.empty()); + EXPECT_EQ(0, slices.size()); + + // Append a view to the deque. + const std::string slice1 = "slice1"; + slices.emplace_back( + std::make_unique(slice1, [&slice1_deleted]() { slice1_deleted = true; })); + EXPECT_FALSE(slices.empty()); + ASSERT_EQ(1, slices.size()); + EXPECT_FALSE(slice1_deleted); + EXPECT_TRUE(sliceMatches(slices.front(), slice1)); + + // Append another view to the deque, and verify that both views are accessible. + const std::string slice2 = "slice2"; + slices.emplace_back( + std::make_unique(slice2, [&slice2_deleted]() { slice2_deleted = true; })); + EXPECT_FALSE(slices.empty()); + ASSERT_EQ(2, slices.size()); + EXPECT_FALSE(slice1_deleted); + EXPECT_FALSE(slice2_deleted); + EXPECT_TRUE(sliceMatches(slices.front(), slice1)); + EXPECT_TRUE(sliceMatches(slices.back(), slice2)); + + // Prepend a view to the deque, to exercise the ring buffer wraparound case. 
+ const std::string slice3 = "slice3"; + slices.emplace_front( + std::make_unique(slice3, [&slice3_deleted]() { slice3_deleted = true; })); + EXPECT_FALSE(slices.empty()); + ASSERT_EQ(3, slices.size()); + EXPECT_FALSE(slice1_deleted); + EXPECT_FALSE(slice2_deleted); + EXPECT_FALSE(slice3_deleted); + EXPECT_TRUE(sliceMatches(slices.front(), slice3)); + EXPECT_TRUE(sliceMatches(slices.back(), slice2)); + + // Remove the first view from the deque, and verify that its slice is deleted. + slices.pop_front(); + EXPECT_FALSE(slices.empty()); + ASSERT_EQ(2, slices.size()); + EXPECT_FALSE(slice1_deleted); + EXPECT_FALSE(slice2_deleted); + EXPECT_TRUE(slice3_deleted); + EXPECT_TRUE(sliceMatches(slices.front(), slice1)); + EXPECT_TRUE(sliceMatches(slices.back(), slice2)); + } + + EXPECT_TRUE(slice1_deleted); + EXPECT_TRUE(slice2_deleted); + EXPECT_TRUE(slice3_deleted); +} + +class BufferHelperTest : public BufferImplementationParamTest {}; + +INSTANTIATE_TEST_CASE_P(BufferHelperTest, BufferHelperTest, + testing::ValuesIn({BufferImplementation::Old, BufferImplementation::New})); + +TEST_P(BufferHelperTest, PeekI8) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 0xFE}); EXPECT_EQ(buffer.peekInt(), 0); EXPECT_EQ(buffer.peekInt(0), 0); @@ -26,19 +287,22 @@ TEST(BufferHelperTest, PeekI8) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeByte(0); EXPECT_THROW_WITH_MESSAGE(buffer.peekInt(1), EnvoyException, "buffer underflow"); } } -TEST(BufferHelperTest, PeekLEI16) { +TEST_P(BufferHelperTest, PeekLEI16) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekLEInt(), 0x0100); EXPECT_EQ(buffer.peekLEInt(0), 0x0100); @@ -50,19 +314,22 @@ TEST(BufferHelperTest, PeekLEI16) { { Buffer::OwnedImpl buffer; + 
verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addRepeated(buffer, 2, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(1), EnvoyException, "buffer underflow"); } } -TEST(BufferHelperTest, PeekLEI32) { +TEST_P(BufferHelperTest, PeekLEI32) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekLEInt(), 0x03020100); EXPECT_EQ(buffer.peekLEInt(0), 0x03020100); @@ -73,19 +340,22 @@ TEST(BufferHelperTest, PeekLEI32) { } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addRepeated(buffer, 4, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(1), EnvoyException, "buffer underflow"); } } -TEST(BufferHelperTest, PeekLEI64) { +TEST_P(BufferHelperTest, PeekLEI64) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekLEInt(), 0x0706050403020100); EXPECT_EQ(buffer.peekLEInt(0), 0x0706050403020100); @@ -105,6 +375,7 @@ TEST(BufferHelperTest, PeekLEI64) { { // signed Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0xFE, 0xFF, 0xFF}); EXPECT_EQ((buffer.peekLEInt()), -1); EXPECT_EQ((buffer.peekLEInt(2)), 255); // 0x00FF @@ -114,6 +385,7 @@ TEST(BufferHelperTest, PeekLEI64) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF}); EXPECT_THROW_WITH_MESSAGE( (buffer.peekLEInt(buffer.length() - sizeof(int64_t) + 1)), @@ -122,19 +394,22 @@ TEST(BufferHelperTest, PeekLEI64) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(0), EnvoyException, "buffer 
underflow"); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addRepeated(buffer, 8, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(1), EnvoyException, "buffer underflow"); } } -TEST(BufferHelperTest, PeekLEU16) { +TEST_P(BufferHelperTest, PeekLEU16) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekLEInt(), 0x0100); EXPECT_EQ(buffer.peekLEInt(0), 0x0100); @@ -145,19 +420,22 @@ TEST(BufferHelperTest, PeekLEU16) { } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addRepeated(buffer, 2, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(1), EnvoyException, "buffer underflow"); } } -TEST(BufferHelperTest, PeekLEU32) { +TEST_P(BufferHelperTest, PeekLEU32) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekLEInt(), 0x03020100); EXPECT_EQ(buffer.peekLEInt(0), 0x03020100); @@ -168,19 +446,22 @@ TEST(BufferHelperTest, PeekLEU32) { } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addRepeated(buffer, 4, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(1), EnvoyException, "buffer underflow"); } } -TEST(BufferHelperTest, PeekLEU64) { +TEST_P(BufferHelperTest, PeekLEU64) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekLEInt(), 0x0706050403020100); EXPECT_EQ(buffer.peekLEInt(0), 0x0706050403020100); @@ -191,19 +472,22 @@ TEST(BufferHelperTest, PeekLEU64) { } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(0), EnvoyException, 
"buffer underflow"); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addRepeated(buffer, 8, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(1), EnvoyException, "buffer underflow"); } } -TEST(BufferHelperTest, PeekBEI16) { +TEST_P(BufferHelperTest, PeekBEI16) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekBEInt(), 1); EXPECT_EQ(buffer.peekBEInt(0), 1); @@ -215,19 +499,22 @@ TEST(BufferHelperTest, PeekBEI16) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addRepeated(buffer, 2, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(1), EnvoyException, "buffer underflow"); } } -TEST(BufferHelperTest, PeekBEI32) { +TEST_P(BufferHelperTest, PeekBEI32) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekBEInt(), 0x00010203); EXPECT_EQ(buffer.peekBEInt(0), 0x00010203); @@ -238,19 +525,22 @@ TEST(BufferHelperTest, PeekBEI32) { } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addRepeated(buffer, 4, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(1), EnvoyException, "buffer underflow"); } } -TEST(BufferHelperTest, PeekBEI64) { +TEST_P(BufferHelperTest, PeekBEI64) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekBEInt(), 0x0001020304050607); EXPECT_EQ(buffer.peekBEInt(0), 0x0001020304050607); @@ -269,6 +559,7 @@ TEST(BufferHelperTest, PeekBEI64) { { // signed Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, 
0xFE}); EXPECT_EQ((buffer.peekBEInt()), -1); EXPECT_EQ((buffer.peekBEInt(2)), -256); // 0xFF00 @@ -278,6 +569,7 @@ TEST(BufferHelperTest, PeekBEI64) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF}); EXPECT_THROW_WITH_MESSAGE( (buffer.peekBEInt(buffer.length() - sizeof(int64_t) + 1)), @@ -286,19 +578,22 @@ TEST(BufferHelperTest, PeekBEI64) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addRepeated(buffer, 8, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(1), EnvoyException, "buffer underflow"); } } -TEST(BufferHelperTest, PeekBEU16) { +TEST_P(BufferHelperTest, PeekBEU16) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekBEInt(), 1); EXPECT_EQ(buffer.peekBEInt(0), 1); @@ -309,19 +604,22 @@ TEST(BufferHelperTest, PeekBEU16) { } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addRepeated(buffer, 2, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(1), EnvoyException, "buffer underflow"); } } -TEST(BufferHelperTest, PeekBEU32) { +TEST_P(BufferHelperTest, PeekBEU32) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekBEInt(), 0x00010203); EXPECT_EQ(buffer.peekBEInt(0), 0x00010203); @@ -332,19 +630,22 @@ TEST(BufferHelperTest, PeekBEU32) { } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addRepeated(buffer, 4, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(1), EnvoyException, "buffer 
underflow"); } } -TEST(BufferHelperTest, PeekBEU64) { +TEST_P(BufferHelperTest, PeekBEU64) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekBEInt(), 0x0001020304050607); EXPECT_EQ(buffer.peekBEInt(0), 0x0001020304050607); @@ -355,18 +656,21 @@ TEST(BufferHelperTest, PeekBEU64) { } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addRepeated(buffer, 8, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(1), EnvoyException, "buffer underflow"); } } -TEST(BufferHelperTest, DrainI8) { +TEST_P(BufferHelperTest, DrainI8) { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 0xFE}); EXPECT_EQ(buffer.drainInt(), 0); EXPECT_EQ(buffer.drainInt(), 1); @@ -374,8 +678,9 @@ TEST(BufferHelperTest, DrainI8) { EXPECT_EQ(buffer.length(), 0); } -TEST(BufferHelperTest, DrainLEI16) { +TEST_P(BufferHelperTest, DrainLEI16) { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF}); EXPECT_EQ(buffer.drainLEInt(), 0x0100); EXPECT_EQ(buffer.drainLEInt(), 0x0302); @@ -383,40 +688,45 @@ TEST(BufferHelperTest, DrainLEI16) { EXPECT_EQ(buffer.length(), 0); } -TEST(BufferHelperTest, DrainLEI32) { +TEST_P(BufferHelperTest, DrainLEI32) { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.drainLEInt(), 0x03020100); EXPECT_EQ(buffer.drainLEInt(), -1); EXPECT_EQ(buffer.length(), 0); } -TEST(BufferHelperTest, DrainLEI64) { +TEST_P(BufferHelperTest, DrainLEI64) { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.drainLEInt(), 0x0706050403020100); 
EXPECT_EQ(buffer.drainLEInt(), -1); EXPECT_EQ(buffer.length(), 0); } -TEST(BufferHelperTest, DrainLEU32) { +TEST_P(BufferHelperTest, DrainLEU32) { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.drainLEInt(), 0x03020100); EXPECT_EQ(buffer.drainLEInt(), 0xFFFFFFFF); EXPECT_EQ(buffer.length(), 0); } -TEST(BufferHelperTest, DrainLEU64) { +TEST_P(BufferHelperTest, DrainLEU64) { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.drainLEInt(), 0x0706050403020100); EXPECT_EQ(buffer.drainLEInt(), 0xFFFFFFFFFFFFFFFF); EXPECT_EQ(buffer.length(), 0); } -TEST(BufferHelperTest, DrainBEI16) { +TEST_P(BufferHelperTest, DrainBEI16) { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF}); EXPECT_EQ(buffer.drainBEInt(), 1); EXPECT_EQ(buffer.drainBEInt(), 0x0203); @@ -424,40 +734,45 @@ TEST(BufferHelperTest, DrainBEI16) { EXPECT_EQ(buffer.length(), 0); } -TEST(BufferHelperTest, DrainBEI32) { +TEST_P(BufferHelperTest, DrainBEI32) { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.drainBEInt(), 0x00010203); EXPECT_EQ(buffer.drainBEInt(), -1); EXPECT_EQ(buffer.length(), 0); } -TEST(BufferHelperTest, DrainBEI64) { +TEST_P(BufferHelperTest, DrainBEI64) { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.drainBEInt(), 0x0001020304050607); EXPECT_EQ(buffer.drainBEInt(), -1); EXPECT_EQ(buffer.length(), 0); } -TEST(BufferHelperTest, DrainBEU32) { +TEST_P(BufferHelperTest, DrainBEU32) { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.drainBEInt(), 0x00010203); 
EXPECT_EQ(buffer.drainBEInt(), 0xFFFFFFFF); EXPECT_EQ(buffer.length(), 0); } -TEST(BufferHelperTest, DrainBEU64) { +TEST_P(BufferHelperTest, DrainBEU64) { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.drainBEInt(), 0x0001020304050607); EXPECT_EQ(buffer.drainBEInt(), 0xFFFFFFFFFFFFFFFF); EXPECT_EQ(buffer.length(), 0); } -TEST(BufferHelperTest, WriteI8) { +TEST_P(BufferHelperTest, WriteI8) { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeByte(-128); buffer.writeByte(-1); buffer.writeByte(0); @@ -467,229 +782,269 @@ TEST(BufferHelperTest, WriteI8) { EXPECT_EQ(std::string("\x80\xFF\0\x1\x7F", 5), buffer.toString()); } -TEST(BufferHelperTest, WriteLEI16) { +TEST_P(BufferHelperTest, WriteLEI16) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeLEInt(std::numeric_limits::min()); EXPECT_EQ(std::string("\0\x80", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeLEInt(0); EXPECT_EQ(std::string("\0\0", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeLEInt(1); EXPECT_EQ(std::string("\x1\0", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeLEInt(std::numeric_limits::max()); EXPECT_EQ("\xFF\x7F", buffer.toString()); } } -TEST(BufferHelperTest, WriteLEU16) { +TEST_P(BufferHelperTest, WriteLEU16) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeLEInt(0); EXPECT_EQ(std::string("\0\0", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeLEInt(1); EXPECT_EQ(std::string("\x1\0", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeLEInt(static_cast(std::numeric_limits::max()) + 1); EXPECT_EQ(std::string("\0\x80", 2), buffer.toString()); } { Buffer::OwnedImpl 
buffer; + verifyImplementation(buffer); buffer.writeLEInt(std::numeric_limits::max()); EXPECT_EQ("\xFF\xFF", buffer.toString()); } } -TEST(BufferHelperTest, WriteLEI32) { +TEST_P(BufferHelperTest, WriteLEI32) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeLEInt(std::numeric_limits::min()); EXPECT_EQ(std::string("\0\0\0\x80", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeLEInt(0); EXPECT_EQ(std::string("\0\0\0\0", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeLEInt(1); EXPECT_EQ(std::string("\x1\0\0\0", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeLEInt(std::numeric_limits::max()); EXPECT_EQ("\xFF\xFF\xFF\x7F", buffer.toString()); } } -TEST(BufferHelperTest, WriteLEU32) { +TEST_P(BufferHelperTest, WriteLEU32) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeLEInt(0); EXPECT_EQ(std::string("\0\0\0\0", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeLEInt(1); EXPECT_EQ(std::string("\x1\0\0\0", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeLEInt(static_cast(std::numeric_limits::max()) + 1); EXPECT_EQ(std::string("\0\0\0\x80", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeLEInt(std::numeric_limits::max()); EXPECT_EQ("\xFF\xFF\xFF\xFF", buffer.toString()); } } -TEST(BufferHelperTest, WriteLEI64) { +TEST_P(BufferHelperTest, WriteLEI64) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeLEInt(std::numeric_limits::min()); EXPECT_EQ(std::string("\0\0\0\0\0\0\0\x80", 8), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeLEInt(1); EXPECT_EQ(std::string("\x1\0\0\0\0\0\0\0", 8), buffer.toString()); } { Buffer::OwnedImpl buffer; + 
verifyImplementation(buffer); buffer.writeLEInt(0); EXPECT_EQ(std::string("\0\0\0\0\0\0\0\0", 8), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeLEInt(std::numeric_limits::max()); EXPECT_EQ("\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x7F", buffer.toString()); } } -TEST(BufferHelperTest, WriteBEI16) { +TEST_P(BufferHelperTest, WriteBEI16) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeBEInt(std::numeric_limits::min()); EXPECT_EQ(std::string("\x80\0", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeBEInt(0); EXPECT_EQ(std::string("\0\0", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeBEInt(1); EXPECT_EQ(std::string("\0\x1", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeBEInt(std::numeric_limits::max()); EXPECT_EQ("\x7F\xFF", buffer.toString()); } } -TEST(BufferHelperTest, WriteBEU16) { +TEST_P(BufferHelperTest, WriteBEU16) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeBEInt(0); EXPECT_EQ(std::string("\0\0", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeBEInt(1); EXPECT_EQ(std::string("\0\x1", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeBEInt(static_cast(std::numeric_limits::max()) + 1); EXPECT_EQ(std::string("\x80\0", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeBEInt(std::numeric_limits::max()); EXPECT_EQ("\xFF\xFF", buffer.toString()); } } -TEST(BufferHelperTest, WriteBEI32) { +TEST_P(BufferHelperTest, WriteBEI32) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeBEInt(std::numeric_limits::min()); EXPECT_EQ(std::string("\x80\0\0\0", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeBEInt(0); 
EXPECT_EQ(std::string("\0\0\0\0", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeBEInt(1); EXPECT_EQ(std::string("\0\0\0\x1", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeBEInt(std::numeric_limits::max()); EXPECT_EQ("\x7F\xFF\xFF\xFF", buffer.toString()); } } -TEST(BufferHelperTest, WriteBEU32) { +TEST_P(BufferHelperTest, WriteBEU32) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeBEInt(0); EXPECT_EQ(std::string("\0\0\0\0", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeBEInt(1); EXPECT_EQ(std::string("\0\0\0\x1", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeBEInt(static_cast(std::numeric_limits::max()) + 1); EXPECT_EQ(std::string("\x80\0\0\0", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeBEInt(std::numeric_limits::max()); EXPECT_EQ("\xFF\xFF\xFF\xFF", buffer.toString()); } } -TEST(BufferHelperTest, WriteBEI64) { +TEST_P(BufferHelperTest, WriteBEI64) { { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeBEInt(std::numeric_limits::min()); EXPECT_EQ(std::string("\x80\0\0\0\0\0\0\0\0", 8), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeBEInt(1); EXPECT_EQ(std::string("\0\0\0\0\0\0\0\x1", 8), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeBEInt(0); EXPECT_EQ(std::string("\0\0\0\0\0\0\0\0", 8), buffer.toString()); } { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.writeBEInt(std::numeric_limits::max()); EXPECT_EQ("\x7F\xFF\xFF\xFF\xFF\xFF\xFF\xFF", buffer.toString()); } diff --git a/test/common/buffer/new_buffer_fuzz_test.cc b/test/common/buffer/new_buffer_fuzz_test.cc new file mode 100644 index 0000000000000..bc8f71910526a --- /dev/null +++ 
b/test/common/buffer/new_buffer_fuzz_test.cc @@ -0,0 +1,12 @@ +#include "test/common/buffer/buffer_fuzz.h" +#include "test/common/buffer/buffer_fuzz.pb.h" +#include "test/fuzz/fuzz_runner.h" + +namespace Envoy { + +// Fuzz the new owned buffer implementation. +DEFINE_PROTO_FUZZER(const test::common::buffer::BufferFuzzTestCase& input) { + Envoy::BufferFuzz::bufferFuzz(input, false); +} + +} // namespace Envoy diff --git a/test/common/buffer/owned_impl_test.cc b/test/common/buffer/owned_impl_test.cc index 66aa39dd39bc6..07a658bf80bd5 100644 --- a/test/common/buffer/owned_impl_test.cc +++ b/test/common/buffer/owned_impl_test.cc @@ -1,12 +1,16 @@ -#include "common/api/os_sys_calls_impl.h" +#include "envoy/api/io_error.h" + #include "common/buffer/buffer_impl.h" +#include "common/network/io_socket_handle_impl.h" +#include "test/common/buffer/utility.h" #include "test/mocks/api/mocks.h" -#include "test/test_common/test_base.h" +#include "test/test_common/logging.h" #include "test/test_common/threadsafe_singleton_injector.h" #include "absl/strings/str_cat.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" using testing::_; using testing::Return; @@ -15,15 +19,31 @@ namespace Envoy { namespace Buffer { namespace { -class OwnedImplTest : public TestBase { +class OwnedImplTest : public BufferImplementationParamTest { public: bool release_callback_called_ = false; + +protected: + static void clearReservation(Buffer::RawSlice* iovecs, uint64_t num_iovecs, OwnedImpl& buffer) { + for (uint64_t i = 0; i < num_iovecs; i++) { + iovecs[i].len_ = 0; + } + buffer.commit(iovecs, num_iovecs); + } + + static void commitReservation(Buffer::RawSlice* iovecs, uint64_t num_iovecs, OwnedImpl& buffer) { + buffer.commit(iovecs, num_iovecs); + } }; -TEST_F(OwnedImplTest, AddBufferFragmentNoCleanup) { +INSTANTIATE_TEST_CASE_P(OwnedImplTest, OwnedImplTest, + testing::ValuesIn({BufferImplementation::Old, BufferImplementation::New})); + +TEST_P(OwnedImplTest, AddBufferFragmentNoCleanup) { char 
input[] = "hello world"; BufferFragmentImpl frag(input, 11, nullptr); Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.addBufferFragment(frag); EXPECT_EQ(11, buffer.length()); @@ -31,12 +51,13 @@ TEST_F(OwnedImplTest, AddBufferFragmentNoCleanup) { EXPECT_EQ(0, buffer.length()); } -TEST_F(OwnedImplTest, AddBufferFragmentWithCleanup) { +TEST_P(OwnedImplTest, AddBufferFragmentWithCleanup) { char input[] = "hello world"; BufferFragmentImpl frag(input, 11, [this](const void*, size_t, const BufferFragmentImpl*) { release_callback_called_ = true; }); Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.addBufferFragment(frag); EXPECT_EQ(11, buffer.length()); @@ -49,7 +70,7 @@ TEST_F(OwnedImplTest, AddBufferFragmentWithCleanup) { EXPECT_TRUE(release_callback_called_); } -TEST_F(OwnedImplTest, AddBufferFragmentDynamicAllocation) { +TEST_P(OwnedImplTest, AddBufferFragmentDynamicAllocation) { char input_stack[] = "hello world"; char* input = new char[11]; std::copy(input_stack, input_stack + 11, input); @@ -62,6 +83,7 @@ TEST_F(OwnedImplTest, AddBufferFragmentDynamicAllocation) { }); Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.addBufferFragment(*frag); EXPECT_EQ(11, buffer.length()); @@ -74,19 +96,59 @@ TEST_F(OwnedImplTest, AddBufferFragmentDynamicAllocation) { EXPECT_TRUE(release_callback_called_); } -TEST_F(OwnedImplTest, Prepend) { - std::string suffix = "World!", prefix = "Hello, "; +TEST_P(OwnedImplTest, Add) { + const std::string string1 = "Hello, ", string2 = "World!"; + Buffer::OwnedImpl buffer; + verifyImplementation(buffer); + + buffer.add(string1); + EXPECT_EQ(string1.size(), buffer.length()); + EXPECT_EQ(string1, buffer.toString()); + + buffer.add(string2); + EXPECT_EQ(string1.size() + string2.size(), buffer.length()); + EXPECT_EQ(string1 + string2, buffer.toString()); + + // Append a large string that will only partially fit in the space remaining + // at the end of the buffer. 
+ std::string big_suffix; + big_suffix.reserve(16385); + for (unsigned i = 0; i < 16; i++) { + big_suffix += std::string(1024, 'A' + i); + } + big_suffix.push_back('-'); + buffer.add(big_suffix); + EXPECT_EQ(string1.size() + string2.size() + big_suffix.size(), buffer.length()); + EXPECT_EQ(string1 + string2 + big_suffix, buffer.toString()); +} + +TEST_P(OwnedImplTest, Prepend) { + const std::string suffix = "World!", prefix = "Hello, "; Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.add(suffix); buffer.prepend(prefix); EXPECT_EQ(suffix.size() + prefix.size(), buffer.length()); EXPECT_EQ(prefix + suffix, buffer.toString()); + + // Prepend a large string that will only partially fit in the space remaining + // at the front of the buffer. + std::string big_prefix; + big_prefix.reserve(16385); + for (unsigned i = 0; i < 16; i++) { + big_prefix += std::string(1024, 'A' + i); + } + big_prefix.push_back('-'); + buffer.prepend(big_prefix); + EXPECT_EQ(big_prefix.size() + prefix.size() + suffix.size(), buffer.length()); + EXPECT_EQ(big_prefix + prefix + suffix, buffer.toString()); } -TEST_F(OwnedImplTest, PrependToEmptyBuffer) { +TEST_P(OwnedImplTest, PrependToEmptyBuffer) { std::string data = "Hello, World!"; Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.prepend(data); EXPECT_EQ(data.size(), buffer.length()); @@ -98,11 +160,13 @@ TEST_F(OwnedImplTest, PrependToEmptyBuffer) { EXPECT_EQ(data, buffer.toString()); } -TEST_F(OwnedImplTest, PrependBuffer) { +TEST_P(OwnedImplTest, PrependBuffer) { std::string suffix = "World!", prefix = "Hello, "; Buffer::OwnedImpl buffer; + verifyImplementation(buffer); buffer.add(suffix); Buffer::OwnedImpl prefixBuffer; + verifyImplementation(buffer); prefixBuffer.add(prefix); buffer.prepend(prefixBuffer); @@ -112,67 +176,201 @@ TEST_F(OwnedImplTest, PrependBuffer) { EXPECT_EQ(0, prefixBuffer.length()); } -TEST_F(OwnedImplTest, Write) { +TEST_P(OwnedImplTest, Write) { Api::MockOsSysCalls os_sys_calls; 
TestThreadsafeSingletonInjector os_calls(&os_sys_calls); Buffer::OwnedImpl buffer; + verifyImplementation(buffer); + Network::IoSocketHandleImpl io_handle; buffer.add("example"); EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{7, 0})); - Api::SysCallIntResult result = buffer.write(-1); + Api::IoCallUint64Result result = buffer.write(io_handle); + EXPECT_TRUE(result.ok()); EXPECT_EQ(7, result.rc_); EXPECT_EQ(0, buffer.length()); buffer.add("example"); EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{6, 0})); - result = buffer.write(-1); + result = buffer.write(io_handle); + EXPECT_TRUE(result.ok()); EXPECT_EQ(6, result.rc_); EXPECT_EQ(1, buffer.length()); EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{0, 0})); - result = buffer.write(-1); + result = buffer.write(io_handle); + EXPECT_TRUE(result.ok()); EXPECT_EQ(0, result.rc_); EXPECT_EQ(1, buffer.length()); EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, 0})); - result = buffer.write(-1); - EXPECT_EQ(-1, result.rc_); + result = buffer.write(io_handle); + EXPECT_EQ(Api::IoError::IoErrorCode::UnknownError, result.err_->getErrorCode()); + EXPECT_EQ(0, result.rc_); + EXPECT_EQ(1, buffer.length()); + + EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, EAGAIN})); + result = buffer.write(io_handle); + EXPECT_EQ(Api::IoError::IoErrorCode::Again, result.err_->getErrorCode()); + EXPECT_EQ(0, result.rc_); EXPECT_EQ(1, buffer.length()); EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{1, 0})); - result = buffer.write(-1); + result = buffer.write(io_handle); + EXPECT_TRUE(result.ok()); EXPECT_EQ(1, result.rc_); EXPECT_EQ(0, buffer.length()); EXPECT_CALL(os_sys_calls, writev(_, _, _)).Times(0); - result = buffer.write(-1); + result = buffer.write(io_handle); EXPECT_EQ(0, result.rc_); EXPECT_EQ(0, buffer.length()); } 
-TEST_F(OwnedImplTest, Read) { +TEST_P(OwnedImplTest, Read) { Api::MockOsSysCalls os_sys_calls; TestThreadsafeSingletonInjector os_calls(&os_sys_calls); Buffer::OwnedImpl buffer; + verifyImplementation(buffer); + Network::IoSocketHandleImpl io_handle; EXPECT_CALL(os_sys_calls, readv(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{0, 0})); - Api::SysCallIntResult result = buffer.read(-1, 100); + Api::IoCallUint64Result result = buffer.read(io_handle, 100); + EXPECT_TRUE(result.ok()); EXPECT_EQ(0, result.rc_); EXPECT_EQ(0, buffer.length()); EXPECT_CALL(os_sys_calls, readv(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, 0})); - result = buffer.read(-1, 100); - EXPECT_EQ(-1, result.rc_); + result = buffer.read(io_handle, 100); + EXPECT_EQ(Api::IoError::IoErrorCode::UnknownError, result.err_->getErrorCode()); + EXPECT_EQ(0, result.rc_); + EXPECT_EQ(0, buffer.length()); + + EXPECT_CALL(os_sys_calls, readv(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, EAGAIN})); + result = buffer.read(io_handle, 100); + EXPECT_EQ(Api::IoError::IoErrorCode::Again, result.err_->getErrorCode()); + EXPECT_EQ(0, result.rc_); EXPECT_EQ(0, buffer.length()); EXPECT_CALL(os_sys_calls, readv(_, _, _)).Times(0); - result = buffer.read(-1, 0); + result = buffer.read(io_handle, 0); EXPECT_EQ(0, result.rc_); EXPECT_EQ(0, buffer.length()); } -TEST_F(OwnedImplTest, ToString) { +TEST_P(OwnedImplTest, ReserveCommit) { + // This fragment will later be added to the buffer. It is declared in an enclosing scope to + // ensure it is not destructed until after the buffer is. + const std::string input = "Hello, world"; + BufferFragmentImpl fragment(input.c_str(), input.size(), nullptr); + + { + Buffer::OwnedImpl buffer; + verifyImplementation(buffer); + + // A zero-byte reservation should fail. 
+ static constexpr uint64_t NumIovecs = 16; + Buffer::RawSlice iovecs[NumIovecs]; + uint64_t num_reserved = buffer.reserve(0, iovecs, NumIovecs); + EXPECT_EQ(0, num_reserved); + clearReservation(iovecs, num_reserved, buffer); + EXPECT_EQ(0, buffer.length()); + + // Test and commit a small reservation. This should succeed. + num_reserved = buffer.reserve(1, iovecs, NumIovecs); + EXPECT_EQ(1, num_reserved); + // The implementation might provide a bigger reservation than requested. + EXPECT_LE(1, iovecs[0].len_); + iovecs[0].len_ = 1; + commitReservation(iovecs, num_reserved, buffer); + EXPECT_EQ(1, buffer.length()); + + // The remaining tests validate internal optimizations of the new deque-of-slices + // implementation, so they're not valid for the old evbuffer implementation. + if (buffer.usesOldImpl()) { + return; + } + + // Request a reservation that fits in the remaining space at the end of the last slice. + num_reserved = buffer.reserve(1, iovecs, NumIovecs); + EXPECT_EQ(1, num_reserved); + EXPECT_LE(1, iovecs[0].len_); + iovecs[0].len_ = 1; + const void* slice1 = iovecs[0].mem_; + clearReservation(iovecs, num_reserved, buffer); + + // Request a reservation that is too large to fit in the remaining space at the end of + // the last slice, and allow the buffer to use only one slice. This should result in the + // creation of a new slice within the buffer. + num_reserved = buffer.reserve(4096 - sizeof(OwnedSlice), iovecs, 1); + const void* slice2 = iovecs[0].mem_; + EXPECT_EQ(1, num_reserved); + EXPECT_NE(slice1, slice2); + clearReservation(iovecs, num_reserved, buffer); + + // Request the same size reservation, but allow the buffer to use multiple slices. This + // should result in the buffer splitting the reservation between its last two slices. 
+ num_reserved = buffer.reserve(4096 - sizeof(OwnedSlice), iovecs, NumIovecs); + EXPECT_EQ(2, num_reserved); + EXPECT_EQ(slice1, iovecs[0].mem_); + EXPECT_EQ(slice2, iovecs[1].mem_); + clearReservation(iovecs, num_reserved, buffer); + + // Request a reservation that is too big to fit in the existing slices. This should result + // in the creation of a third slice. + num_reserved = buffer.reserve(8192, iovecs, NumIovecs); + EXPECT_EQ(3, num_reserved); + EXPECT_EQ(slice1, iovecs[0].mem_); + EXPECT_EQ(slice2, iovecs[1].mem_); + const void* slice3 = iovecs[2].mem_; + clearReservation(iovecs, num_reserved, buffer); + + // Append a fragment to the buffer, and then request a small reservation. The buffer + // should make a new slice to satisfy the reservation; it cannot safely use any of + // the previously seen slices, because they are no longer at the end of the buffer. + buffer.addBufferFragment(fragment); + EXPECT_EQ(13, buffer.length()); + num_reserved = buffer.reserve(1, iovecs, NumIovecs); + EXPECT_EQ(1, num_reserved); + EXPECT_NE(slice1, iovecs[0].mem_); + EXPECT_NE(slice2, iovecs[0].mem_); + EXPECT_NE(slice3, iovecs[0].mem_); + commitReservation(iovecs, num_reserved, buffer); + EXPECT_EQ(14, buffer.length()); + } +} + +TEST_P(OwnedImplTest, Search) { + // Populate a buffer with a string split across many small slices, to + // exercise edge cases in the search implementation.
+ static const char* Inputs[] = {"ab", "a", "", "aaa", "b", "a", "aaa", "ab", "a"}; + Buffer::OwnedImpl buffer; + verifyImplementation(buffer); + for (const auto& input : Inputs) { + buffer.appendSliceForTest(input); + } + EXPECT_STREQ("abaaaabaaaaaba", buffer.toString().c_str()); + + EXPECT_EQ(-1, buffer.search("c", 1, 0)); + EXPECT_EQ(0, buffer.search("", 0, 0)); + EXPECT_EQ(buffer.length(), buffer.search("", 0, buffer.length())); + EXPECT_EQ(-1, buffer.search("", 0, buffer.length() + 1)); + EXPECT_EQ(0, buffer.search("a", 1, 0)); + EXPECT_EQ(1, buffer.search("b", 1, 1)); + EXPECT_EQ(2, buffer.search("a", 1, 1)); + EXPECT_EQ(0, buffer.search("abaa", 4, 0)); + EXPECT_EQ(2, buffer.search("aaaa", 4, 0)); + EXPECT_EQ(2, buffer.search("aaaa", 4, 1)); + EXPECT_EQ(2, buffer.search("aaaa", 4, 2)); + EXPECT_EQ(7, buffer.search("aaaaab", 6, 0)); + EXPECT_EQ(0, buffer.search("abaaaabaaaaaba", 14, 0)); + EXPECT_EQ(12, buffer.search("ba", 2, 10)); + EXPECT_EQ(-1, buffer.search("abaaaabaaaaabaa", 15, 0)); +} + +TEST_P(OwnedImplTest, ToString) { Buffer::OwnedImpl buffer; + verifyImplementation(buffer); EXPECT_EQ("", buffer.toString()); auto append = [&buffer](absl::string_view str) { buffer.add(str.data(), str.size()); }; append("Hello, "); @@ -186,6 +384,59 @@ TEST_F(OwnedImplTest, ToString) { EXPECT_EQ(absl::StrCat("Hello, world!" + long_string), buffer.toString()); } +TEST_P(OwnedImplTest, AppendSliceForTest) { + static constexpr size_t NumInputs = 3; + static constexpr const char* Inputs[] = {"one", "2", "", "four", ""}; + Buffer::OwnedImpl buffer; + RawSlice slices[NumInputs]; + EXPECT_EQ(0, buffer.getRawSlices(slices, NumInputs)); + for (const auto& input : Inputs) { + buffer.appendSliceForTest(input); + } + // getRawSlices will only return the 3 slices with nonzero length. 
+ EXPECT_EQ(3, buffer.getRawSlices(slices, NumInputs)); + + auto expectSlice = [](const RawSlice& slice, const char* expected) { + size_t length = strlen(expected); + EXPECT_EQ(length, slice.len_); + EXPECT_EQ(0, memcmp(slice.mem_, expected, length)); + }; + + expectSlice(slices[0], "one"); + expectSlice(slices[1], "2"); + expectSlice(slices[2], "four"); +} + +// Regression test for oss-fuzz issue +// https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=13263, where prepending +// an empty buffer resulted in a corrupted libevent internal state. +TEST_P(OwnedImplTest, PrependEmpty) { + Buffer::OwnedImpl buf; + Buffer::OwnedImpl other_buf; + char input[] = "foo"; + BufferFragmentImpl frag(input, 3, nullptr); + buf.addBufferFragment(frag); + buf.prepend(""); + other_buf.move(buf, 1); + buf.add("bar"); + EXPECT_EQ("oobar", buf.toString()); + buf.drain(5); + EXPECT_EQ(0, buf.length()); +} + +TEST(OverflowDetectingUInt64, Arithmetic) { + Logger::StderrSinkDelegate stderr_sink(Logger::Registry::getSink()); // For coverage build. + OverflowDetectingUInt64 length; + length += 1; + length -= 1; + length -= 0; + EXPECT_DEATH(length -= 1, "underflow"); + uint64_t half = uint64_t(1) << 63; + length += half; + length += (half - 1); // length is now 2^64 - 1 + EXPECT_DEATH(length += 1, "overflow"); +} + } // namespace } // namespace Buffer } // namespace Envoy diff --git a/test/common/buffer/utility.h b/test/common/buffer/utility.h index 1670a2da040ea..30723ccbe8b6a 100644 --- a/test/common/buffer/utility.h +++ b/test/common/buffer/utility.h @@ -4,12 +4,42 @@ #include "common/buffer/buffer_impl.h" -#include "test/test_common/test_base.h" +#include "gtest/gtest.h" namespace Envoy { namespace Buffer { namespace { +/** Used to specify which OwnedImpl implementation to test. */ +enum class BufferImplementation { + Old, // original evbuffer-based version + New // new deque-of-slices version +}; + +/** + * Base class for tests that are parameterized based on BufferImplementation. 
+ */ +class BufferImplementationParamTest : public testing::TestWithParam<BufferImplementation> { +protected: + BufferImplementationParamTest() { + OwnedImpl::useOldImpl(GetParam() == BufferImplementation::Old); + } + + virtual ~BufferImplementationParamTest() {} + + /** Verify that a buffer has been constructed using the expected implementation. */ + void verifyImplementation(const OwnedImpl& buffer) { + switch (GetParam()) { + case BufferImplementation::Old: + ASSERT_TRUE(buffer.usesOldImpl()); + break; + case BufferImplementation::New: + ASSERT_FALSE(buffer.usesOldImpl()); + break; + } + } +}; + inline void addRepeated(Buffer::Instance& buffer, int n, int8_t value) { for (int i = 0; i < n; i++) { buffer.add(&value, 1); diff --git a/test/common/buffer/watermark_buffer_test.cc b/test/common/buffer/watermark_buffer_test.cc index 18efc42460822..571a72a859580 100644 --- a/test/common/buffer/watermark_buffer_test.cc +++ b/test/common/buffer/watermark_buffer_test.cc @@ -2,8 +2,11 @@ #include "common/buffer/buffer_impl.h" #include "common/buffer/watermark_buffer.h" +#include "common/network/io_socket_handle_impl.h" -#include "test/test_common/test_base.h" +#include "test/common/buffer/utility.h" + +#include "gtest/gtest.h" namespace Envoy { namespace Buffer { @@ -11,9 +14,12 @@ namespace { const char TEN_BYTES[] = "0123456789"; -class WatermarkBufferTest : public TestBase { +class WatermarkBufferTest : public BufferImplementationParamTest { public: - WatermarkBufferTest() { buffer_.setWatermarks(5, 10); } + WatermarkBufferTest() { + verifyImplementation(buffer_); + buffer_.setWatermarks(5, 10); + } Buffer::WatermarkBuffer buffer_{[&]() -> void { ++times_low_watermark_called_; }, [&]() -> void { ++times_high_watermark_called_; }}; @@ -21,9 +27,12 @@ class WatermarkBufferTest : public TestBase { uint32_t times_high_watermark_called_{0}; }; -TEST_F(WatermarkBufferTest, TestWatermark) { ASSERT_EQ(10, buffer_.highWatermark()); } +INSTANTIATE_TEST_CASE_P(WatermarkBufferTest, WatermarkBufferTest, +
testing::ValuesIn({BufferImplementation::Old, BufferImplementation::New})); + +TEST_P(WatermarkBufferTest, TestWatermark) { ASSERT_EQ(10, buffer_.highWatermark()); } -TEST_F(WatermarkBufferTest, CopyOut) { +TEST_P(WatermarkBufferTest, CopyOut) { buffer_.add("hello world"); std::array out; buffer_.copyOut(0, out.size(), out.data()); @@ -36,7 +45,7 @@ TEST_F(WatermarkBufferTest, CopyOut) { buffer_.copyOut(4, 0, out.data()); } -TEST_F(WatermarkBufferTest, AddChar) { +TEST_P(WatermarkBufferTest, AddChar) { buffer_.add(TEN_BYTES, 10); EXPECT_EQ(0, times_high_watermark_called_); buffer_.add("a", 1); @@ -44,7 +53,7 @@ TEST_F(WatermarkBufferTest, AddChar) { EXPECT_EQ(11, buffer_.length()); } -TEST_F(WatermarkBufferTest, AddString) { +TEST_P(WatermarkBufferTest, AddString) { buffer_.add(std::string(TEN_BYTES)); EXPECT_EQ(0, times_high_watermark_called_); buffer_.add(std::string("a")); @@ -52,7 +61,7 @@ TEST_F(WatermarkBufferTest, AddString) { EXPECT_EQ(11, buffer_.length()); } -TEST_F(WatermarkBufferTest, AddBuffer) { +TEST_P(WatermarkBufferTest, AddBuffer) { OwnedImpl first(TEN_BYTES); buffer_.add(first); EXPECT_EQ(0, times_high_watermark_called_); @@ -62,7 +71,7 @@ TEST_F(WatermarkBufferTest, AddBuffer) { EXPECT_EQ(11, buffer_.length()); } -TEST_F(WatermarkBufferTest, Prepend) { +TEST_P(WatermarkBufferTest, Prepend) { std::string suffix = "World!", prefix = "Hello, "; buffer_.add(suffix); @@ -72,7 +81,7 @@ TEST_F(WatermarkBufferTest, Prepend) { EXPECT_EQ(suffix.size() + prefix.size(), buffer_.length()); } -TEST_F(WatermarkBufferTest, PrependToEmptyBuffer) { +TEST_P(WatermarkBufferTest, PrependToEmptyBuffer) { std::string suffix = "World!", prefix = "Hello, "; buffer_.prepend(suffix); @@ -88,7 +97,7 @@ TEST_F(WatermarkBufferTest, PrependToEmptyBuffer) { EXPECT_EQ(suffix.size() + prefix.size(), buffer_.length()); } -TEST_F(WatermarkBufferTest, PrependBuffer) { +TEST_P(WatermarkBufferTest, PrependBuffer) { std::string suffix = "World!", prefix = "Hello, "; uint32_t 
prefix_buffer_low_watermark_hits{0}; @@ -109,7 +118,7 @@ TEST_F(WatermarkBufferTest, PrependBuffer) { EXPECT_EQ(0, prefixBuffer.length()); } -TEST_F(WatermarkBufferTest, Commit) { +TEST_P(WatermarkBufferTest, Commit) { buffer_.add(TEN_BYTES, 10); EXPECT_EQ(0, times_high_watermark_called_); RawSlice out; @@ -121,7 +130,7 @@ TEST_F(WatermarkBufferTest, Commit) { EXPECT_EQ(20, buffer_.length()); } -TEST_F(WatermarkBufferTest, Drain) { +TEST_P(WatermarkBufferTest, Drain) { // Draining from above to below the low watermark does nothing if the high // watermark never got hit. buffer_.add(TEN_BYTES, 10); @@ -144,7 +153,7 @@ TEST_F(WatermarkBufferTest, Drain) { EXPECT_EQ(2, times_high_watermark_called_); } -TEST_F(WatermarkBufferTest, MoveFullBuffer) { +TEST_P(WatermarkBufferTest, MoveFullBuffer) { buffer_.add(TEN_BYTES, 10); OwnedImpl data("a"); @@ -154,7 +163,7 @@ TEST_F(WatermarkBufferTest, MoveFullBuffer) { EXPECT_EQ(11, buffer_.length()); } -TEST_F(WatermarkBufferTest, MoveOneByte) { +TEST_P(WatermarkBufferTest, MoveOneByte) { buffer_.add(TEN_BYTES, 9); OwnedImpl data("ab"); @@ -167,7 +176,7 @@ TEST_F(WatermarkBufferTest, MoveOneByte) { EXPECT_EQ(11, buffer_.length()); } -TEST_F(WatermarkBufferTest, WatermarkFdFunctions) { +TEST_P(WatermarkBufferTest, WatermarkFdFunctions) { int pipe_fds[2] = {0, 0}; ASSERT_EQ(0, pipe(pipe_fds)); @@ -177,10 +186,11 @@ TEST_F(WatermarkBufferTest, WatermarkFdFunctions) { EXPECT_EQ(0, times_low_watermark_called_); int bytes_written_total = 0; + Network::IoSocketHandleImpl io_handle1(pipe_fds[1]); while (bytes_written_total < 20) { - Api::SysCallIntResult result = buffer_.write(pipe_fds[1]); - if (result.rc_ < 0) { - ASSERT_EQ(EAGAIN, result.errno_); + Api::IoCallUint64Result result = buffer_.write(io_handle1); + if (!result.ok()) { + ASSERT_EQ(Api::IoError::IoErrorCode::Again, result.err_->getErrorCode()); } else { bytes_written_total += result.rc_; } @@ -190,15 +200,16 @@ TEST_F(WatermarkBufferTest, WatermarkFdFunctions) { EXPECT_EQ(0, 
buffer_.length()); int bytes_read_total = 0; + Network::IoSocketHandleImpl io_handle2(pipe_fds[0]); while (bytes_read_total < 20) { - Api::SysCallIntResult result = buffer_.read(pipe_fds[0], 20); + Api::IoCallUint64Result result = buffer_.read(io_handle2, 20); bytes_read_total += result.rc_; } EXPECT_EQ(2, times_high_watermark_called_); EXPECT_EQ(20, buffer_.length()); } -TEST_F(WatermarkBufferTest, MoveWatermarks) { +TEST_P(WatermarkBufferTest, MoveWatermarks) { buffer_.add(TEN_BYTES, 9); EXPECT_EQ(0, times_high_watermark_called_); buffer_.setWatermarks(1, 9); @@ -222,7 +233,7 @@ TEST_F(WatermarkBufferTest, MoveWatermarks) { EXPECT_EQ(2, times_low_watermark_called_); } -TEST_F(WatermarkBufferTest, GetRawSlices) { +TEST_P(WatermarkBufferTest, GetRawSlices) { buffer_.add(TEN_BYTES, 10); RawSlice slices[2]; @@ -234,7 +245,7 @@ TEST_F(WatermarkBufferTest, GetRawSlices) { EXPECT_EQ(data_pointer, slices[0].mem_); } -TEST_F(WatermarkBufferTest, Search) { +TEST_P(WatermarkBufferTest, Search) { buffer_.add(TEN_BYTES, 10); EXPECT_EQ(1, buffer_.search(&TEN_BYTES[1], 2, 0)); @@ -242,7 +253,7 @@ TEST_F(WatermarkBufferTest, Search) { EXPECT_EQ(-1, buffer_.search(&TEN_BYTES[1], 2, 5)); } -TEST_F(WatermarkBufferTest, MoveBackWithWatermarks) { +TEST_P(WatermarkBufferTest, MoveBackWithWatermarks) { int high_watermark_buffer1 = 0; int low_watermark_buffer1 = 0; Buffer::WatermarkBuffer buffer1{[&]() -> void { ++low_watermark_buffer1; }, diff --git a/test/common/buffer/zero_copy_input_stream_test.cc b/test/common/buffer/zero_copy_input_stream_test.cc index 1a748fe1146b6..bd747ed20a674 100644 --- a/test/common/buffer/zero_copy_input_stream_test.cc +++ b/test/common/buffer/zero_copy_input_stream_test.cc @@ -1,16 +1,19 @@ #include "common/buffer/buffer_impl.h" #include "common/buffer/zero_copy_input_stream_impl.h" -#include "test/test_common/test_base.h" +#include "test/common/buffer/utility.h" + +#include "gtest/gtest.h" namespace Envoy { namespace Buffer { namespace { -class 
ZeroCopyInputStreamTest : public TestBase { +class ZeroCopyInputStreamTest : public BufferImplementationParamTest { public: ZeroCopyInputStreamTest() { Buffer::OwnedImpl buffer{"abcd"}; + verifyImplementation(buffer); stream_.move(buffer); } @@ -21,21 +24,23 @@ class ZeroCopyInputStreamTest : public TestBase { int size_; }; -TEST_F(ZeroCopyInputStreamTest, Move) { +TEST_P(ZeroCopyInputStreamTest, Move) { Buffer::OwnedImpl buffer{"abcd"}; + verifyImplementation(buffer); stream_.move(buffer); EXPECT_EQ(0, buffer.length()); } -TEST_F(ZeroCopyInputStreamTest, Next) { +TEST_P(ZeroCopyInputStreamTest, Next) { EXPECT_TRUE(stream_.Next(&data_, &size_)); EXPECT_EQ(4, size_); EXPECT_EQ(0, memcmp(slice_data_.data(), data_, size_)); } -TEST_F(ZeroCopyInputStreamTest, TwoSlices) { +TEST_P(ZeroCopyInputStreamTest, TwoSlices) { Buffer::OwnedImpl buffer("efgh"); + verifyImplementation(buffer); stream_.move(buffer); @@ -47,7 +52,7 @@ TEST_F(ZeroCopyInputStreamTest, TwoSlices) { EXPECT_EQ(0, memcmp("efgh", data_, size_)); } -TEST_F(ZeroCopyInputStreamTest, BackUp) { +TEST_P(ZeroCopyInputStreamTest, BackUp) { EXPECT_TRUE(stream_.Next(&data_, &size_)); EXPECT_EQ(4, size_); @@ -60,7 +65,7 @@ TEST_F(ZeroCopyInputStreamTest, BackUp) { EXPECT_EQ(4, stream_.ByteCount()); } -TEST_F(ZeroCopyInputStreamTest, BackUpFull) { +TEST_P(ZeroCopyInputStreamTest, BackUpFull) { EXPECT_TRUE(stream_.Next(&data_, &size_)); EXPECT_EQ(4, size_); @@ -71,13 +76,13 @@ TEST_F(ZeroCopyInputStreamTest, BackUpFull) { EXPECT_EQ(4, stream_.ByteCount()); } -TEST_F(ZeroCopyInputStreamTest, ByteCount) { +TEST_P(ZeroCopyInputStreamTest, ByteCount) { EXPECT_EQ(0, stream_.ByteCount()); EXPECT_TRUE(stream_.Next(&data_, &size_)); EXPECT_EQ(4, stream_.ByteCount()); } -TEST_F(ZeroCopyInputStreamTest, Finish) { +TEST_P(ZeroCopyInputStreamTest, Finish) { EXPECT_TRUE(stream_.Next(&data_, &size_)); EXPECT_TRUE(stream_.Next(&data_, &size_)); EXPECT_EQ(0, size_); diff --git a/test/common/common/BUILD b/test/common/common/BUILD 
index dc68632fe5bb6..2aa4a66dc28f3 100644 --- a/test/common/common/BUILD +++ b/test/common/common/BUILD @@ -119,6 +119,7 @@ envoy_cc_test( deps = [ "//source/common/common:mutex_tracer_lib", "//test/test_common:contention_lib", + "//test/test_common:utility_lib", ], ) @@ -181,17 +182,6 @@ envoy_cc_test( deps = ["//source/common/common:callback_impl_lib"], ) -envoy_cc_test( - name = "block_memory_hash_set_test", - srcs = ["block_memory_hash_set_test.cc"], - deps = [ - "//include/envoy/stats:stats_interface", - "//source/common/common:block_memory_hash_set_lib", - "//source/common/common:hash_lib", - "//source/common/stats:stats_lib", - ], -) - envoy_cc_binary( name = "utility_speed_test", srcs = ["utility_speed_test.cc"], diff --git a/test/common/common/assert_test.cc b/test/common/common/assert_test.cc index 078e085144b63..44d65b4957666 100644 --- a/test/common/common/assert_test.cc +++ b/test/common/common/assert_test.cc @@ -1,7 +1,8 @@ #include "common/common/assert.h" #include "test/test_common/logging.h" -#include "test/test_common/test_base.h" + +#include "gtest/gtest.h" namespace Envoy { diff --git a/test/common/common/backoff_strategy_test.cc b/test/common/common/backoff_strategy_test.cc index 3396c166a1ad2..010768349983d 100644 --- a/test/common/common/backoff_strategy_test.cc +++ b/test/common/common/backoff_strategy_test.cc @@ -1,7 +1,8 @@ #include "common/common/backoff_strategy.h" #include "test/mocks/runtime/mocks.h" -#include "test/test_common/test_base.h" + +#include "gtest/gtest.h" using testing::NiceMock; using testing::Return; diff --git a/test/common/common/base64_test.cc b/test/common/common/base64_test.cc index 257ad00603260..0affe2f93a135 100644 --- a/test/common/common/base64_test.cc +++ b/test/common/common/base64_test.cc @@ -4,7 +4,8 @@ #include "common/common/base64.h" #include "test/test_common/printers.h" -#include "test/test_common/test_base.h" + +#include "gtest/gtest.h" namespace Envoy { TEST(Base64Test, EmptyBufferEncode) { @@ -30,8 
+31,11 @@ TEST(Base64Test, SingleSliceBufferEncode) { TEST(Base64Test, EncodeString) { EXPECT_EQ("", Base64::encode("", 0)); EXPECT_EQ("AAA=", Base64::encode("\0\0", 2)); + EXPECT_EQ("AAA", Base64::encode("\0\0", 2, false)); EXPECT_EQ("Zm9v", Base64::encode("foo", 3)); EXPECT_EQ("Zm8=", Base64::encode("fo", 2)); + EXPECT_EQ("Zg==", Base64::encode("f", 1)); + EXPECT_EQ("Zg", Base64::encode("f", 1, false)); } TEST(Base64Test, Decode) { diff --git a/test/common/common/block_memory_hash_set_test.cc b/test/common/common/block_memory_hash_set_test.cc deleted file mode 100644 index fbeb56f7c38d9..0000000000000 --- a/test/common/common/block_memory_hash_set_test.cc +++ /dev/null @@ -1,218 +0,0 @@ -#include -#include -#include -#include -#include - -#include "common/common/block_memory_hash_set.h" -#include "common/common/fmt.h" -#include "common/common/hash.h" -#include "common/stats/stats_options_impl.h" - -#include "test/test_common/test_base.h" - -#include "absl/strings/string_view.h" - -namespace Envoy { - -// Tests BlockMemoryHashSet. -class BlockMemoryHashSetTest : public TestBase { -protected: - // TestValue that doesn't define a hash. - struct TestValueBase { - absl::string_view key() const { return name; } - void initialize(absl::string_view key, const Stats::StatsOptions& stats_options) { - ASSERT(key.size() <= stats_options.maxNameLength()); - memcpy(name, key.data(), key.size()); - name[key.size()] = '\0'; - } - static uint64_t structSizeWithOptions(const Stats::StatsOptions& stats_options) { - UNREFERENCED_PARAMETER(stats_options); - return sizeof(TestValue); - } - - int64_t number; - char name[256]; - }; - - // TestValue that uses an always-zero hash. - struct TestValueZeroHash : public TestValueBase { - static uint64_t hash(absl::string_view /* key */) { return 0; } - }; - - // TestValue that uses a real hash function. 
- struct TestValue : public TestValueBase { - static uint64_t hash(absl::string_view key) { return HashUtil::xxHash64(key); } - }; - - typedef BlockMemoryHashSet::ValueCreatedPair ValueCreatedPair; - - template void setUp() { - hash_set_options_.capacity = 100; - hash_set_options_.num_slots = 5; - const uint32_t mem_size = - BlockMemoryHashSet::numBytes(hash_set_options_, stats_options_); - memory_ = std::make_unique(mem_size); - memset(memory_.get(), 0, mem_size); - } - - /** - * Returns a string describing the contents of the map, including the control - * bits and the keys in each slot. - */ - template - std::string hashSetToString(BlockMemoryHashSet& hs) { - std::string ret; - static const uint32_t sentinel = BlockMemoryHashSet::Sentinel; - std::string control_string = - fmt::format("{} size={} free_cell_index={}", hs.control_->hash_set_options.toString(), - hs.control_->size, hs.control_->free_cell_index); - ret = fmt::format("options={}\ncontrol={}\n", hs.control_->hash_set_options.toString(), - control_string); - for (uint32_t i = 0; i < hs.control_->hash_set_options.num_slots; ++i) { - ret += fmt::format("slot {}:", i); - for (uint32_t j = hs.slots_[i]; j != sentinel; j = hs.getCell(j).next_cell_index) { - ret += " " + std::string(hs.getCell(j).value.key()); - } - ret += "\n"; - } - return ret; - } - - BlockMemoryHashSetOptions hash_set_options_; - Stats::StatsOptionsImpl stats_options_; - std::unique_ptr memory_; -}; - -TEST_F(BlockMemoryHashSetTest, initAndAttach) { - setUp(); - { - BlockMemoryHashSet hash_set1(hash_set_options_, true, memory_.get(), - stats_options_); // init - BlockMemoryHashSet hash_set2(hash_set_options_, false, memory_.get(), - stats_options_); // attach - } - - // If we tweak an option, we can no longer attach it. 
- bool constructor_completed = false; - bool constructor_threw = false; - try { - hash_set_options_.capacity = 99; - BlockMemoryHashSet hash_set3(hash_set_options_, false, memory_.get(), - stats_options_); - constructor_completed = false; - } catch (const std::exception& e) { - constructor_threw = true; - } - EXPECT_TRUE(constructor_threw); - EXPECT_FALSE(constructor_completed); -} - -TEST_F(BlockMemoryHashSetTest, putRemove) { - setUp(); - { - BlockMemoryHashSet hash_set1(hash_set_options_, true, memory_.get(), stats_options_); - hash_set1.sanityCheck(); - EXPECT_EQ(0, hash_set1.size()); - EXPECT_EQ(nullptr, hash_set1.get("no such key")); - ValueCreatedPair vc = hash_set1.insert("good key"); - EXPECT_TRUE(vc.second); - vc.first->number = 12345; - hash_set1.sanityCheck(); - EXPECT_EQ(1, hash_set1.size()); - EXPECT_EQ(12345, hash_set1.get("good key")->number); - EXPECT_EQ(nullptr, hash_set1.get("no such key")); - - vc = hash_set1.insert("good key"); - EXPECT_FALSE(vc.second) << "re-used, not newly created"; - vc.first->number = 6789; - EXPECT_EQ(6789, hash_set1.get("good key")->number); - EXPECT_EQ(1, hash_set1.size()); - } - - { - // Now init a new hash-map with the same memory. 
- BlockMemoryHashSet hash_set2(hash_set_options_, false, memory_.get(), - stats_options_); - EXPECT_EQ(1, hash_set2.size()); - EXPECT_EQ(nullptr, hash_set2.get("no such key")); - EXPECT_EQ(6789, hash_set2.get("good key")->number) << hashSetToString(hash_set2); - EXPECT_FALSE(hash_set2.remove("no such key")); - hash_set2.sanityCheck(); - EXPECT_TRUE(hash_set2.remove("good key")); - hash_set2.sanityCheck(); - EXPECT_EQ(nullptr, hash_set2.get("good key")); - EXPECT_EQ(0, hash_set2.size()); - } -} - -TEST_F(BlockMemoryHashSetTest, tooManyValues) { - setUp(); - BlockMemoryHashSet hash_set1(hash_set_options_, true, memory_.get(), stats_options_); - std::vector keys; - for (uint32_t i = 0; i < hash_set_options_.capacity + 1; ++i) { - keys.push_back(fmt::format("key{}", i)); - } - - for (uint32_t i = 0; i < hash_set_options_.capacity; ++i) { - TestValue* value = hash_set1.insert(keys[i]).first; - ASSERT_NE(nullptr, value); - value->number = i; - } - hash_set1.sanityCheck(); - EXPECT_EQ(hash_set_options_.capacity, hash_set1.size()); - - for (uint32_t i = 0; i < hash_set_options_.capacity; ++i) { - const TestValue* value = hash_set1.get(keys[i]); - ASSERT_NE(nullptr, value); - EXPECT_EQ(i, value->number); - } - hash_set1.sanityCheck(); - - // We can't fit one more value. - EXPECT_EQ(nullptr, hash_set1.insert(keys[hash_set_options_.capacity]).first); - hash_set1.sanityCheck(); - EXPECT_EQ(hash_set_options_.capacity, hash_set1.size()); - - // Now remove everything one by one. - for (uint32_t i = 0; i < hash_set_options_.capacity; ++i) { - EXPECT_TRUE(hash_set1.remove(keys[i])); - } - hash_set1.sanityCheck(); - EXPECT_EQ(0, hash_set1.size()); - - // Now we can put in that last key we weren't able to before. 
- TestValue* value = hash_set1.insert(keys[hash_set_options_.capacity]).first; - EXPECT_NE(nullptr, value); - value->number = 314519; - EXPECT_EQ(1, hash_set1.size()); - EXPECT_EQ(314519, hash_set1.get(keys[hash_set_options_.capacity])->number); - hash_set1.sanityCheck(); -} - -TEST_F(BlockMemoryHashSetTest, severalKeysZeroHash) { - setUp(); - BlockMemoryHashSet hash_set1(hash_set_options_, true, memory_.get(), - stats_options_); - hash_set1.insert("one").first->number = 1; - hash_set1.insert("two").first->number = 2; - hash_set1.insert("three").first->number = 3; - EXPECT_TRUE(hash_set1.remove("two")); - hash_set1.sanityCheck(); - hash_set1.insert("four").first->number = 4; - hash_set1.sanityCheck(); - EXPECT_FALSE(hash_set1.remove("two")); - hash_set1.sanityCheck(); -} - -class BlockMemoryHashSetDeathTest : public BlockMemoryHashSetTest {}; - -TEST_F(BlockMemoryHashSetDeathTest, sanityCheckZeroedMemoryDeathTest) { - setUp(); - BlockMemoryHashSet hash_set1(hash_set_options_, true, memory_.get(), - stats_options_); - memset(memory_.get(), 0, hash_set1.numBytes(stats_options_)); - EXPECT_DEATH(hash_set1.sanityCheck(), ""); -} - -} // namespace Envoy diff --git a/test/common/common/callback_impl_test.cc b/test/common/common/callback_impl_test.cc index 72ea44c63b81e..5548df659388b 100644 --- a/test/common/common/callback_impl_test.cc +++ b/test/common/common/callback_impl_test.cc @@ -1,15 +1,14 @@ #include "common/common/callback_impl.h" -#include "test/test_common/test_base.h" - #include "gmock/gmock.h" +#include "gtest/gtest.h" using testing::InSequence; namespace Envoy { namespace Common { -class CallbackManagerTest : public TestBase { +class CallbackManagerTest : public testing::Test { public: MOCK_METHOD1(called, void(int arg)); }; diff --git a/test/common/common/cleanup_test.cc b/test/common/common/cleanup_test.cc index f702f70f4bdc9..742f279dc530c 100644 --- a/test/common/common/cleanup_test.cc +++ b/test/common/common/cleanup_test.cc @@ -1,6 +1,6 @@ #include 
"common/common/cleanup.h" -#include "test/test_common/test_base.h" +#include "gtest/gtest.h" namespace Envoy { diff --git a/test/common/common/fmt_test.cc b/test/common/common/fmt_test.cc index 6ec59c26b2701..f5b8fd45d9536 100644 --- a/test/common/common/fmt_test.cc +++ b/test/common/common/fmt_test.cc @@ -1,9 +1,8 @@ #include "common/common/fmt.h" #include "common/common/logger.h" -#include "test/test_common/test_base.h" - #include "absl/strings/string_view.h" +#include "gtest/gtest.h" namespace Envoy { diff --git a/test/common/common/hash_test.cc b/test/common/common/hash_test.cc index 8d131ea7ca9ff..4112b67d59c56 100644 --- a/test/common/common/hash_test.cc +++ b/test/common/common/hash_test.cc @@ -1,6 +1,6 @@ #include "common/common/hash.h" -#include "test/test_common/test_base.h" +#include "gtest/gtest.h" namespace Envoy { TEST(Hash, xxHash) { @@ -38,4 +38,12 @@ TEST(Hash, stdhash) { } #endif +TEST(Hash, sharedStringSet) { + SharedStringSet set; + auto foo = std::make_shared("foo"); + set.insert(foo); + auto pos = set.find("foo"); + EXPECT_EQ(pos->get(), foo.get()); +} + } // namespace Envoy diff --git a/test/common/common/hex_test.cc b/test/common/common/hex_test.cc index 4fe426a25fb93..06a06469edc1c 100644 --- a/test/common/common/hex_test.cc +++ b/test/common/common/hex_test.cc @@ -5,7 +5,7 @@ #include "common/common/hex.h" -#include "test/test_common/test_base.h" +#include "gtest/gtest.h" namespace Envoy { TEST(Hex, SimpleEncode) { diff --git a/test/common/common/lock_guard_test.cc b/test/common/common/lock_guard_test.cc index ee9073391a5d5..55f505f9ab584 100644 --- a/test/common/common/lock_guard_test.cc +++ b/test/common/common/lock_guard_test.cc @@ -1,12 +1,12 @@ #include "common/common/lock_guard.h" #include "common/common/thread.h" -#include "test/test_common/test_base.h" +#include "gtest/gtest.h" namespace Envoy { namespace Thread { -class ThreadTest : public TestBase { +class ThreadTest : public testing::Test { protected: ThreadTest() : a_(0), b_(0) 
{} int a_ GUARDED_BY(a_mutex_); diff --git a/test/common/common/log_macros_test.cc b/test/common/common/log_macros_test.cc index 82fdfeca05ec2..8d683910bb6b7 100644 --- a/test/common/common/log_macros_test.cc +++ b/test/common/common/log_macros_test.cc @@ -5,9 +5,9 @@ #include "test/mocks/http/mocks.h" #include "test/mocks/network/mocks.h" -#include "test/test_common/test_base.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" namespace Envoy { diff --git a/test/common/common/matchers_test.cc b/test/common/common/matchers_test.cc index 43c24e132109c..d5b509a89082a 100644 --- a/test/common/common/matchers_test.cc +++ b/test/common/common/matchers_test.cc @@ -7,7 +7,7 @@ #include "common/config/metadata.h" #include "common/protobuf/protobuf.h" -#include "test/test_common/test_base.h" +#include "gtest/gtest.h" namespace Envoy { namespace Matcher { diff --git a/test/common/common/mutex_tracer_test.cc b/test/common/common/mutex_tracer_test.cc index a8ccee205a047..e72fc3a995b94 100644 --- a/test/common/common/mutex_tracer_test.cc +++ b/test/common/common/mutex_tracer_test.cc @@ -5,13 +5,14 @@ #include "common/common/mutex_tracer_impl.h" #include "test/test_common/contention.h" -#include "test/test_common/test_base.h" +#include "test/test_common/utility.h" #include "absl/synchronization/mutex.h" +#include "gtest/gtest.h" namespace Envoy { -class MutexTracerTest : public TestBase { +class MutexTracerTest : public testing::Test { protected: void SetUp() override { tracer_.reset(); } @@ -72,13 +73,16 @@ TEST_F(MutexTracerTest, TryLockNoContention) { } TEST_F(MutexTracerTest, TwoThreadsWithContention) { + Api::ApiPtr api = Api::createApiForTest(); + int64_t prev_num_contentions = tracer_.numContentions(); for (int i = 1; i <= 10; ++i) { int64_t curr_num_lifetime_wait_cycles = tracer_.lifetimeWaitCycles(); - Thread::TestUtil::ContentionGenerator contention_generator; - + Thread::TestUtil::ContentionGenerator contention_generator(*api); 
contention_generator.generateContention(tracer_); - EXPECT_EQ(tracer_.numContentions(), i); + int64_t num_contentions = tracer_.numContentions(); + EXPECT_LT(prev_num_contentions, num_contentions); + prev_num_contentions = num_contentions; EXPECT_GT(tracer_.currentWaitCycles(), 0); // This shouldn't be hardcoded. EXPECT_GT(tracer_.lifetimeWaitCycles(), 0); EXPECT_GT(tracer_.lifetimeWaitCycles(), curr_num_lifetime_wait_cycles); diff --git a/test/common/common/perf_annotation_disabled_test.cc b/test/common/common/perf_annotation_disabled_test.cc index fe6ecc663a2b2..8109cad634578 100644 --- a/test/common/common/perf_annotation_disabled_test.cc +++ b/test/common/common/perf_annotation_disabled_test.cc @@ -6,7 +6,7 @@ #include "common/common/perf_annotation.h" -#include "test/test_common/test_base.h" +#include "gtest/gtest.h" namespace Envoy { diff --git a/test/common/common/perf_annotation_test.cc b/test/common/common/perf_annotation_test.cc index 6a479009b25f0..c8498a72096ef 100644 --- a/test/common/common/perf_annotation_test.cc +++ b/test/common/common/perf_annotation_test.cc @@ -9,11 +9,11 @@ #include "common/common/perf_annotation.h" -#include "test/test_common/test_base.h" +#include "gtest/gtest.h" namespace Envoy { -class PerfAnnotationTest : public TestBase { +class PerfAnnotationTest : public testing::Test { protected: void TearDown() override { PERF_CLEAR(); } }; diff --git a/test/common/common/phantom_test.cc b/test/common/common/phantom_test.cc index fbdfd42a5df0c..6ec0ed1850683 100644 --- a/test/common/common/phantom_test.cc +++ b/test/common/common/phantom_test.cc @@ -1,6 +1,6 @@ #include "common/common/phantom.h" -#include "test/test_common/test_base.h" +#include "gtest/gtest.h" namespace Envoy { diff --git a/test/common/common/stack_array_test.cc b/test/common/common/stack_array_test.cc index 04a98c9d31596..fa857f77b05f5 100644 --- a/test/common/common/stack_array_test.cc +++ b/test/common/common/stack_array_test.cc @@ -1,8 +1,7 @@ #include 
"common/common/stack_array.h" -#include "test/test_common/test_base.h" - #include "gmock/gmock.h" +#include "gtest/gtest.h" namespace Envoy { diff --git a/test/common/common/to_lower_table_test.cc b/test/common/common/to_lower_table_test.cc index a20c6cebdd5a4..3004f852aa200 100644 --- a/test/common/common/to_lower_table_test.cc +++ b/test/common/common/to_lower_table_test.cc @@ -1,6 +1,6 @@ #include "common/common/to_lower_table.h" -#include "test/test_common/test_base.h" +#include "gtest/gtest.h" namespace Envoy { TEST(ToLowerTableTest, All) { diff --git a/test/common/common/token_bucket_impl_test.cc b/test/common/common/token_bucket_impl_test.cc index 5d575377769d1..aec4744bc83eb 100644 --- a/test/common/common/token_bucket_impl_test.cc +++ b/test/common/common/token_bucket_impl_test.cc @@ -3,11 +3,12 @@ #include "common/common/token_bucket_impl.h" #include "test/test_common/simulated_time_system.h" -#include "test/test_common/test_base.h" + +#include "gtest/gtest.h" namespace Envoy { -class TokenBucketImplTest : public TestBase { +class TokenBucketImplTest : public testing::Test { protected: Event::SimulatedTimeSystem time_system_; }; @@ -16,60 +17,80 @@ class TokenBucketImplTest : public TestBase { TEST_F(TokenBucketImplTest, Initialization) { TokenBucketImpl token_bucket{1, time_system_, -1.0}; - EXPECT_TRUE(token_bucket.consume()); - EXPECT_FALSE(token_bucket.consume()); + EXPECT_EQ(1, token_bucket.consume(1, false)); + EXPECT_EQ(0, token_bucket.consume(1, false)); } // Verifies TokenBucket's maximum capacity. 
TEST_F(TokenBucketImplTest, MaxBucketSize) { TokenBucketImpl token_bucket{3, time_system_, 1}; - EXPECT_TRUE(token_bucket.consume(3)); + EXPECT_EQ(3, token_bucket.consume(3, false)); time_system_.setMonotonicTime(std::chrono::seconds(10)); - EXPECT_FALSE(token_bucket.consume(4)); - EXPECT_TRUE(token_bucket.consume(3)); + EXPECT_EQ(0, token_bucket.consume(4, false)); + EXPECT_EQ(3, token_bucket.consume(3, false)); } // Verifies that TokenBucket can consume tokens. TEST_F(TokenBucketImplTest, Consume) { TokenBucketImpl token_bucket{10, time_system_, 1}; - EXPECT_FALSE(token_bucket.consume(20)); - EXPECT_TRUE(token_bucket.consume(9)); + EXPECT_EQ(0, token_bucket.consume(20, false)); + EXPECT_EQ(9, token_bucket.consume(9, false)); - EXPECT_TRUE(token_bucket.consume()); + EXPECT_EQ(1, token_bucket.consume(1, false)); time_system_.setMonotonicTime(std::chrono::milliseconds(999)); - EXPECT_FALSE(token_bucket.consume()); + EXPECT_EQ(0, token_bucket.consume(1, false)); time_system_.setMonotonicTime(std::chrono::milliseconds(5999)); - EXPECT_FALSE(token_bucket.consume(6)); + EXPECT_EQ(0, token_bucket.consume(6, false)); time_system_.setMonotonicTime(std::chrono::milliseconds(6000)); - EXPECT_TRUE(token_bucket.consume(6)); - EXPECT_FALSE(token_bucket.consume()); + EXPECT_EQ(6, token_bucket.consume(6, false)); + EXPECT_EQ(0, token_bucket.consume(1, false)); } // Verifies that TokenBucket can refill tokens. 
TEST_F(TokenBucketImplTest, Refill) { TokenBucketImpl token_bucket{1, time_system_, 0.5}; - EXPECT_TRUE(token_bucket.consume()); + EXPECT_EQ(1, token_bucket.consume(1, false)); time_system_.setMonotonicTime(std::chrono::milliseconds(500)); - EXPECT_FALSE(token_bucket.consume()); + EXPECT_EQ(0, token_bucket.consume(1, false)); time_system_.setMonotonicTime(std::chrono::milliseconds(1500)); - EXPECT_FALSE(token_bucket.consume()); + EXPECT_EQ(0, token_bucket.consume(1, false)); time_system_.setMonotonicTime(std::chrono::milliseconds(2000)); - EXPECT_TRUE(token_bucket.consume()); + EXPECT_EQ(1, token_bucket.consume(1, false)); } TEST_F(TokenBucketImplTest, NextTokenAvailable) { TokenBucketImpl token_bucket{10, time_system_, 5}; - EXPECT_TRUE(token_bucket.consume(9)); - EXPECT_EQ(0, token_bucket.nextTokenAvailableMs()); - EXPECT_TRUE(token_bucket.consume()); - EXPECT_FALSE(token_bucket.consume()); - EXPECT_EQ(200, token_bucket.nextTokenAvailableMs()); + EXPECT_EQ(9, token_bucket.consume(9, false)); + EXPECT_EQ(std::chrono::milliseconds(0), token_bucket.nextTokenAvailable()); + EXPECT_EQ(1, token_bucket.consume(1, false)); + EXPECT_EQ(0, token_bucket.consume(1, false)); + EXPECT_EQ(std::chrono::milliseconds(200), token_bucket.nextTokenAvailable()); +} + +// Test partial consumption of tokens. +TEST_F(TokenBucketImplTest, PartialConsumption) { + TokenBucketImpl token_bucket{16, time_system_, 16}; + EXPECT_EQ(16, token_bucket.consume(18, true)); + EXPECT_EQ(std::chrono::milliseconds(63), token_bucket.nextTokenAvailable()); + time_system_.sleep(std::chrono::milliseconds(62)); + EXPECT_EQ(0, token_bucket.consume(1, true)); + time_system_.sleep(std::chrono::milliseconds(1)); + EXPECT_EQ(1, token_bucket.consume(2, true)); + EXPECT_EQ(std::chrono::milliseconds(63), token_bucket.nextTokenAvailable()); +} + +// Test reset functionality. 
+TEST_F(TokenBucketImplTest, Reset) { + TokenBucketImpl token_bucket{16, time_system_, 16}; + token_bucket.reset(1); + EXPECT_EQ(1, token_bucket.consume(2, true)); + EXPECT_EQ(std::chrono::milliseconds(63), token_bucket.nextTokenAvailable()); } } // namespace Envoy diff --git a/test/common/common/utility_fuzz_test.cc b/test/common/common/utility_fuzz_test.cc index aebf5552f5514..99006fdc0d09a 100644 --- a/test/common/common/utility_fuzz_test.cc +++ b/test/common/common/utility_fuzz_test.cc @@ -7,12 +7,13 @@ namespace Envoy { namespace Fuzz { +namespace { DEFINE_FUZZER(const uint8_t* buf, size_t len) { { uint64_t out; const std::string string_buffer(reinterpret_cast(buf), len); - StringUtil::atoul(string_buffer.c_str(), out); + StringUtil::atoull(string_buffer.c_str(), out); } { const std::string string_buffer(reinterpret_cast(buf), len); @@ -60,5 +61,6 @@ DEFINE_FUZZER(const uint8_t* buf, size_t len) { } } +} // namespace } // namespace Fuzz } // namespace Envoy diff --git a/test/common/common/utility_speed_test.cc b/test/common/common/utility_speed_test.cc index 878e8c1d9473e..69ed1b41ff4d6 100644 --- a/test/common/common/utility_speed_test.cc +++ b/test/common/common/utility_speed_test.cc @@ -9,6 +9,8 @@ #include "absl/strings/string_view.h" #include "benchmark/benchmark.h" +namespace Envoy { + static const char TextToTrim[] = "\t the quick brown fox jumps over the lazy dog\n\r\n"; static size_t TextToTrimLength = sizeof(TextToTrim) - 1; @@ -250,6 +252,7 @@ static void BM_IntervalSet50ToVector(benchmark::State& state) { } } BENCHMARK(BM_IntervalSet50ToVector); +} // namespace Envoy // Boilerplate main(), which discovers benchmarks in the same file and runs them. 
int main(int argc, char** argv) { diff --git a/test/common/common/utility_test.cc b/test/common/common/utility_test.cc index 3e83544a04c00..1ea15ef2e1913 100644 --- a/test/common/common/utility_test.cc +++ b/test/common/common/utility_test.cc @@ -8,123 +8,96 @@ #include "common/common/utility.h" -#include "test/test_common/test_base.h" #include "test/test_common/test_time.h" #include "test/test_common/utility.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" using testing::ContainerEq; namespace Envoy { -TEST(StringUtil, strtoul) { +TEST(StringUtil, strtoull) { uint64_t out; const char* rest; static const char* test_str = "12345b"; - rest = StringUtil::strtoul(test_str, out); + rest = StringUtil::strtoull(test_str, out); EXPECT_NE(nullptr, rest); EXPECT_EQ('b', *rest); EXPECT_EQ(&test_str[5], rest); EXPECT_EQ(12345U, out); - EXPECT_EQ(nullptr, StringUtil::strtoul("", out)); - EXPECT_EQ(nullptr, StringUtil::strtoul("b123", out)); + EXPECT_EQ(nullptr, StringUtil::strtoull("", out)); + EXPECT_EQ(nullptr, StringUtil::strtoull("b123", out)); - rest = StringUtil::strtoul("123", out); + rest = StringUtil::strtoull("123", out); EXPECT_NE(nullptr, rest); EXPECT_EQ('\0', *rest); EXPECT_EQ(123U, out); - EXPECT_NE(nullptr, StringUtil::strtoul(" 456", out)); + EXPECT_NE(nullptr, StringUtil::strtoull(" 456", out)); EXPECT_EQ(456U, out); - EXPECT_NE(nullptr, StringUtil::strtoul("00789", out)); + EXPECT_NE(nullptr, StringUtil::strtoull("00789", out)); EXPECT_EQ(789U, out); // Hex - rest = StringUtil::strtoul("0x1234567890abcdefg", out, 16); + rest = StringUtil::strtoull("0x1234567890abcdefg", out, 16); EXPECT_NE(nullptr, rest); EXPECT_EQ('g', *rest); EXPECT_EQ(0x1234567890abcdefU, out); // Explicit decimal - rest = StringUtil::strtoul("01234567890A", out, 10); + rest = StringUtil::strtoull("01234567890A", out, 10); EXPECT_NE(nullptr, rest); EXPECT_EQ('A', *rest); EXPECT_EQ(1234567890U, out); // Octal - 
rest = StringUtil::strtoul("012345678", out, 8); + rest = StringUtil::strtoull("012345678", out, 8); EXPECT_NE(nullptr, rest); EXPECT_EQ('8', *rest); EXPECT_EQ(01234567U, out); // Binary - rest = StringUtil::strtoul("01010101012", out, 2); + rest = StringUtil::strtoull("01010101012", out, 2); EXPECT_NE(nullptr, rest); EXPECT_EQ('2', *rest); EXPECT_EQ(0b101010101U, out); - // Verify subsequent call to strtoul succeeds after the first one + // Verify subsequent call to strtoull succeeds after the first one // failed due to errno ERANGE - EXPECT_EQ(nullptr, StringUtil::strtoul("18446744073709551616", out)); - EXPECT_NE(nullptr, StringUtil::strtoul("18446744073709551615", out)); + EXPECT_EQ(nullptr, StringUtil::strtoull("18446744073709551616", out)); + EXPECT_NE(nullptr, StringUtil::strtoull("18446744073709551615", out)); EXPECT_EQ(18446744073709551615U, out); } -TEST(StringUtil, atoul) { +TEST(StringUtil, atoull) { uint64_t out; - EXPECT_FALSE(StringUtil::atoul("123b", out)); - EXPECT_FALSE(StringUtil::atoul("", out)); - EXPECT_FALSE(StringUtil::atoul("b123", out)); + EXPECT_FALSE(StringUtil::atoull("123b", out)); + EXPECT_FALSE(StringUtil::atoull("", out)); + EXPECT_FALSE(StringUtil::atoull("b123", out)); - EXPECT_TRUE(StringUtil::atoul("123", out)); + EXPECT_TRUE(StringUtil::atoull("123", out)); EXPECT_EQ(123U, out); - EXPECT_TRUE(StringUtil::atoul(" 456", out)); + EXPECT_TRUE(StringUtil::atoull(" 456", out)); EXPECT_EQ(456U, out); - EXPECT_TRUE(StringUtil::atoul("00789", out)); + EXPECT_TRUE(StringUtil::atoull("00789", out)); EXPECT_EQ(789U, out); - // Verify subsequent call to atoul succeeds after the first one + // Verify subsequent call to atoull succeeds after the first one // failed due to errno ERANGE - EXPECT_FALSE(StringUtil::atoul("18446744073709551616", out)); - EXPECT_TRUE(StringUtil::atoul("18446744073709551615", out)); + EXPECT_FALSE(StringUtil::atoull("18446744073709551616", out)); + EXPECT_TRUE(StringUtil::atoull("18446744073709551615", out)); 
EXPECT_EQ(18446744073709551615U, out); } -TEST(StringUtil, atol) { - int64_t out; - EXPECT_FALSE(StringUtil::atol("-123b", out)); - EXPECT_FALSE(StringUtil::atol("", out)); - EXPECT_FALSE(StringUtil::atol("b123", out)); - - EXPECT_TRUE(StringUtil::atol("123", out)); - EXPECT_EQ(123, out); - EXPECT_TRUE(StringUtil::atol("-123", out)); - EXPECT_EQ(-123, out); - EXPECT_TRUE(StringUtil::atol("+123", out)); - EXPECT_EQ(123, out); - - EXPECT_TRUE(StringUtil::atol(" 456", out)); - EXPECT_EQ(456, out); - - EXPECT_TRUE(StringUtil::atol("00789", out)); - EXPECT_EQ(789, out); - - // INT64_MAX + 1 - EXPECT_FALSE(StringUtil::atol("9223372036854775808", out)); - - // INT64_MIN - EXPECT_TRUE(StringUtil::atol("-9223372036854775808", out)); - EXPECT_EQ(INT64_MIN, out); -} - TEST(DateUtil, All) { EXPECT_FALSE(DateUtil::timePointValid(SystemTime())); DangerousDeprecatedTestTime test_time; @@ -812,11 +785,9 @@ TEST(DateFormatter, FromTime) { const SystemTime time1(std::chrono::seconds(1522796769)); EXPECT_EQ("2018-04-03T23:06:09.000Z", DateFormatter("%Y-%m-%dT%H:%M:%S.000Z").fromTime(time1)); EXPECT_EQ("aaa23", DateFormatter(std::string(3, 'a') + "%H").fromTime(time1)); - EXPECT_EQ("", DateFormatter(std::string(1022, 'a') + "%H").fromTime(time1)); const SystemTime time2(std::chrono::seconds(0)); EXPECT_EQ("1970-01-01T00:00:00.000Z", DateFormatter("%Y-%m-%dT%H:%M:%S.000Z").fromTime(time2)); EXPECT_EQ("aaa00", DateFormatter(std::string(3, 'a') + "%H").fromTime(time2)); - EXPECT_EQ("", DateFormatter(std::string(1022, 'a') + "%H").fromTime(time2)); } // Verify that two DateFormatter patterns with the same ??? 
patterns but @@ -830,4 +801,41 @@ TEST(DateFormatter, FromTimeSameWildcard) { DateFormatter("%Y-%m-%dT%H:%M:%S.000Z%1f%2f").fromTime(time1)); } +TEST(TrieLookupTable, AddItems) { + TrieLookupTable trie; + EXPECT_TRUE(trie.add("foo", "a")); + EXPECT_TRUE(trie.add("bar", "b")); + EXPECT_EQ("a", trie.find("foo")); + EXPECT_EQ("b", trie.find("bar")); + + // overwrite_existing = false + EXPECT_FALSE(trie.add("foo", "c", false)); + EXPECT_EQ("a", trie.find("foo")); + + // overwrite_existing = true + EXPECT_TRUE(trie.add("foo", "c")); + EXPECT_EQ("c", trie.find("foo")); +} + +TEST(TrieLookupTable, LongestPrefix) { + TrieLookupTable trie; + EXPECT_TRUE(trie.add("foo", "a")); + EXPECT_TRUE(trie.add("bar", "b")); + EXPECT_TRUE(trie.add("baro", "c")); + + EXPECT_EQ("a", trie.find("foo")); + EXPECT_EQ("a", trie.findLongestPrefix("foo")); + EXPECT_EQ("a", trie.findLongestPrefix("foosball")); + + EXPECT_EQ("b", trie.find("bar")); + EXPECT_EQ("b", trie.findLongestPrefix("bar")); + EXPECT_EQ("b", trie.findLongestPrefix("baritone")); + EXPECT_EQ("c", trie.findLongestPrefix("barometer")); + + EXPECT_EQ(nullptr, trie.find("toto")); + EXPECT_EQ(nullptr, trie.findLongestPrefix("toto")); + EXPECT_EQ(nullptr, trie.find(" ")); + EXPECT_EQ(nullptr, trie.findLongestPrefix(" ")); +} + } // namespace Envoy diff --git a/test/common/compressor/zlib_compressor_impl_test.cc b/test/common/compressor/zlib_compressor_impl_test.cc index c1fd72b5cd50d..2410d78fe308d 100644 --- a/test/common/compressor/zlib_compressor_impl_test.cc +++ b/test/common/compressor/zlib_compressor_impl_test.cc @@ -3,14 +3,15 @@ #include "common/common/stack_array.h" #include "common/compressor/zlib_compressor_impl.h" -#include "test/test_common/test_base.h" #include "test/test_common/utility.h" +#include "gtest/gtest.h" + namespace Envoy { namespace Compressor { namespace { -class ZlibCompressorImplTest : public TestBase { +class ZlibCompressorImplTest : public testing::Test { protected: void expectValidFlushedBuffer(const 
Buffer::OwnedImpl& output_buffer) { uint64_t num_comp_slices = output_buffer.getRawSlices(nullptr, 0); @@ -56,7 +57,7 @@ class ZlibCompressorImplTest : public TestBase { void expectEqualInputSize(const std::string& footer_bytes, const uint32_t input_size) { const std::string size_bytes = footer_bytes.substr(footer_bytes.size() - 8, 8); uint64_t size; - StringUtil::atoul(size_bytes.c_str(), size, 16); + StringUtil::atoull(size_bytes.c_str(), size, 16); EXPECT_EQ(TestUtility::flipOrder(size), input_size); } diff --git a/test/common/config/BUILD b/test/common/config/BUILD index 8f2ce71b92036..4f902249ef071 100644 --- a/test/common/config/BUILD +++ b/test/common/config/BUILD @@ -10,6 +10,39 @@ load( envoy_package() +envoy_cc_test( + name = "delta_subscription_impl_test", + srcs = ["delta_subscription_impl_test.cc"], + deps = [ + ":delta_subscription_test_harness", + "//source/common/config:delta_subscription_lib", + "//source/common/stats:isolated_store_lib", + "//test/mocks:common_lib", + "//test/mocks/config:config_mocks", + "//test/mocks/event:event_mocks", + "//test/mocks/grpc:grpc_mocks", + "//test/mocks/local_info:local_info_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/test_common:logging_lib", + ], +) + +envoy_cc_test( + name = "delta_subscription_state_test", + srcs = ["delta_subscription_state_test.cc"], + deps = [ + "//source/common/config:delta_subscription_lib", + "//source/common/stats:isolated_store_lib", + "//test/mocks:common_lib", + "//test/mocks/config:config_mocks", + "//test/mocks/event:event_mocks", + "//test/mocks/grpc:grpc_mocks", + "//test/mocks/local_info:local_info_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/test_common:logging_lib", + ], +) + envoy_cc_test( name = "filesystem_subscription_impl_test", srcs = ["filesystem_subscription_impl_test.cc"], @@ -61,6 +94,18 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "grpc_stream_test", + srcs = ["grpc_stream_test.cc"], + deps = [ + 
"//source/common/config:grpc_stream_lib", + "//test/mocks/config:config_mocks", + "//test/mocks/event:event_mocks", + "//test/mocks/grpc:grpc_mocks", + "//test/mocks/upstream:upstream_mocks", + ], +) + envoy_cc_test( name = "grpc_subscription_impl_test", srcs = ["grpc_subscription_impl_test.cc"], @@ -87,6 +132,20 @@ envoy_cc_test_library( ], ) +envoy_cc_test_library( + name = "delta_subscription_test_harness", + hdrs = ["delta_subscription_test_harness.h"], + deps = [ + ":subscription_test_harness", + "//source/common/config:delta_subscription_lib", + "//test/mocks/config:config_mocks", + "//test/mocks/event:event_mocks", + "//test/mocks/grpc:grpc_mocks", + "//test/mocks/local_info:local_info_mocks", + "//test/mocks/runtime:runtime_mocks", + ], +) + envoy_cc_test( name = "http_subscription_impl_test", srcs = ["http_subscription_impl_test.cc"], @@ -138,6 +197,7 @@ envoy_cc_test( name = "subscription_impl_test", srcs = ["subscription_impl_test.cc"], deps = [ + ":delta_subscription_test_harness", ":filesystem_subscription_test_harness", ":grpc_subscription_test_harness", ":http_subscription_test_harness", diff --git a/test/common/config/config_provider_impl_test.cc b/test/common/config/config_provider_impl_test.cc index 1d94dbf4b239f..3945dc9d79ad1 100644 --- a/test/common/config/config_provider_impl_test.cc +++ b/test/common/config/config_provider_impl_test.cc @@ -4,19 +4,23 @@ #include "common/protobuf/utility.h" #include "test/common/config/dummy_config.pb.h" +#include "test/mocks/config/mocks.h" #include "test/mocks/server/mocks.h" #include "test/test_common/simulated_time_system.h" -#include "test/test_common/test_base.h" +#include "test/test_common/utility.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" namespace Envoy { namespace Config { namespace { +using testing::InSequence; + class DummyConfigProviderManager; -class StaticDummyConfigProvider : public ImmutableConfigProviderImplBase { +class StaticDummyConfigProvider : public 
ImmutableConfigProviderBase { public: StaticDummyConfigProvider(const test::common::config::DummyConfig& config_proto, Server::Configuration::FactoryContext& factory_context, @@ -38,9 +42,8 @@ class StaticDummyConfigProvider : public ImmutableConfigProviderImplBase { test::common::config::DummyConfig config_proto_; }; -class DummyConfigSubscription - : public ConfigSubscriptionInstanceBase, - Envoy::Config::SubscriptionCallbacks { +class DummyConfigSubscription : public ConfigSubscriptionInstance, + Envoy::Config::SubscriptionCallbacks { public: DummyConfigSubscription(const uint64_t manager_identifier, Server::Configuration::FactoryContext& factory_context, @@ -48,17 +51,23 @@ class DummyConfigSubscription ~DummyConfigSubscription() override = default; - // Envoy::Config::ConfigSubscriptionInstanceBase + // Envoy::Config::ConfigSubscriptionCommonBase void start() override {} // Envoy::Config::SubscriptionCallbacks - void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) override { - const auto& config = resources[0]; - if (checkAndApplyConfig(config, "dummy_config", version_info)) { + // TODO(fredlas) deduplicate + void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + const std::string& version_info) override { + auto config = MessageUtil::anyConvert(resources[0]); + if (checkAndApplyConfigUpdate(config, "dummy_config", version_info)) { config_proto_ = config; } - ConfigSubscriptionInstanceBase::onConfigUpdate(); + ConfigSubscriptionCommonBase::onConfigUpdate(); + } + void onConfigUpdate(const Protobuf::RepeatedPtrField&, + const Protobuf::RepeatedPtrField&, const std::string&) override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } // Envoy::Config::SubscriptionCallbacks @@ -74,7 +83,6 @@ class DummyConfigSubscription private: absl::optional config_proto_; }; - using DummyConfigSubscriptionSharedPtr = std::shared_ptr; class DummyConfig : public ConfigProvider::Config { @@ -82,14 +90,14 @@ class DummyConfig : public 
ConfigProvider::Config { DummyConfig(const test::common::config::DummyConfig&) {} }; -class DummyDynamicConfigProvider : public MutableConfigProviderImplBase { +class DummyDynamicConfigProvider : public MutableConfigProviderBase { public: DummyDynamicConfigProvider(DummyConfigSubscriptionSharedPtr&& subscription, - ConfigConstSharedPtr initial_config, + const ConfigConstSharedPtr& initial_config, Server::Configuration::FactoryContext& factory_context) - : MutableConfigProviderImplBase(std::move(subscription), factory_context), + : MutableConfigProviderBase(std::move(subscription), factory_context, ApiType::Full), subscription_(static_cast( - MutableConfigProviderImplBase::subscription().get())) { + MutableConfigProviderCommonBase::subscription_.get())) { initialize(initial_config); } @@ -97,7 +105,7 @@ class DummyDynamicConfigProvider : public MutableConfigProviderImplBase { DummyConfigSubscription& subscription() { return *subscription_; } - // Envoy::Config::MutableConfigProviderImplBase + // Envoy::Config::MutableConfigProviderBase ConfigProvider::ConfigConstSharedPtr onConfigProtoUpdate(const Protobuf::Message& config) override { return std::make_shared( @@ -111,8 +119,6 @@ class DummyDynamicConfigProvider : public MutableConfigProviderImplBase { } return &subscription_->config_proto().value(); } - - // Envoy::Config::ConfigProvider std::string getConfigVersion() const override { return ""; } private: @@ -157,22 +163,24 @@ class DummyConfigProviderManager : public ConfigProviderManagerImplBase { } // Envoy::Config::ConfigProviderManager - ConfigProviderPtr createXdsConfigProvider(const Protobuf::Message& config_source_proto, - Server::Configuration::FactoryContext& factory_context, - const std::string&) override { + ConfigProviderPtr + createXdsConfigProvider(const Protobuf::Message& config_source_proto, + Server::Configuration::FactoryContext& factory_context, + const std::string&, + const Envoy::Config::ConfigProviderManager::OptionalArg&) override { 
DummyConfigSubscriptionSharedPtr subscription = getSubscription( config_source_proto, factory_context.initManager(), [&factory_context](const uint64_t manager_identifier, ConfigProviderManagerImplBase& config_provider_manager) - -> ConfigSubscriptionInstanceBaseSharedPtr { + -> ConfigSubscriptionCommonBaseSharedPtr { return std::make_shared( manager_identifier, factory_context, static_cast(config_provider_manager)); }); ConfigProvider::ConfigConstSharedPtr initial_config; - const MutableConfigProviderImplBase* provider = - subscription->getAnyBoundMutableConfigProvider(); + const auto* provider = static_cast( + subscription->getAnyBoundMutableConfigProvider()); if (provider) { initial_config = provider->getConfig(); } @@ -183,31 +191,38 @@ class DummyConfigProviderManager : public ConfigProviderManagerImplBase { // Envoy::Config::ConfigProviderManager ConfigProviderPtr createStaticConfigProvider(const Protobuf::Message& config_proto, - Server::Configuration::FactoryContext& factory_context) override { + Server::Configuration::FactoryContext& factory_context, + const Envoy::Config::ConfigProviderManager::OptionalArg&) override { return std::make_unique( dynamic_cast(config_proto), factory_context, *this); } + ConfigProviderPtr + createStaticConfigProvider(std::vector>&&, + Server::Configuration::FactoryContext&, const OptionalArg&) override { + ASSERT(false, "this provider does not expect multiple config protos"); + return nullptr; + } }; StaticDummyConfigProvider::StaticDummyConfigProvider( const test::common::config::DummyConfig& config_proto, Server::Configuration::FactoryContext& factory_context, DummyConfigProviderManager& config_provider_manager) - : ImmutableConfigProviderImplBase(factory_context, config_provider_manager, - ConfigProviderInstanceType::Static), + : ImmutableConfigProviderBase(factory_context, config_provider_manager, + ConfigProviderInstanceType::Static, ApiType::Full), config_(std::make_shared(config_proto)), config_proto_(config_proto) {} 
DummyConfigSubscription::DummyConfigSubscription( const uint64_t manager_identifier, Server::Configuration::FactoryContext& factory_context, DummyConfigProviderManager& config_provider_manager) - : ConfigSubscriptionInstanceBase( + : ConfigSubscriptionInstance( "DummyDS", manager_identifier, config_provider_manager, factory_context.timeSource(), factory_context.timeSource().systemTime(), factory_context.localInfo()) {} -class ConfigProviderImplTest : public TestBase { +class ConfigProviderImplTest : public testing::Test { public: - ConfigProviderImplTest() { + void initialize() { EXPECT_CALL(factory_context_.admin_.config_tracker_, add_("dummy", _)); provider_manager_ = std::make_unique(factory_context_.admin_); } @@ -230,27 +245,31 @@ test::common::config::DummyConfig parseDummyConfigFromYaml(const std::string& ya // subscriptions, config protos and data structures generated as a result of the // configurations (i.e., the ConfigProvider::Config). TEST_F(ConfigProviderImplTest, SharedOwnership) { - factory_context_.init_manager_.initialize(); + initialize(); + Init::ExpectableWatcherImpl watcher; + factory_context_.init_manager_.initialize(watcher); envoy::api::v2::core::ApiConfigSource config_source_proto; config_source_proto.set_api_type(envoy::api::v2::core::ApiConfigSource::GRPC); ConfigProviderPtr provider1 = provider_manager_->createXdsConfigProvider( - config_source_proto, factory_context_, "dummy_prefix"); + config_source_proto, factory_context_, "dummy_prefix", + ConfigProviderManager::NullOptionalArg()); // No config protos have been received via the subscription yet. 
EXPECT_FALSE(provider1->configProtoInfo().has_value()); - Protobuf::RepeatedPtrField dummy_configs; - dummy_configs.Add()->MergeFrom(parseDummyConfigFromYaml("a: a dummy config")); + Protobuf::RepeatedPtrField untyped_dummy_configs; + untyped_dummy_configs.Add()->PackFrom(parseDummyConfigFromYaml("a: a dummy config")); DummyConfigSubscription& subscription = dynamic_cast(*provider1).subscription(); - subscription.onConfigUpdate(dummy_configs, "1"); + subscription.onConfigUpdate(untyped_dummy_configs, "1"); // Check that a newly created provider with the same config source will share // the subscription, config proto and resulting ConfigProvider::Config. ConfigProviderPtr provider2 = provider_manager_->createXdsConfigProvider( - config_source_proto, factory_context_, "dummy_prefix"); + config_source_proto, factory_context_, "dummy_prefix", + ConfigProviderManager::NullOptionalArg()); EXPECT_TRUE(provider2->configProtoInfo().has_value()); EXPECT_EQ(&dynamic_cast(*provider1).subscription(), @@ -263,7 +282,8 @@ TEST_F(ConfigProviderImplTest, SharedOwnership) { // Change the config source and verify that a new subscription is used. config_source_proto.set_api_type(envoy::api::v2::core::ApiConfigSource::REST); ConfigProviderPtr provider3 = provider_manager_->createXdsConfigProvider( - config_source_proto, factory_context_, "dummy_prefix"); + config_source_proto, factory_context_, "dummy_prefix", + ConfigProviderManager::NullOptionalArg()); EXPECT_NE(&dynamic_cast(*provider1).subscription(), &dynamic_cast(*provider3).subscription()); @@ -272,7 +292,7 @@ TEST_F(ConfigProviderImplTest, SharedOwnership) { dynamic_cast(*provider3) .subscription() - .onConfigUpdate(dummy_configs, "provider3"); + .onConfigUpdate(untyped_dummy_configs, "provider3"); EXPECT_EQ(2UL, static_cast( provider_manager_->dumpConfigs().get()) @@ -299,10 +319,61 @@ TEST_F(ConfigProviderImplTest, SharedOwnership) { .size()); } +// A ConfigProviderManager that returns a mock ConfigProvider. 
+class DummyConfigProviderManagerMockConfigProvider : public DummyConfigProviderManager { +public: + DummyConfigProviderManagerMockConfigProvider(Server::Admin& admin) + : DummyConfigProviderManager(admin) {} + + ConfigProviderPtr + createXdsConfigProvider(const Protobuf::Message& config_source_proto, + Server::Configuration::FactoryContext& factory_context, + const std::string&, + const Envoy::Config::ConfigProviderManager::OptionalArg&) override { + DummyConfigSubscriptionSharedPtr subscription = getSubscription( + config_source_proto, factory_context.initManager(), + [&factory_context](const uint64_t manager_identifier, + ConfigProviderManagerImplBase& config_provider_manager) + -> ConfigSubscriptionCommonBaseSharedPtr { + return std::make_shared( + manager_identifier, factory_context, + static_cast(config_provider_manager)); + }); + return std::make_unique(std::move(subscription), nullptr, + factory_context); + } +}; + +// Test that duplicate config updates will not trigger creation of a new ConfigProvider::Config. +TEST_F(ConfigProviderImplTest, DuplicateConfigProto) { + InSequence sequence; + // This provider manager returns a MockMutableConfigProviderBase. + auto provider_manager = + std::make_unique(factory_context_.admin_); + envoy::api::v2::core::ApiConfigSource config_source_proto; + config_source_proto.set_api_type(envoy::api::v2::core::ApiConfigSource::GRPC); + ConfigProviderPtr provider = provider_manager->createXdsConfigProvider( + config_source_proto, factory_context_, "dummy_prefix", + ConfigProviderManager::NullOptionalArg()); + auto* typed_provider = static_cast(provider.get()); + DummyConfigSubscription& subscription = + static_cast(typed_provider->subscription()); + // First time issuing a configUpdate(). A new ConfigProvider::Config should be created. 
+ EXPECT_CALL(*typed_provider, onConfigProtoUpdate(_)).Times(1); + Protobuf::RepeatedPtrField untyped_dummy_configs; + untyped_dummy_configs.Add()->PackFrom(parseDummyConfigFromYaml("a: a dynamic dummy config")); + subscription.onConfigUpdate(untyped_dummy_configs, "1"); + // Second time issuing the configUpdate(), this time with a duplicate proto. A new + // ConfigProvider::Config _should not_ be created. + EXPECT_CALL(*typed_provider, onConfigProtoUpdate(_)).Times(0); + subscription.onConfigUpdate(untyped_dummy_configs, "1"); +} + // Tests that the base ConfigProvider*s are handling registration with the // /config_dump admin handler as well as generic bookkeeping such as timestamp // updates. TEST_F(ConfigProviderImplTest, ConfigDump) { + initialize(); // Empty dump first. auto message_ptr = factory_context_.admin_.config_tracker_.config_tracker_callbacks_["dummy"](); const auto& dummy_config_dump = @@ -321,7 +392,8 @@ TEST_F(ConfigProviderImplTest, ConfigDump) { timeSystem().setSystemTime(std::chrono::milliseconds(1234567891234)); ConfigProviderPtr static_config = provider_manager_->createStaticConfigProvider( - parseDummyConfigFromYaml(config_yaml), factory_context_); + parseDummyConfigFromYaml(config_yaml), factory_context_, + ConfigProviderManager::NullOptionalArg()); message_ptr = factory_context_.admin_.config_tracker_.config_tracker_callbacks_["dummy"](); const auto& dummy_config_dump2 = static_cast(*message_ptr); @@ -337,16 +409,17 @@ TEST_F(ConfigProviderImplTest, ConfigDump) { envoy::api::v2::core::ApiConfigSource config_source_proto; config_source_proto.set_api_type(envoy::api::v2::core::ApiConfigSource::GRPC); ConfigProviderPtr dynamic_provider = provider_manager_->createXdsConfigProvider( - config_source_proto, factory_context_, "dummy_prefix"); + config_source_proto, factory_context_, "dummy_prefix", + ConfigProviderManager::NullOptionalArg()); // Static + dynamic config dump. 
- Protobuf::RepeatedPtrField dummy_configs; - dummy_configs.Add()->MergeFrom(parseDummyConfigFromYaml("a: a dynamic dummy config")); + Protobuf::RepeatedPtrField untyped_dummy_configs; + untyped_dummy_configs.Add()->PackFrom(parseDummyConfigFromYaml("a: a dynamic dummy config")); timeSystem().setSystemTime(std::chrono::milliseconds(1234567891567)); DummyConfigSubscription& subscription = dynamic_cast(*dynamic_provider).subscription(); - subscription.onConfigUpdate(dummy_configs, "v1"); + subscription.onConfigUpdate(untyped_dummy_configs, "v1"); message_ptr = factory_context_.admin_.config_tracker_.config_tracker_callbacks_["dummy"](); const auto& dummy_config_dump3 = @@ -362,12 +435,33 @@ TEST_F(ConfigProviderImplTest, ConfigDump) { )EOF", expected_config_dump); EXPECT_EQ(expected_config_dump.DebugString(), dummy_config_dump3.DebugString()); + + ConfigProviderPtr static_config2 = provider_manager_->createStaticConfigProvider( + parseDummyConfigFromYaml("a: another static dummy config"), factory_context_, + ConfigProviderManager::NullOptionalArg()); + message_ptr = factory_context_.admin_.config_tracker_.config_tracker_callbacks_["dummy"](); + const auto& dummy_config_dump4 = + static_cast(*message_ptr); + MessageUtil::loadFromYaml(R"EOF( +static_dummy_configs: + - dummy_config: { a: another static dummy config } + last_updated: { seconds: 1234567891, nanos: 567000000 } + - dummy_config: { a: a static dummy config } + last_updated: { seconds: 1234567891, nanos: 234000000 } +dynamic_dummy_configs: + - version_info: v1 + dummy_config: { a: a dynamic dummy config } + last_updated: { seconds: 1234567891, nanos: 567000000 } +)EOF", + expected_config_dump); + EXPECT_THAT(expected_config_dump, ProtoEqIgnoreRepeatedFieldOrdering(dummy_config_dump4)); } // Tests that dynamic config providers enforce that the context's localInfo is // set, since it is used to obtain the node/cluster attributes required for // subscriptions. 
TEST_F(ConfigProviderImplTest, LocalInfoNotDefined) { + initialize(); factory_context_.local_info_.node_.set_cluster(""); factory_context_.local_info_.node_.set_id(""); @@ -375,12 +469,274 @@ TEST_F(ConfigProviderImplTest, LocalInfoNotDefined) { config_source_proto.set_api_type(envoy::api::v2::core::ApiConfigSource::GRPC); EXPECT_THROW_WITH_MESSAGE( provider_manager_->createXdsConfigProvider(config_source_proto, factory_context_, - "dummy_prefix"), + "dummy_prefix", + ConfigProviderManager::NullOptionalArg()), EnvoyException, "DummyDS: node 'id' and 'cluster' are required. Set it either in 'node' config or " "via --service-node and --service-cluster options."); } +class DeltaDummyConfigProviderManager; + +class DeltaDummyConfigSubscription : public DeltaConfigSubscriptionInstance, + Envoy::Config::SubscriptionCallbacks { +public: + using ProtoMap = std::map; + + DeltaDummyConfigSubscription(const uint64_t manager_identifier, + Server::Configuration::FactoryContext& factory_context, + DeltaDummyConfigProviderManager& config_provider_manager); + + // Envoy::Config::ConfigSubscriptionCommonBase + void start() override {} + + // Envoy::Config::SubscriptionCallbacks + void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + const std::string& version_info) override; + void onConfigUpdate(const Protobuf::RepeatedPtrField&, + const Protobuf::RepeatedPtrField&, const std::string&) override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } + void onConfigUpdateFailed(const EnvoyException*) override { + ConfigSubscriptionCommonBase::onConfigUpdateFailed(); + } + std::string resourceName(const ProtobufWkt::Any&) override { + return "test.common.config.DummyConfig"; + } + + const ProtoMap& protoMap() const { return proto_map_; } + +private: + ProtoMap proto_map_; +}; +using DeltaDummyConfigSubscriptionSharedPtr = std::shared_ptr; + +class ThreadLocalDummyConfig : public ThreadLocal::ThreadLocalObject, + public Envoy::Config::ConfigProvider::Config { +public: + void 
addProto(const test::common::config::DummyConfig& config_proto) { + protos_.push_back(config_proto); + } + + uint32_t numProtos() const { return protos_.size(); } + +private: + std::vector protos_; +}; + +class DeltaDummyDynamicConfigProvider : public Envoy::Config::DeltaMutableConfigProviderBase { +public: + DeltaDummyDynamicConfigProvider(DeltaDummyConfigSubscriptionSharedPtr&& subscription, + Server::Configuration::FactoryContext& factory_context, + std::shared_ptr dummy_config) + : DeltaMutableConfigProviderBase(std::move(subscription), factory_context, + ConfigProvider::ApiType::Delta), + subscription_(static_cast( + MutableConfigProviderCommonBase::subscription_.get())) { + initialize([&dummy_config](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { + return (dummy_config != nullptr) ? dummy_config : std::make_shared(); + }); + } + + DeltaDummyConfigSubscription& subscription() { return *subscription_; } + + // Envoy::Config::ConfigProvider + ConfigProtoVector getConfigProtos() const override { + ConfigProtoVector proto_vector; + for (const auto& value_type : subscription_->protoMap()) { + proto_vector.push_back(&value_type.second); + } + return proto_vector; + } + std::string getConfigVersion() const override { + return (subscription_->configInfo().has_value()) + ? 
subscription_->configInfo().value().last_config_version_ + : ""; + } + ConfigConstSharedPtr getConfig() const override { + return std::dynamic_pointer_cast(tls_->get()); + } + + // Envoy::Config::DeltaMutableConfigProviderBase + ConfigSharedPtr getConfig() override { + return std::dynamic_pointer_cast(tls_->get()); + } + + std::shared_ptr getThreadLocalDummyConfig() { + return std::dynamic_pointer_cast(tls_->get()); + } + +private: + DeltaDummyConfigSubscription* subscription_; +}; + +class DeltaDummyConfigProviderManager : public ConfigProviderManagerImplBase { +public: + DeltaDummyConfigProviderManager(Server::Admin& admin) + : ConfigProviderManagerImplBase(admin, "dummy") {} + + // Envoy::Config::ConfigProviderManagerImplBase + ProtobufTypes::MessagePtr dumpConfigs() const override { + auto config_dump = std::make_unique(); + for (const auto& element : configSubscriptions()) { + auto subscription = element.second.lock(); + ASSERT(subscription); + + if (subscription->configInfo()) { + auto* dynamic_config = config_dump->mutable_dynamic_dummy_configs()->Add(); + dynamic_config->set_version_info(subscription->configInfo().value().last_config_version_); + const auto* typed_subscription = + static_cast(subscription.get()); + const DeltaDummyConfigSubscription::ProtoMap& proto_map = typed_subscription->protoMap(); + for (const auto& value_type : proto_map) { + dynamic_config->mutable_dummy_configs()->Add()->MergeFrom(value_type.second); + } + TimestampUtil::systemClockToTimestamp(subscription->lastUpdated(), + *dynamic_config->mutable_last_updated()); + } + } + + return config_dump; + } + + // Envoy::Config::ConfigProviderManager + ConfigProviderPtr + createXdsConfigProvider(const Protobuf::Message& config_source_proto, + Server::Configuration::FactoryContext& factory_context, + const std::string&, + const Envoy::Config::ConfigProviderManager::OptionalArg&) override { + DeltaDummyConfigSubscriptionSharedPtr subscription = + getSubscription( + config_source_proto, 
factory_context.initManager(), + [&factory_context](const uint64_t manager_identifier, + ConfigProviderManagerImplBase& config_provider_manager) + -> ConfigSubscriptionCommonBaseSharedPtr { + return std::make_shared( + manager_identifier, factory_context, + static_cast(config_provider_manager)); + }); + + auto* existing_provider = static_cast( + subscription->getAnyBoundMutableConfigProvider()); + return std::make_unique( + std::move(subscription), factory_context, + (existing_provider != nullptr) ? existing_provider->getThreadLocalDummyConfig() : nullptr); + } +}; + +DeltaDummyConfigSubscription::DeltaDummyConfigSubscription( + const uint64_t manager_identifier, Server::Configuration::FactoryContext& factory_context, + DeltaDummyConfigProviderManager& config_provider_manager) + : DeltaConfigSubscriptionInstance( + "Dummy", manager_identifier, config_provider_manager, factory_context.timeSource(), + factory_context.timeSource().systemTime(), factory_context.localInfo()) {} + +void DeltaDummyConfigSubscription::onConfigUpdate( + const Protobuf::RepeatedPtrField& resources, + const std::string& version_info) { + if (resources.empty()) { + return; + } + + // For simplicity, there is no logic here to track updates and/or removals to the existing config + // proto set (i.e., this is append only). Real xDS APIs will need to track additions, updates and + // removals to the config set and apply the diffs to the underlying config implementations. + for (const auto& resource_any : resources) { + auto dummy_config = MessageUtil::anyConvert(resource_any); + proto_map_[version_info] = dummy_config; + // Propagate the new config proto to all worker threads. + applyDeltaConfigUpdate([&dummy_config](const ConfigSharedPtr& config) { + auto* thread_local_dummy_config = static_cast(config.get()); + // Per above, append only for now. 
+ thread_local_dummy_config->addProto(dummy_config); + }); + } + + ConfigSubscriptionCommonBase::onConfigUpdate(); + setLastConfigInfo(absl::optional({absl::nullopt, version_info})); +} + +class DeltaConfigProviderImplTest : public testing::Test { +public: + DeltaConfigProviderImplTest() { + EXPECT_CALL(factory_context_.admin_.config_tracker_, add_("dummy", _)); + provider_manager_ = std::make_unique(factory_context_.admin_); + } + + Event::SimulatedTimeSystem& timeSystem() { return time_system_; } + +protected: + Event::SimulatedTimeSystem time_system_; + NiceMock factory_context_; + std::unique_ptr provider_manager_; +}; + +// Validate that delta config subscriptions are shared across delta dynamic config providers and +// that the underlying Config implementation can be shared as well. +TEST_F(DeltaConfigProviderImplTest, MultipleDeltaSubscriptions) { + envoy::api::v2::core::ApiConfigSource config_source_proto; + config_source_proto.set_api_type(envoy::api::v2::core::ApiConfigSource::GRPC); + ConfigProviderPtr provider1 = provider_manager_->createXdsConfigProvider( + config_source_proto, factory_context_, "dummy_prefix", + ConfigProviderManager::NullOptionalArg()); + + // No config protos have been received via the subscription yet. 
+ EXPECT_FALSE(provider1->configProtoInfoVector().has_value()); + + Protobuf::RepeatedPtrField untyped_dummy_configs; + untyped_dummy_configs.Add()->PackFrom(parseDummyConfigFromYaml("a: a dummy config")); + untyped_dummy_configs.Add()->PackFrom(parseDummyConfigFromYaml("a: another dummy config")); + + DeltaDummyConfigSubscription& subscription = + dynamic_cast(*provider1).subscription(); + subscription.onConfigUpdate(untyped_dummy_configs, "1"); + + ConfigProviderPtr provider2 = provider_manager_->createXdsConfigProvider( + config_source_proto, factory_context_, "dummy_prefix", + ConfigProviderManager::NullOptionalArg()); + + // Providers, config implementations (i.e., the ThreadLocalDummyConfig) and config protos are + // expected to be shared for a given subscription. + EXPECT_EQ(&dynamic_cast(*provider1).subscription(), + &dynamic_cast(*provider2).subscription()); + ASSERT_TRUE(provider2->configProtoInfoVector().has_value()); + EXPECT_EQ( + provider1->configProtoInfoVector().value().config_protos_, + provider2->configProtoInfoVector().value().config_protos_); + EXPECT_EQ(provider1->config().get(), + provider2->config().get()); + // Validate that the config protos are propagated to the thread local config implementation. + EXPECT_EQ(provider1->config()->numProtos(), 2); + + // Issue a second config update to validate that having multiple providers bound to the + // subscription causes a single update to the underlying shared config implementation. + subscription.onConfigUpdate(untyped_dummy_configs, "2"); + // NOTE: the config implementation is append only and _does not_ track updates/removals to the + // config proto set, so the expectation is to double the size of the set. + EXPECT_EQ(provider1->config()->numProtos(), 4); + EXPECT_EQ(provider1->configProtoInfoVector().value().version_, + "2"); +} + +// Tests a config update failure. 
+TEST_F(DeltaConfigProviderImplTest, DeltaSubscriptionFailure) { + envoy::api::v2::core::ApiConfigSource config_source_proto; + config_source_proto.set_api_type(envoy::api::v2::core::ApiConfigSource::GRPC); + ConfigProviderPtr provider = provider_manager_->createXdsConfigProvider( + config_source_proto, factory_context_, "dummy_prefix", + ConfigProviderManager::NullOptionalArg()); + DeltaDummyConfigSubscription& subscription = + dynamic_cast(*provider).subscription(); + const auto time = std::chrono::milliseconds(1234567891234); + timeSystem().setSystemTime(time); + const EnvoyException ex(fmt::format("config failure")); + // Verify the failure updates the lastUpdated() timestamp. + subscription.onConfigUpdateFailed(&ex); + EXPECT_EQ(std::chrono::time_point_cast(provider->lastUpdated()) + .time_since_epoch(), + time); +} + } // namespace } // namespace Config } // namespace Envoy diff --git a/test/common/config/delta_subscription_impl_test.cc b/test/common/config/delta_subscription_impl_test.cc new file mode 100644 index 0000000000000..2f8577a393c19 --- /dev/null +++ b/test/common/config/delta_subscription_impl_test.cc @@ -0,0 +1,117 @@ +#include "test/common/config/delta_subscription_test_harness.h" + +namespace Envoy { +namespace Config { +namespace { + +class DeltaSubscriptionImplTest : public DeltaSubscriptionTestHarness, public testing::Test { +protected: + DeltaSubscriptionImplTest() : DeltaSubscriptionTestHarness() {} +}; + +TEST_F(DeltaSubscriptionImplTest, UpdateResourcesCausesRequest) { + startSubscription({"name1", "name2", "name3"}); + expectSendMessage({"name4"}, {"name1", "name2"}, Grpc::Status::GrpcStatus::Ok, "", {}); + subscription_->updateResources({"name3", "name4"}); + expectSendMessage({"name1", "name2"}, {}, Grpc::Status::GrpcStatus::Ok, "", {}); + subscription_->updateResources({"name1", "name2", "name3", "name4"}); + expectSendMessage({}, {"name1", "name2"}, Grpc::Status::GrpcStatus::Ok, "", {}); + subscription_->updateResources({"name3", 
"name4"}); + expectSendMessage({"name1", "name2"}, {}, Grpc::Status::GrpcStatus::Ok, "", {}); + subscription_->updateResources({"name1", "name2", "name3", "name4"}); + expectSendMessage({}, {"name1", "name2", "name3"}, Grpc::Status::GrpcStatus::Ok, "", {}); + subscription_->updateResources({"name4"}); +} + +// Checks that after a pause(), no requests are sent until resume(). +// Also demonstrates the collapsing of subscription interest updates into a single +// request. (This collapsing happens any time multiple updates arrive before a request +// can be sent, not just with pausing: rate limiting or a down gRPC stream would also do it). +TEST_F(DeltaSubscriptionImplTest, PauseHoldsRequest) { + startSubscription({"name1", "name2", "name3"}); + subscription_->pause(); + + expectSendMessage({"name4"}, {"name1", "name2"}, Grpc::Status::GrpcStatus::Ok, "", {}); + // If not for the pause, these updates would make the expectSendMessage fail due to too many + // messages being sent. + subscription_->updateResources({"name3", "name4"}); + subscription_->updateResources({"name1", "name2", "name3", "name4"}); + subscription_->updateResources({"name3", "name4"}); + subscription_->updateResources({"name1", "name2", "name3", "name4"}); + subscription_->updateResources({"name3", "name4"}); + + subscription_->resume(); +} + +TEST_F(DeltaSubscriptionImplTest, ResponseCausesAck) { + startSubscription({"name1"}); + deliverConfigUpdate({"name1"}, "someversion", true); +} + +// Checks that after a pause(), no ACK requests are sent until resume(), but that after the +// resume, *all* ACKs that arrived during the pause are sent (in order). +TEST_F(DeltaSubscriptionImplTest, PauseQueuesAcks) { + startSubscription({"name1", "name2", "name3"}); + subscription_->pause(); + // The server gives us our first version of resource name1. + // subscription_ now wants to ACK name1 (but can't due to pause). 
+ { + auto message = std::make_unique(); + auto* resource = message->mutable_resources()->Add(); + resource->set_name("name1"); + resource->set_version("version1A"); + const std::string nonce = std::to_string(HashUtil::xxHash64("version1A")); + message->set_nonce(nonce); + nonce_acks_required_.push(nonce); + subscription_->onDiscoveryResponse(std::move(message)); + } + // The server gives us our first version of resource name2. + // subscription_ now wants to ACK name1 and then name2 (but can't due to pause). + { + auto message = std::make_unique(); + auto* resource = message->mutable_resources()->Add(); + resource->set_name("name2"); + resource->set_version("version2A"); + const std::string nonce = std::to_string(HashUtil::xxHash64("version2A")); + message->set_nonce(nonce); + nonce_acks_required_.push(nonce); + subscription_->onDiscoveryResponse(std::move(message)); + } + // The server gives us an updated version of resource name1. + // subscription_ now wants to ACK name1A, then name2, then name1B (but can't due to pause). + { + auto message = std::make_unique(); + auto* resource = message->mutable_resources()->Add(); + resource->set_name("name1"); + resource->set_version("version1B"); + const std::string nonce = std::to_string(HashUtil::xxHash64("version1B")); + message->set_nonce(nonce); + nonce_acks_required_.push(nonce); + subscription_->onDiscoveryResponse(std::move(message)); + } + // All ACK sendMessage()s will happen upon calling resume(). + EXPECT_CALL(async_stream_, sendMessage(_, _)) + .WillRepeatedly([this](const Protobuf::Message& message, bool) { + const std::string nonce = + static_cast(message).response_nonce(); + if (!nonce.empty()) { + nonce_acks_sent_.push(nonce); + } + }); + subscription_->resume(); + // DeltaSubscriptionTestHarness's dtor will check that all ACKs were sent with the correct nonces, + // in the correct order. 
+} + +TEST_F(DeltaSubscriptionImplTest, NoGrpcStream) { + // Have to call start() to get state_ populated (which this test needs to not segfault), but + // start() also tries to start the GrpcStream. So, have that attempt return nullptr. + EXPECT_CALL(*async_client_, start(_, _)).WillOnce(Return(nullptr)); + EXPECT_CALL(async_stream_, sendMessage(_, _)).Times(0); + subscription_->start({"name1"}, callbacks_); + subscription_->updateResources({"name1", "name2"}); +} + +} // namespace +} // namespace Config +} // namespace Envoy diff --git a/test/common/config/delta_subscription_state_test.cc b/test/common/config/delta_subscription_state_test.cc new file mode 100644 index 0000000000000..eb15fdf775153 --- /dev/null +++ b/test/common/config/delta_subscription_state_test.cc @@ -0,0 +1,347 @@ +#include "common/config/delta_subscription_state.h" +#include "common/config/utility.h" +#include "common/stats/isolated_store_impl.h" + +#include "test/mocks/config/mocks.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/local_info/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::NiceMock; +using testing::Throw; +using testing::UnorderedElementsAre; + +namespace Envoy { +namespace Config { +namespace { + +const char TypeUrl[] = "type.googleapis.com/envoy.api.v2.Cluster"; + +class DeltaSubscriptionStateTest : public testing::Test { +protected: + DeltaSubscriptionStateTest() + : stats_(Utility::generateStats(store_)), + state_(TypeUrl, {"name1", "name2", "name3"}, callbacks_, local_info_, + std::chrono::milliseconds(0U), dispatcher_, stats_) { + envoy::api::v2::DeltaDiscoveryRequest cur_request = state_.getNextRequest(); + EXPECT_THAT(cur_request.resource_names_subscribe(), + UnorderedElementsAre("name1", "name2", "name3")); + } + + UpdateAck deliverDiscoveryResponse( + const Protobuf::RepeatedPtrField& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& version_info, absl::optional nonce = 
absl::nullopt) { + envoy::api::v2::DeltaDiscoveryResponse message; + *message.mutable_resources() = added_resources; + *message.mutable_removed_resources() = removed_resources; + message.set_system_version_info(version_info); + if (nonce.has_value()) { + message.set_nonce(nonce.value()); + } + EXPECT_CALL(callbacks_, onConfigUpdate(_, _, _)).Times(1); + return state_.handleResponse(message); + } + + UpdateAck deliverBadDiscoveryResponse( + const Protobuf::RepeatedPtrField& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& version_info, std::string nonce) { + envoy::api::v2::DeltaDiscoveryResponse message; + *message.mutable_resources() = added_resources; + *message.mutable_removed_resources() = removed_resources; + message.set_system_version_info(version_info); + message.set_nonce(nonce); + EXPECT_CALL(callbacks_, onConfigUpdate(_, _, _)).WillOnce(Throw(EnvoyException("oh no"))); + return state_.handleResponse(message); + } + + NiceMock> callbacks_; + NiceMock local_info_; + NiceMock dispatcher_; + Stats::IsolatedStoreImpl store_; + SubscriptionStats stats_; + // We start out interested in three resources: name1, name2, and name3. + DeltaSubscriptionState state_; +}; + +Protobuf::RepeatedPtrField +populateRepeatedResource(std::vector> items) { + Protobuf::RepeatedPtrField add_to; + for (const auto& item : items) { + auto* resource = add_to.Add(); + resource->set_name(item.first); + resource->set_version(item.second); + } + return add_to; +} + +// Basic gaining/losing interest in resources should lead to (un)subscriptions. 
+TEST_F(DeltaSubscriptionStateTest, SubscribeAndUnsubscribe) { + { + state_.updateResourceInterest({"name2", "name3", "name4"}); // drop name1, add name4 + envoy::api::v2::DeltaDiscoveryRequest cur_request = state_.getNextRequest(); + EXPECT_THAT(cur_request.resource_names_subscribe(), UnorderedElementsAre("name4")); + EXPECT_THAT(cur_request.resource_names_unsubscribe(), UnorderedElementsAre("name1")); + } + { + state_.updateResourceInterest({"name1", "name2"}); // add back name1, drop name3 and 4 + envoy::api::v2::DeltaDiscoveryRequest cur_request = state_.getNextRequest(); + EXPECT_THAT(cur_request.resource_names_subscribe(), UnorderedElementsAre("name1")); + EXPECT_THAT(cur_request.resource_names_unsubscribe(), UnorderedElementsAre("name3", "name4")); + } +} + +// Delta xDS reliably queues up and sends all discovery requests, even in situations where it isn't +// strictly necessary. E.g.: if you subscribe but then unsubscribe to a given resource, all before a +// request was able to be sent, two requests will be sent. The following tests demonstrate this. +// +// If Envoy decided it wasn't interested in a resource and then (before a request was sent) decided +// it was again, for all we know, it dropped that resource in between and needs to retrieve it +// again. So, we *should* send a request "re-"subscribing. This means that the server needs to +// interpret the resource_names_subscribe field as "send these resources even if you think Envoy +// already has them". 
+TEST_F(DeltaSubscriptionStateTest, RemoveThenAdd) { + state_.updateResourceInterest({"name1", "name2"}); + state_.updateResourceInterest({"name1", "name2", "name3"}); + envoy::api::v2::DeltaDiscoveryRequest cur_request = state_.getNextRequest(); + EXPECT_THAT(cur_request.resource_names_subscribe(), UnorderedElementsAre("name3")); + EXPECT_TRUE(cur_request.resource_names_unsubscribe().empty()); +} + +// Due to how our implementation provides the required behavior tested in RemoveThenAdd, the +// add-then-remove case *also* causes the resource to be referred to in the request (as an +// unsubscribe). +// Unlike the remove-then-add case, this one really is unnecessary, and ideally we would have +// the request simply not include any mention of the resource. Oh well. +// This test is just here to illustrate that this behavior exists, not to enforce that it +// should be like this. What *is* important: the server must happily and cleanly ignore +// "unsubscribe from [resource name I have never before referred to]" requests. +TEST_F(DeltaSubscriptionStateTest, AddThenRemove) { + state_.updateResourceInterest({"name1", "name2", "name3", "name4"}); + state_.updateResourceInterest({"name1", "name2", "name3"}); + envoy::api::v2::DeltaDiscoveryRequest cur_request = state_.getNextRequest(); + EXPECT_TRUE(cur_request.resource_names_subscribe().empty()); + EXPECT_THAT(cur_request.resource_names_unsubscribe(), UnorderedElementsAre("name4")); +} + +// add/remove/add == add. 
+TEST_F(DeltaSubscriptionStateTest, AddRemoveAdd) { + state_.updateResourceInterest({"name1", "name2", "name3", "name4"}); + state_.updateResourceInterest({"name1", "name2", "name3"}); + state_.updateResourceInterest({"name1", "name2", "name3", "name4"}); + envoy::api::v2::DeltaDiscoveryRequest cur_request = state_.getNextRequest(); + EXPECT_THAT(cur_request.resource_names_subscribe(), UnorderedElementsAre("name4")); + EXPECT_TRUE(cur_request.resource_names_unsubscribe().empty()); +} + +// remove/add/remove == remove. +TEST_F(DeltaSubscriptionStateTest, RemoveAddRemove) { + state_.updateResourceInterest({"name1", "name2"}); + state_.updateResourceInterest({"name1", "name2", "name3"}); + state_.updateResourceInterest({"name1", "name2"}); + envoy::api::v2::DeltaDiscoveryRequest cur_request = state_.getNextRequest(); + EXPECT_TRUE(cur_request.resource_names_subscribe().empty()); + EXPECT_THAT(cur_request.resource_names_unsubscribe(), UnorderedElementsAre("name3")); +} + +// Starts with 1,2,3. 4 is added/removed/added. In those same updates, 1,2,3 are +// removed/added/removed. End result should be 4 added and 1,2,3 removed. 
+TEST_F(DeltaSubscriptionStateTest, BothAddAndRemove) { + state_.updateResourceInterest({"name4"}); + state_.updateResourceInterest({"name1", "name2", "name3"}); + state_.updateResourceInterest({"name4"}); + envoy::api::v2::DeltaDiscoveryRequest cur_request = state_.getNextRequest(); + EXPECT_THAT(cur_request.resource_names_subscribe(), UnorderedElementsAre("name4")); + EXPECT_THAT(cur_request.resource_names_unsubscribe(), + UnorderedElementsAre("name1", "name2", "name3")); +} + +TEST_F(DeltaSubscriptionStateTest, CumulativeUpdates) { + state_.updateResourceInterest({"name1", "name2", "name3", "name4"}); + state_.updateResourceInterest({"name1", "name2", "name3", "name4", "name5"}); + envoy::api::v2::DeltaDiscoveryRequest cur_request = state_.getNextRequest(); + EXPECT_THAT(cur_request.resource_names_subscribe(), UnorderedElementsAre("name4", "name5")); + EXPECT_TRUE(cur_request.resource_names_unsubscribe().empty()); +} + +// Verifies that a sequence of good and bad responses from the server all get the appropriate +// ACKs/NACKs from Envoy. +TEST_F(DeltaSubscriptionStateTest, AckGenerated) { + // The xDS server's first response includes items for name1 and 2, but not 3. + { + Protobuf::RepeatedPtrField added_resources = + populateRepeatedResource({{"name1", "version1A"}, {"name2", "version2A"}}); + UpdateAck ack = deliverDiscoveryResponse(added_resources, {}, "debug1", "nonce1"); + EXPECT_EQ("nonce1", ack.nonce_); + EXPECT_EQ(Grpc::Status::GrpcStatus::Ok, ack.error_detail_.code()); + } + // The next response updates 1 and 2, and adds 3. + { + Protobuf::RepeatedPtrField added_resources = populateRepeatedResource( + {{"name1", "version1B"}, {"name2", "version2B"}, {"name3", "version3A"}}); + UpdateAck ack = deliverDiscoveryResponse(added_resources, {}, "debug2", "nonce2"); + EXPECT_EQ("nonce2", ack.nonce_); + EXPECT_EQ(Grpc::Status::GrpcStatus::Ok, ack.error_detail_.code()); + } + // The next response tries but fails to update all 3, and so should produce a NACK. 
+ { + Protobuf::RepeatedPtrField added_resources = populateRepeatedResource( + {{"name1", "version1C"}, {"name2", "version2C"}, {"name3", "version3B"}}); + UpdateAck ack = deliverBadDiscoveryResponse(added_resources, {}, "debug3", "nonce3"); + EXPECT_EQ("nonce3", ack.nonce_); + EXPECT_NE(Grpc::Status::GrpcStatus::Ok, ack.error_detail_.code()); + } + // The last response successfully updates all 3. + { + Protobuf::RepeatedPtrField added_resources = populateRepeatedResource( + {{"name1", "version1D"}, {"name2", "version2D"}, {"name3", "version3C"}}); + UpdateAck ack = deliverDiscoveryResponse(added_resources, {}, "debug4", "nonce4"); + EXPECT_EQ("nonce4", ack.nonce_); + EXPECT_EQ(Grpc::Status::GrpcStatus::Ok, ack.error_detail_.code()); + } +} + +// Tests population of the initial_resource_versions map in the first request of a new stream. +// Tests that +// 1) resources we have a version of are present in the map, +// 2) resources we are interested in but don't have are not present, and +// 3) resources we have lost interest in are not present. +TEST_F(DeltaSubscriptionStateTest, ResourceGoneLeadsToBlankInitialVersion) { + { + // The xDS server's first update includes items for name1 and 2, but not 3. + Protobuf::RepeatedPtrField add1_2 = + populateRepeatedResource({{"name1", "version1A"}, {"name2", "version2A"}}); + deliverDiscoveryResponse(add1_2, {}, "debugversion1"); + state_.markStreamFresh(); // simulate a stream reconnection + envoy::api::v2::DeltaDiscoveryRequest cur_request = state_.getNextRequest(); + EXPECT_EQ("version1A", cur_request.initial_resource_versions().at("name1")); + EXPECT_EQ("version2A", cur_request.initial_resource_versions().at("name2")); + EXPECT_EQ(cur_request.initial_resource_versions().end(), + cur_request.initial_resource_versions().find("name3")); + } + + { + // The next update updates 1, removes 2, and adds 3. The map should then have 1 and 3. 
+ Protobuf::RepeatedPtrField add1_3 = + populateRepeatedResource({{"name1", "version1B"}, {"name3", "version3A"}}); + Protobuf::RepeatedPtrField remove2; + *remove2.Add() = "name2"; + deliverDiscoveryResponse(add1_3, remove2, "debugversion2"); + state_.markStreamFresh(); // simulate a stream reconnection + envoy::api::v2::DeltaDiscoveryRequest cur_request = state_.getNextRequest(); + EXPECT_EQ("version1B", cur_request.initial_resource_versions().at("name1")); + EXPECT_EQ(cur_request.initial_resource_versions().end(), + cur_request.initial_resource_versions().find("name2")); + EXPECT_EQ("version3A", cur_request.initial_resource_versions().at("name3")); + } + + { + // The next update removes 1 and 3. The map we send the server should be empty... + Protobuf::RepeatedPtrField remove1_3; + *remove1_3.Add() = "name1"; + *remove1_3.Add() = "name3"; + deliverDiscoveryResponse({}, remove1_3, "debugversion3"); + state_.markStreamFresh(); // simulate a stream reconnection + envoy::api::v2::DeltaDiscoveryRequest cur_request = state_.getNextRequest(); + EXPECT_TRUE(cur_request.initial_resource_versions().empty()); + } + + { + // ...but our own map should remember our interest. In particular, losing interest in a + // resource should cause its name to appear in the next request's resource_names_unsubscribe. + state_.updateResourceInterest({"name3", "name4"}); // note the lack of 1 and 2 + envoy::api::v2::DeltaDiscoveryRequest cur_request = state_.getNextRequest(); + EXPECT_THAT(cur_request.resource_names_subscribe(), UnorderedElementsAre("name4")); + EXPECT_THAT(cur_request.resource_names_unsubscribe(), UnorderedElementsAre("name1", "name2")); + } +} + +// Upon a reconnection, the server is supposed to assume a blank slate for the Envoy's state +// (hence the need for initial_resource_versions). The resource_names_subscribe of the first +// message must therefore be every resource the Envoy is interested in. 
+// +// resource_names_unsubscribe, on the other hand, is always blank in the first request - even if, +// in between the last request of the last stream and the first request of the new stream, Envoy +// lost interest in a resource. The unsubscription implicitly takes effect by simply saying +// nothing about the resource in the newly reconnected stream. +TEST_F(DeltaSubscriptionStateTest, SubscribeAndUnsubscribeAfterReconnect) { + Protobuf::RepeatedPtrField add1_2 = + populateRepeatedResource({{"name1", "version1A"}, {"name2", "version2A"}}); + deliverDiscoveryResponse(add1_2, {}, "debugversion1"); + + state_.updateResourceInterest({"name2", "name3", "name4"}); // drop name1, add name4 + state_.markStreamFresh(); // simulate a stream reconnection + envoy::api::v2::DeltaDiscoveryRequest cur_request = state_.getNextRequest(); + // Regarding the resource_names_subscribe field: + // name1: do not include: we lost interest. + // name2: yes do include: we're interested and we have a version of it. + // name3: yes do include: even though we don't have a version of it, we are interested. + // name4: yes do include: we are newly interested. (If this wasn't a stream reconnect, only name4 + // would belong in this subscribe field). + EXPECT_THAT(cur_request.resource_names_subscribe(), + UnorderedElementsAre("name2", "name3", "name4")); + EXPECT_TRUE(cur_request.resource_names_unsubscribe().empty()); +} + +// initial_resource_versions should not be present on messages after the first in a stream. +TEST_F(DeltaSubscriptionStateTest, InitialVersionMapFirstMessageOnly) { + // First, verify that the first message of a new stream sends initial versions. + { + // The xDS server's first update gives us all three resources. 
+ Protobuf::RepeatedPtrField add_all = populateRepeatedResource( + {{"name1", "version1A"}, {"name2", "version2A"}, {"name3", "version3A"}}); + deliverDiscoveryResponse(add_all, {}, "debugversion1"); + state_.markStreamFresh(); // simulate a stream reconnection + envoy::api::v2::DeltaDiscoveryRequest cur_request = state_.getNextRequest(); + EXPECT_EQ("version1A", cur_request.initial_resource_versions().at("name1")); + EXPECT_EQ("version2A", cur_request.initial_resource_versions().at("name2")); + EXPECT_EQ("version3A", cur_request.initial_resource_versions().at("name3")); + } + // Then, after updating the resources but not reconnecting the stream, verify that initial + // versions are not sent. + { + state_.updateResourceInterest({"name1", "name2", "name3", "name4"}); + // The xDS server updates our resources, and gives us our newly requested one too. + Protobuf::RepeatedPtrField add_all = + populateRepeatedResource({{"name1", "version1B"}, + {"name2", "version2B"}, + {"name3", "version3B"}, + {"name4", "version4A"}}); + deliverDiscoveryResponse(add_all, {}, "debugversion2"); + envoy::api::v2::DeltaDiscoveryRequest cur_request = state_.getNextRequest(); + EXPECT_TRUE(cur_request.initial_resource_versions().empty()); + } +} + +TEST_F(DeltaSubscriptionStateTest, CheckUpdatePending) { + // Note that the test fixture ctor causes the first request to be "sent", so we start in the + // middle of a stream, with our initially interested resources having been requested already. 
+ EXPECT_FALSE(state_.subscriptionUpdatePending()); + state_.updateResourceInterest({"name1", "name2", "name3"}); // no change + EXPECT_FALSE(state_.subscriptionUpdatePending()); + state_.markStreamFresh(); + EXPECT_TRUE(state_.subscriptionUpdatePending()); // no change, BUT fresh stream + state_.updateResourceInterest({"name1", "name2"}); // one removed + EXPECT_TRUE(state_.subscriptionUpdatePending()); + state_.updateResourceInterest({"name1", "name2", "name3"}); // one added + EXPECT_TRUE(state_.subscriptionUpdatePending()); +} + +TEST_F(DeltaSubscriptionStateTest, PauseAndResume) { + EXPECT_FALSE(state_.paused()); + state_.pause(); + EXPECT_TRUE(state_.paused()); + state_.resume(); + EXPECT_FALSE(state_.paused()); + state_.pause(); + EXPECT_TRUE(state_.paused()); + state_.resume(); + EXPECT_FALSE(state_.paused()); +} + +} // namespace +} // namespace Config +} // namespace Envoy diff --git a/test/common/config/delta_subscription_test_harness.h b/test/common/config/delta_subscription_test_harness.h new file mode 100644 index 0000000000000..61d40bb2f83a7 --- /dev/null +++ b/test/common/config/delta_subscription_test_harness.h @@ -0,0 +1,183 @@ +#pragma once + +#include "common/config/delta_subscription_impl.h" + +#include "test/common/config/subscription_test_harness.h" +#include "test/mocks/config/mocks.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/grpc/mocks.h" +#include "test/mocks/local_info/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/stats/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::Mock; +using testing::NiceMock; +using testing::Return; + +namespace Envoy { +namespace Config { +namespace { + +class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { +public: + DeltaSubscriptionTestHarness() : DeltaSubscriptionTestHarness(std::chrono::milliseconds(0)) {} + DeltaSubscriptionTestHarness(std::chrono::milliseconds init_fetch_timeout) + : 
method_descriptor_(Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints")), + async_client_(new Grpc::MockAsyncClient()) { + node_.set_id("fo0"); + EXPECT_CALL(local_info_, node()).WillRepeatedly(testing::ReturnRef(node_)); + EXPECT_CALL(dispatcher_, createTimer_(_)); + subscription_ = std::make_unique( + local_info_, std::unique_ptr(async_client_), dispatcher_, + *method_descriptor_, Config::TypeUrl::get().ClusterLoadAssignment, random_, stats_store_, + rate_limit_settings_, stats_, init_fetch_timeout); + } + + ~DeltaSubscriptionTestHarness() { + while (!nonce_acks_required_.empty()) { + EXPECT_FALSE(nonce_acks_sent_.empty()); + EXPECT_EQ(nonce_acks_required_.front(), nonce_acks_sent_.front()); + nonce_acks_required_.pop(); + nonce_acks_sent_.pop(); + } + EXPECT_TRUE(nonce_acks_sent_.empty()); + } + + void startSubscription(const std::set& cluster_names) override { + EXPECT_CALL(*async_client_, start(_, _)).WillOnce(Return(&async_stream_)); + last_cluster_names_ = cluster_names; + expectSendMessage(last_cluster_names_, ""); + subscription_->start(cluster_names, callbacks_); + } + + void expectSendMessage(const std::set& cluster_names, + const std::string& version) override { + UNREFERENCED_PARAMETER(version); + expectSendMessage(cluster_names, {}, Grpc::Status::GrpcStatus::Ok, "", {}); + } + + void expectSendMessage(const std::set& subscribe, + const std::set& unsubscribe, const Protobuf::int32 error_code, + const std::string& error_message, + std::map initial_resource_versions) { + envoy::api::v2::DeltaDiscoveryRequest expected_request; + expected_request.mutable_node()->CopyFrom(node_); + std::copy( + subscribe.begin(), subscribe.end(), + Protobuf::RepeatedFieldBackInserter(expected_request.mutable_resource_names_subscribe())); + std::copy( + unsubscribe.begin(), unsubscribe.end(), + Protobuf::RepeatedFieldBackInserter(expected_request.mutable_resource_names_unsubscribe())); + if 
(!last_response_nonce_.empty()) { + nonce_acks_required_.push(last_response_nonce_); + last_response_nonce_ = ""; + } + expected_request.set_type_url(Config::TypeUrl::get().ClusterLoadAssignment); + + for (auto const& resource : initial_resource_versions) { + (*expected_request.mutable_initial_resource_versions())[resource.first] = resource.second; + } + + if (error_code != Grpc::Status::GrpcStatus::Ok) { + ::google::rpc::Status* error_detail = expected_request.mutable_error_detail(); + error_detail->set_code(error_code); + error_detail->set_message(error_message); + } + EXPECT_CALL(async_stream_, + sendMessage(ProtoEqIgnoringField(expected_request, "response_nonce"), false)) + .WillOnce([this](const Protobuf::Message& message, bool) { + const std::string nonce = + static_cast(message).response_nonce(); + if (!nonce.empty()) { + nonce_acks_sent_.push(nonce); + } + }); + } + + void deliverConfigUpdate(const std::vector& cluster_names, + const std::string& version, bool accept) override { + std::unique_ptr response( + new envoy::api::v2::DeltaDiscoveryResponse()); + + last_response_nonce_ = std::to_string(HashUtil::xxHash64(version)); + response->set_nonce(last_response_nonce_); + response->set_system_version_info(version); + + Protobuf::RepeatedPtrField typed_resources; + for (const auto& cluster : cluster_names) { + if (std::find(last_cluster_names_.begin(), last_cluster_names_.end(), cluster) != + last_cluster_names_.end()) { + envoy::api::v2::ClusterLoadAssignment* load_assignment = typed_resources.Add(); + load_assignment->set_cluster_name(cluster); + auto* resource = response->add_resources(); + resource->set_name(cluster); + resource->set_version(version); + resource->mutable_resource()->PackFrom(*load_assignment); + } + } + Protobuf::RepeatedPtrField removed_resources; + EXPECT_CALL(callbacks_, onConfigUpdate(_, _, version)).WillOnce(ThrowOnRejectedConfig(accept)); + if (accept) { + expectSendMessage({}, version); + } else { + EXPECT_CALL(callbacks_, 
onConfigUpdateFailed(_)); + expectSendMessage({}, {}, Grpc::Status::GrpcStatus::Internal, "bad config", {}); + } + subscription_->onDiscoveryResponse(std::move(response)); + Mock::VerifyAndClearExpectations(&async_stream_); + } + + void updateResources(const std::set& cluster_names) override { + std::set sub; + std::set unsub; + + std::set_difference(cluster_names.begin(), cluster_names.end(), last_cluster_names_.begin(), + last_cluster_names_.end(), std::inserter(sub, sub.begin())); + std::set_difference(last_cluster_names_.begin(), last_cluster_names_.end(), + cluster_names.begin(), cluster_names.end(), + std::inserter(unsub, unsub.begin())); + + expectSendMessage(sub, unsub, Grpc::Status::GrpcStatus::Ok, "", {}); + subscription_->updateResources(cluster_names); + last_cluster_names_ = cluster_names; + } + + void expectConfigUpdateFailed() override { + EXPECT_CALL(callbacks_, onConfigUpdateFailed(nullptr)); + } + + void expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds timeout) override { + init_timeout_timer_ = new Event::MockTimer(&dispatcher_); + EXPECT_CALL(*init_timeout_timer_, enableTimer(std::chrono::milliseconds(timeout))); + } + + void expectDisableInitFetchTimeoutTimer() override { + EXPECT_CALL(*init_timeout_timer_, disableTimer()); + } + + void callInitFetchTimeoutCb() override { init_timeout_timer_->callback_(); } + + const Protobuf::MethodDescriptor* method_descriptor_; + Grpc::MockAsyncClient* async_client_; + Event::MockDispatcher dispatcher_; + NiceMock random_; + NiceMock local_info_; + Grpc::MockAsyncStream async_stream_; + std::unique_ptr subscription_; + std::string last_response_nonce_; + std::set last_cluster_names_; + Envoy::Config::RateLimitSettings rate_limit_settings_; + Event::MockTimer* init_timeout_timer_; + envoy::api::v2::core::Node node_; + NiceMock> callbacks_; + std::queue nonce_acks_required_; + std::queue nonce_acks_sent_; +}; + +} // namespace +} // namespace Config +} // namespace Envoy diff --git 
a/test/common/config/dummy_config.proto b/test/common/config/dummy_config.proto index fcb2749e4f036..ae32e1477e04c 100644 --- a/test/common/config/dummy_config.proto +++ b/test/common/config/dummy_config.proto @@ -25,3 +25,13 @@ message DummyConfigsDump { repeated StaticConfigs static_dummy_configs = 1; repeated DynamicConfigs dynamic_dummy_configs = 2; } + +message DeltaDummyConfigsDump { + message DynamicConfigs { + string version_info = 1; + repeated DummyConfig dummy_configs = 2; + google.protobuf.Timestamp last_updated = 3; + } + + repeated DynamicConfigs dynamic_dummy_configs = 2; +} diff --git a/test/common/config/filesystem_subscription_impl_test.cc b/test/common/config/filesystem_subscription_impl_test.cc index c088e2329a70e..2feabde6921fd 100644 --- a/test/common/config/filesystem_subscription_impl_test.cc +++ b/test/common/config/filesystem_subscription_impl_test.cc @@ -2,9 +2,9 @@ #include "test/mocks/event/mocks.h" #include "test/mocks/filesystem/mocks.h" #include "test/test_common/logging.h" -#include "test/test_common/test_base.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" using ::testing::Throw; @@ -12,7 +12,8 @@ namespace Envoy { namespace Config { namespace { -class FilesystemSubscriptionImplTest : public TestBase, public FilesystemSubscriptionTestHarness {}; +class FilesystemSubscriptionImplTest : public testing::Test, + public FilesystemSubscriptionTestHarness {}; // Validate that the client can recover from bad JSON responses. 
TEST_F(FilesystemSubscriptionImplTest, BadJsonRecovery) { @@ -41,7 +42,7 @@ TEST(MiscFilesystemSubscriptionImplTest, BadWatch) { auto* watcher = new Filesystem::MockWatcher(); EXPECT_CALL(dispatcher, createFilesystemWatcher_()).WillOnce(Return(watcher)); EXPECT_CALL(*watcher, addWatch(_, _, _)).WillOnce(Throw(EnvoyException("bad path"))); - EXPECT_THROW_WITH_MESSAGE(FilesystemEdsSubscriptionImpl(dispatcher, "##!@/dev/null", stats, *api), + EXPECT_THROW_WITH_MESSAGE(FilesystemSubscriptionImpl(dispatcher, "##!@/dev/null", stats, *api), EnvoyException, "bad path"); } diff --git a/test/common/config/filesystem_subscription_test_harness.h b/test/common/config/filesystem_subscription_test_harness.h index 22a4bfd16697a..912d21739419d 100644 --- a/test/common/config/filesystem_subscription_test_harness.h +++ b/test/common/config/filesystem_subscription_test_harness.h @@ -7,14 +7,15 @@ #include "common/config/filesystem_subscription_impl.h" #include "common/config/utility.h" #include "common/event/dispatcher_impl.h" +#include "common/protobuf/utility.h" #include "test/common/config/subscription_test_harness.h" #include "test/mocks/config/mocks.h" #include "test/test_common/environment.h" -#include "test/test_common/test_base.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" using testing::_; using testing::NiceMock; @@ -23,25 +24,26 @@ using testing::Return; namespace Envoy { namespace Config { -typedef FilesystemSubscriptionImpl - FilesystemEdsSubscriptionImpl; - class FilesystemSubscriptionTestHarness : public SubscriptionTestHarness { public: FilesystemSubscriptionTestHarness() : path_(TestEnvironment::temporaryPath("eds.json")), - api_(Api::createApiForTest(stats_store_)), dispatcher_(*api_), - subscription_(dispatcher_, path_, stats_, *api_) {} + api_(Api::createApiForTest(stats_store_)), dispatcher_(api_->allocateDispatcher()), + subscription_(*dispatcher_, path_, stats_, *api_) {} - ~FilesystemSubscriptionTestHarness() { 
EXPECT_EQ(0, ::unlink(path_.c_str())); } + ~FilesystemSubscriptionTestHarness() { + if (::access(path_.c_str(), F_OK) != -1) { + EXPECT_EQ(0, ::unlink(path_.c_str())); + } + } - void startSubscription(const std::vector& cluster_names) override { + void startSubscription(const std::set& cluster_names) override { std::ifstream config_file(path_); file_at_start_ = config_file.good(); subscription_.start(cluster_names, callbacks_); } - void updateResources(const std::vector& cluster_names) override { + void updateResources(const std::set& cluster_names) override { subscription_.updateResources(cluster_names); } @@ -51,11 +53,11 @@ class FilesystemSubscriptionTestHarness : public SubscriptionTestHarness { const std::string temp_path = TestEnvironment::writeStringToFileForTest("eds.json.tmp", json); TestUtility::renameFile(temp_path, path_); if (run_dispatcher) { - dispatcher_.run(Event::Dispatcher::RunType::NonBlock); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); } } - void expectSendMessage(const std::vector& cluster_names, + void expectSendMessage(const std::set& cluster_names, const std::string& version) override { UNREFERENCED_PARAMETER(cluster_names); UNREFERENCED_PARAMETER(version); @@ -72,13 +74,8 @@ class FilesystemSubscriptionTestHarness : public SubscriptionTestHarness { file_json.pop_back(); file_json += "]}"; envoy::api::v2::DiscoveryResponse response_pb; - EXPECT_TRUE(Protobuf::util::JsonStringToMessage(file_json, &response_pb).ok()); - EXPECT_CALL(callbacks_, - onConfigUpdate( - RepeatedProtoEq( - Config::Utility::getTypedResources( - response_pb)), - version)) + MessageUtil::loadFromJson(file_json, response_pb); + EXPECT_CALL(callbacks_, onConfigUpdate(RepeatedProtoEq(response_pb.resources()), version)) .WillOnce(ThrowOnRejectedConfig(accept)); if (accept) { version_ = version; @@ -95,13 +92,30 @@ class FilesystemSubscriptionTestHarness : public SubscriptionTestHarness { failure + (file_at_start_ ? 
0 : 1), version); } + void expectConfigUpdateFailed() override { + // initial_fetch_timeout not implemented + } + + void expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds timeout) override { + UNREFERENCED_PARAMETER(timeout); + // initial_fetch_timeout not implemented + } + + void expectDisableInitFetchTimeoutTimer() override { + // initial_fetch_timeout not implemented + } + + void callInitFetchTimeoutCb() override { + // initial_fetch_timeout not implemented + } + const std::string path_; std::string version_; Stats::IsolatedStoreImpl stats_store_; Api::ApiPtr api_; - Event::DispatcherImpl dispatcher_; + Event::DispatcherPtr dispatcher_; NiceMock> callbacks_; - FilesystemEdsSubscriptionImpl subscription_; + FilesystemSubscriptionImpl subscription_; bool file_at_start_{false}; }; diff --git a/test/common/config/filter_json_test.cc b/test/common/config/filter_json_test.cc index beb6a7e4de3dd..8cec55f45609a 100644 --- a/test/common/config/filter_json_test.cc +++ b/test/common/config/filter_json_test.cc @@ -5,9 +5,10 @@ #include "common/config/filter_json.h" #include "common/json/json_loader.h" -#include "test/test_common/test_base.h" #include "test/test_common/utility.h" +#include "gtest/gtest.h" + namespace Envoy { namespace Config { namespace { diff --git a/test/common/config/grpc_mux_impl_test.cc b/test/common/config/grpc_mux_impl_test.cc index b8c58b5443fea..00a1b89e38998 100644 --- a/test/common/config/grpc_mux_impl_test.cc +++ b/test/common/config/grpc_mux_impl_test.cc @@ -3,6 +3,7 @@ #include "envoy/api/v2/discovery.pb.h" #include "envoy/api/v2/eds.pb.h" +#include "common/common/empty_string.h" #include "common/config/grpc_mux_impl.h" #include "common/config/protobuf_link_hacks.h" #include "common/config/resources.h" @@ -18,11 +19,11 @@ #include "test/mocks/runtime/mocks.h" #include "test/test_common/logging.h" #include "test/test_common/simulated_time_system.h" -#include "test/test_common/test_base.h" #include "test/test_common/test_time.h" 
#include "test/test_common/utility.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" using testing::_; using testing::AtLeast; @@ -31,6 +32,7 @@ using testing::Invoke; using testing::IsSubstring; using testing::NiceMock; using testing::Return; +using testing::ReturnRef; namespace Envoy { namespace Config { @@ -38,7 +40,7 @@ namespace { // We test some mux specific stuff below, other unit test coverage for singleton use of GrpcMuxImpl // is provided in [grpc_]subscription_impl_test.cc. -class GrpcMuxImplTestBase : public TestBase { +class GrpcMuxImplTestBase : public testing::Test { public: GrpcMuxImplTestBase() : async_client_(new Grpc::MockAsyncClient()) {} @@ -145,7 +147,7 @@ TEST_F(GrpcMuxImplTest, ResetStream) { EXPECT_CALL(random_, random()); ASSERT_TRUE(timer != nullptr); // initialized from dispatcher mock. EXPECT_CALL(*timer, enableTimer(_)); - grpc_mux_->onRemoteClose(Grpc::Status::GrpcStatus::Canceled, ""); + grpc_mux_->grpcStreamForTest().onRemoteClose(Grpc::Status::GrpcStatus::Canceled, ""); EXPECT_EQ(0, stats_.gauge("control_plane.connected_state").value()); EXPECT_CALL(*async_client_, start(_, _)).WillOnce(Return(&async_stream_)); expectSendMessage("foo", {"x", "y"}, ""); @@ -195,7 +197,7 @@ TEST_F(GrpcMuxImplTest, TypeUrlMismatch) { std::unique_ptr response( new envoy::api::v2::DiscoveryResponse()); response->set_type_url("bar"); - grpc_mux_->onReceiveMessage(std::move(response)); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } { @@ -209,7 +211,7 @@ TEST_F(GrpcMuxImplTest, TypeUrlMismatch) { expectSendMessage("foo", {"x", "y"}, "", "", Grpc::Status::GrpcStatus::Internal, fmt::format("bar does not match foo type URL in DiscoveryResponse {}", invalid_response->DebugString())); - grpc_mux_->onReceiveMessage(std::move(invalid_response)); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(invalid_response)); } expectSendMessage("foo", {}, ""); } @@ -243,7 +245,7 @@ TEST_F(GrpcMuxImplTest, WildcardWatch) { 
EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment)); })); expectSendMessage(type_url, {}, "1"); - grpc_mux_->onReceiveMessage(std::move(response)); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } } @@ -269,9 +271,7 @@ TEST_F(GrpcMuxImplTest, WatchDemux) { envoy::api::v2::ClusterLoadAssignment load_assignment; load_assignment.set_cluster_name("x"); response->add_resources()->PackFrom(load_assignment); - EXPECT_CALL(bar_callbacks, onConfigUpdate(_, "1")) - .WillOnce(Invoke([](const Protobuf::RepeatedPtrField& resources, - const std::string&) { EXPECT_TRUE(resources.empty()); })); + EXPECT_CALL(bar_callbacks, onConfigUpdate(_, "1")).Times(0); EXPECT_CALL(foo_callbacks, onConfigUpdate(_, "1")) .WillOnce( Invoke([&load_assignment](const Protobuf::RepeatedPtrField& resources, @@ -282,7 +282,7 @@ TEST_F(GrpcMuxImplTest, WatchDemux) { EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment)); })); expectSendMessage(type_url, {"y", "z", "x"}, "1"); - grpc_mux_->onReceiveMessage(std::move(response)); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } { @@ -322,13 +322,60 @@ TEST_F(GrpcMuxImplTest, WatchDemux) { EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment_y)); })); expectSendMessage(type_url, {"y", "z", "x"}, "2"); - grpc_mux_->onReceiveMessage(std::move(response)); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } expectSendMessage(type_url, {"x", "y"}, "2"); expectSendMessage(type_url, {}, "2"); } +// Validate behavior when we have multiple watchers that send empty updates. 
+TEST_F(GrpcMuxImplTest, MultipleWatcherWithEmptyUpdates) { + setup(); + InSequence s; + const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment; + NiceMock foo_callbacks; + auto foo_sub = grpc_mux_->subscribe(type_url, {"x", "y"}, foo_callbacks); + + EXPECT_CALL(*async_client_, start(_, _)).WillOnce(Return(&async_stream_)); + expectSendMessage(type_url, {"x", "y"}, ""); + grpc_mux_->start(); + + std::unique_ptr response( + new envoy::api::v2::DiscoveryResponse()); + response->set_type_url(type_url); + response->set_version_info("1"); + + EXPECT_CALL(foo_callbacks, onConfigUpdate(_, "1")).Times(0); + expectSendMessage(type_url, {"x", "y"}, "1"); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); + + expectSendMessage(type_url, {}, "1"); +} + +// Validate behavior when we have Single Watcher that sends Empty updates. +TEST_F(GrpcMuxImplTest, SingleWatcherWithEmptyUpdates) { + setup(); + const std::string& type_url = Config::TypeUrl::get().Cluster; + NiceMock foo_callbacks; + auto foo_sub = grpc_mux_->subscribe(type_url, {}, foo_callbacks); + + EXPECT_CALL(*async_client_, start(_, _)).WillOnce(Return(&async_stream_)); + expectSendMessage(type_url, {}, ""); + grpc_mux_->start(); + + std::unique_ptr response( + new envoy::api::v2::DiscoveryResponse()); + response->set_type_url(type_url); + response->set_version_info("1"); + // Validate that onConfigUpdate is called with empty resources. + EXPECT_CALL(foo_callbacks, onConfigUpdate(_, "1")) + .WillOnce(Invoke([](const Protobuf::RepeatedPtrField& resources, + const std::string&) { EXPECT_TRUE(resources.empty()); })); + expectSendMessage(type_url, {}, "1"); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); +} + // Exactly one test requires a mock time system to provoke behavior that cannot // easily be achieved with a SimulatedTimeSystem. 
class GrpcMuxImplTestWithMockTimeSystem : public GrpcMuxImplTestBase { @@ -363,7 +410,7 @@ TEST_F(GrpcMuxImplTestWithMockTimeSystem, TooManyRequestsWithDefaultSettings) { response->set_version_info("baz"); response->set_nonce("bar"); response->set_type_url("foo"); - grpc_mux_->onReceiveMessage(std::move(response)); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } }; @@ -416,7 +463,7 @@ TEST_F(GrpcMuxImplTestWithMockTimeSystem, TooManyRequestsWithEmptyRateLimitSetti response->set_version_info("baz"); response->set_nonce("bar"); response->set_type_url("foo"); - grpc_mux_->onReceiveMessage(std::move(response)); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } }; @@ -470,7 +517,7 @@ TEST_F(GrpcMuxImplTest, TooManyRequestsWithCustomRateLimitSettings) { response->set_version_info("baz"); response->set_nonce("bar"); response->set_type_url("foo"); - grpc_mux_->onReceiveMessage(std::move(response)); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } }; @@ -491,9 +538,12 @@ TEST_F(GrpcMuxImplTest, TooManyRequestsWithCustomRateLimitSettings) { // Validate that drain requests call when there are multiple requests in queue. time_system_.setMonotonicTime(std::chrono::seconds(10)); drain_timer_cb(); + + // Check that the pending_requests stat is updated with the queue drain. + EXPECT_EQ(0, stats_.counter("control_plane.pending_requests").value()); } -// Verifies that a messsage with no resources is accepted. +// Verifies that a message with no resources is accepted. TEST_F(GrpcMuxImplTest, UnwatchedTypeAcceptsEmptyResources) { setup(); @@ -517,7 +567,7 @@ TEST_F(GrpcMuxImplTest, UnwatchedTypeAcceptsEmptyResources) { response->set_type_url(type_url); // This contains zero resources. No discovery request should be sent. 
- grpc_mux_->onReceiveMessage(std::move(response)); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); // when we add the new subscription version should be 1 and nonce should be bar expectSendMessage(type_url, {"x"}, "1", "bar"); @@ -556,11 +606,11 @@ TEST_F(GrpcMuxImplTest, UnwatchedTypeRejectsResources) { // The message should be rejected. expectSendMessage(type_url, {}, "", "bar"); EXPECT_LOG_CONTAINS("warning", "Ignoring unwatched type URL " + type_url, - grpc_mux_->onReceiveMessage(std::move(response))); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response))); } TEST_F(GrpcMuxImplTest, BadLocalInfoEmptyClusterName) { - EXPECT_CALL(local_info_, clusterName()).WillOnce(Return("")); + EXPECT_CALL(local_info_, clusterName()).WillOnce(ReturnRef(EMPTY_STRING)); EXPECT_THROW_WITH_MESSAGE( GrpcMuxImpl( local_info_, std::unique_ptr(async_client_), dispatcher_, @@ -573,7 +623,7 @@ TEST_F(GrpcMuxImplTest, BadLocalInfoEmptyClusterName) { } TEST_F(GrpcMuxImplTest, BadLocalInfoEmptyNodeName) { - EXPECT_CALL(local_info_, nodeName()).WillOnce(Return("")); + EXPECT_CALL(local_info_, nodeName()).WillOnce(ReturnRef(EMPTY_STRING)); EXPECT_THROW_WITH_MESSAGE( GrpcMuxImpl( local_info_, std::unique_ptr(async_client_), dispatcher_, diff --git a/test/common/config/grpc_stream_test.cc b/test/common/config/grpc_stream_test.cc new file mode 100644 index 0000000000000..b4a97675fe916 --- /dev/null +++ b/test/common/config/grpc_stream_test.cc @@ -0,0 +1,133 @@ +#include "envoy/api/v2/discovery.pb.h" + +#include "common/config/grpc_stream.h" +#include "common/protobuf/protobuf.h" + +#include "test/mocks/config/mocks.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/grpc/mocks.h" +#include "test/mocks/upstream/mocks.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::NiceMock; +using testing::Return; + +namespace Envoy { +namespace Config { +namespace { + +class GrpcStreamTest : 
public testing::Test { +protected: + GrpcStreamTest() + : async_client_owner_(std::make_unique()), + async_client_(async_client_owner_.get()), + grpc_stream_(&callbacks_, std::move(async_client_owner_), + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints"), + random_, dispatcher_, stats_, rate_limit_settings_) {} + + NiceMock dispatcher_; + Grpc::MockAsyncStream async_stream_; + Stats::IsolatedStoreImpl stats_; + NiceMock random_; + Envoy::Config::RateLimitSettings rate_limit_settings_; + NiceMock callbacks_; + std::unique_ptr async_client_owner_; + Grpc::MockAsyncClient* async_client_; + + GrpcStream grpc_stream_; +}; + +// Tests that establishNewStream() establishes it, a second call does nothing, and a third call +// after the stream was disconnected re-establishes it. +TEST_F(GrpcStreamTest, EstablishNewStream) { + EXPECT_FALSE(grpc_stream_.grpcStreamAvailable()); + // Successful establishment + { + EXPECT_CALL(*async_client_, start(_, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(callbacks_, onStreamEstablished()); + grpc_stream_.establishNewStream(); + EXPECT_TRUE(grpc_stream_.grpcStreamAvailable()); + } + // Idempotency: do nothing (other than logging a warning) if already connected + { + EXPECT_CALL(*async_client_, start(_, _)).Times(0); + EXPECT_CALL(callbacks_, onStreamEstablished()).Times(0); + grpc_stream_.establishNewStream(); + EXPECT_TRUE(grpc_stream_.grpcStreamAvailable()); + } + grpc_stream_.onRemoteClose(Grpc::Status::GrpcStatus::Ok, ""); + EXPECT_FALSE(grpc_stream_.grpcStreamAvailable()); + // Successful re-establishment + { + EXPECT_CALL(*async_client_, start(_, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(callbacks_, onStreamEstablished()); + grpc_stream_.establishNewStream(); + EXPECT_TRUE(grpc_stream_.grpcStreamAvailable()); + } +} + +// A failure in the underlying gRPC machinery should result in grpcStreamAvailable() false. 
Calling +// sendMessage would segfault. +TEST_F(GrpcStreamTest, FailToEstablishNewStream) { + EXPECT_CALL(*async_client_, start(_, _)).WillOnce(Return(nullptr)); + EXPECT_CALL(callbacks_, onEstablishmentFailure()); + grpc_stream_.establishNewStream(); + EXPECT_FALSE(grpc_stream_.grpcStreamAvailable()); +} + +// Checks that sendMessage correctly passes a DiscoveryRequest down to the underlying gRPC +// machinery. +TEST_F(GrpcStreamTest, SendMessage) { + EXPECT_CALL(*async_client_, start(_, _)).WillOnce(Return(&async_stream_)); + grpc_stream_.establishNewStream(); + envoy::api::v2::DiscoveryRequest request; + request.set_response_nonce("grpc_stream_test_noncense"); + EXPECT_CALL(async_stream_, sendMessage(ProtoEq(request), false)); + grpc_stream_.sendMessage(request); +} + +// Tests that, upon a call of the GrpcStream::onReceiveMessage() callback, which is called by the +// underlying gRPC machinery, the received proto will make it up to the GrpcStreamCallbacks that the +// GrpcStream was given. +TEST_F(GrpcStreamTest, ReceiveMessage) { + envoy::api::v2::DiscoveryResponse response_copy; + response_copy.set_type_url("faketypeURL"); + auto response = std::make_unique(response_copy); + envoy::api::v2::DiscoveryResponse received_message; + EXPECT_CALL(callbacks_, onDiscoveryResponse(_)) + .WillOnce([&received_message](std::unique_ptr&& message) { + received_message = *message; + }); + grpc_stream_.onReceiveMessage(std::move(response)); + EXPECT_TRUE(TestUtility::protoEqual(response_copy, received_message)); +} + +// If the value has only ever been 0, the stat should remain unused, including after an attempt to +// write a 0 to it. 
+TEST_F(GrpcStreamTest, QueueSizeStat) { + grpc_stream_.maybeUpdateQueueSizeStat(0); + EXPECT_FALSE(stats_.gauge("control_plane.pending_requests").used()); + grpc_stream_.maybeUpdateQueueSizeStat(123); + EXPECT_EQ(123, stats_.gauge("control_plane.pending_requests").value()); + grpc_stream_.maybeUpdateQueueSizeStat(0); + EXPECT_EQ(0, stats_.gauge("control_plane.pending_requests").value()); +} + +// Just to add coverage to the no-op implementations of these callbacks (without exposing us to +// crashes from a badly behaved peer like NOT_IMPLEMENTED_GCOVR_EXCL_LINE would). +TEST_F(GrpcStreamTest, HeaderTrailerJustForCodeCoverage) { + Http::HeaderMapPtr response_headers{new Http::TestHeaderMapImpl{}}; + grpc_stream_.onReceiveInitialMetadata(std::move(response_headers)); + Http::TestHeaderMapImpl request_headers; + grpc_stream_.onCreateInitialMetadata(request_headers); + Http::HeaderMapPtr trailers{new Http::TestHeaderMapImpl{}}; + grpc_stream_.onReceiveTrailingMetadata(std::move(trailers)); +} + +} // namespace +} // namespace Config +} // namespace Envoy diff --git a/test/common/config/grpc_subscription_impl_test.cc b/test/common/config/grpc_subscription_impl_test.cc index d9ae80c5b98cd..490a74c228e39 100644 --- a/test/common/config/grpc_subscription_impl_test.cc +++ b/test/common/config/grpc_subscription_impl_test.cc @@ -1,5 +1,6 @@ #include "test/common/config/grpc_subscription_test_harness.h" -#include "test/test_common/test_base.h" + +#include "gtest/gtest.h" using testing::InSequence; @@ -7,7 +8,7 @@ namespace Envoy { namespace Config { namespace { -class GrpcSubscriptionImplTest : public TestBase, public GrpcSubscriptionTestHarness {}; +class GrpcSubscriptionImplTest : public testing::Test, public GrpcSubscriptionTestHarness {}; // Validate that stream creation results in a timer based retry and can recover. 
TEST_F(GrpcSubscriptionImplTest, StreamCreationFailure) { @@ -36,12 +37,11 @@ TEST_F(GrpcSubscriptionImplTest, StreamCreationFailure) { TEST_F(GrpcSubscriptionImplTest, RemoteStreamClose) { startSubscription({"cluster0", "cluster1"}); verifyStats(1, 0, 0, 0, 0); - Http::HeaderMapPtr trailers{new Http::TestHeaderMapImpl{}}; - subscription_->grpcMux().onReceiveTrailingMetadata(std::move(trailers)); EXPECT_CALL(callbacks_, onConfigUpdateFailed(_)); EXPECT_CALL(*timer_, enableTimer(_)); EXPECT_CALL(random_, random()); - subscription_->grpcMux().onRemoteClose(Grpc::Status::GrpcStatus::Canceled, ""); + subscription_->grpcMux().grpcStreamForTest().onRemoteClose(Grpc::Status::GrpcStatus::Canceled, + ""); verifyStats(2, 0, 0, 1, 0); verifyControlPlaneStats(0); diff --git a/test/common/config/grpc_subscription_test_harness.h b/test/common/config/grpc_subscription_test_harness.h index a9e2c0eed41db..5c23300dab288 100644 --- a/test/common/config/grpc_subscription_test_harness.h +++ b/test/common/config/grpc_subscription_test_harness.h @@ -14,10 +14,10 @@ #include "test/mocks/grpc/mocks.h" #include "test/mocks/local_info/mocks.h" #include "test/mocks/upstream/mocks.h" -#include "test/test_common/test_base.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" using testing::_; using testing::Invoke; @@ -28,11 +28,11 @@ using testing::Return; namespace Envoy { namespace Config { -typedef GrpcSubscriptionImpl GrpcEdsSubscriptionImpl; - class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { public: - GrpcSubscriptionTestHarness() + GrpcSubscriptionTestHarness() : GrpcSubscriptionTestHarness(std::chrono::milliseconds(0)) {} + + GrpcSubscriptionTestHarness(std::chrono::milliseconds init_fetch_timeout) : method_descriptor_(Protobuf::DescriptorPool::generated_pool()->FindMethodByName( "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints")), async_client_(new Grpc::MockAsyncClient()), timer_(new Event::MockTimer()) { @@ -42,19 +42,20 
@@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { timer_cb_ = timer_cb; return timer_; })); - subscription_ = std::make_unique( + subscription_ = std::make_unique( local_info_, std::unique_ptr(async_client_), dispatcher_, random_, - *method_descriptor_, stats_, stats_store_, rate_limit_settings_); + *method_descriptor_, Config::TypeUrl::get().ClusterLoadAssignment, stats_, stats_store_, + rate_limit_settings_, init_fetch_timeout); } - ~GrpcSubscriptionTestHarness() { EXPECT_CALL(async_stream_, sendMessage(_, false)); } + ~GrpcSubscriptionTestHarness() override { EXPECT_CALL(async_stream_, sendMessage(_, false)); } - void expectSendMessage(const std::vector& cluster_names, + void expectSendMessage(const std::set& cluster_names, const std::string& version) override { expectSendMessage(cluster_names, version, Grpc::Status::GrpcStatus::Ok, ""); } - void expectSendMessage(const std::vector& cluster_names, const std::string& version, + void expectSendMessage(const std::set& cluster_names, const std::string& version, const Protobuf::int32 error_code, const std::string& error_message) { envoy::api::v2::DiscoveryRequest expected_request; expected_request.mutable_node()->CopyFrom(node_); @@ -74,17 +75,11 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { EXPECT_CALL(async_stream_, sendMessage(ProtoEq(expected_request), false)); } - void startSubscription(const std::vector& cluster_names) override { + void startSubscription(const std::set& cluster_names) override { EXPECT_CALL(*async_client_, start(_, _)).WillOnce(Return(&async_stream_)); last_cluster_names_ = cluster_names; expectSendMessage(last_cluster_names_, ""); subscription_->start(cluster_names, callbacks_); - // These are just there to add coverage to the null implementations of these - // callbacks. 
- Http::HeaderMapPtr response_headers{new Http::TestHeaderMapImpl{}}; - subscription_->grpcMux().onReceiveInitialMetadata(std::move(response_headers)); - Http::TestHeaderMapImpl request_headers; - subscription_->grpcMux().onCreateInitialMetadata(request_headers); } void deliverConfigUpdate(const std::vector& cluster_names, @@ -104,7 +99,7 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { response->add_resources()->PackFrom(*load_assignment); } } - EXPECT_CALL(callbacks_, onConfigUpdate(RepeatedProtoEq(typed_resources), version)) + EXPECT_CALL(callbacks_, onConfigUpdate(RepeatedProtoEq(response->resources()), version)) .WillOnce(ThrowOnRejectedConfig(accept)); if (accept) { expectSendMessage(last_cluster_names_, version); @@ -114,20 +109,43 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { expectSendMessage(last_cluster_names_, version_, Grpc::Status::GrpcStatus::Internal, "bad config"); } - subscription_->grpcMux().onReceiveMessage(std::move(response)); + subscription_->grpcMux().onDiscoveryResponse(std::move(response)); Mock::VerifyAndClearExpectations(&async_stream_); } - void updateResources(const std::vector& cluster_names) override { - std::vector cluster_superset = cluster_names; - cluster_superset.insert(cluster_superset.end(), last_cluster_names_.begin(), - last_cluster_names_.end()); - expectSendMessage(cluster_superset, version_); + void updateResources(const std::set& cluster_names) override { + // The "watch" mechanism means that updates that lose interest in a resource + // will first generate a request for [still watched resources, i.e. without newly unwatched + // ones] before generating the request for all of cluster_names. + // TODO(fredlas) this unnecessary second request will stop happening once the watch mechanism is + // no longer internally used by GrpcSubscriptionImpl. 
+ std::set both; + for (const auto& n : cluster_names) { + if (last_cluster_names_.find(n) != last_cluster_names_.end()) { + both.insert(n); + } + } + expectSendMessage(both, version_); expectSendMessage(cluster_names, version_); subscription_->updateResources(cluster_names); last_cluster_names_ = cluster_names; } + void expectConfigUpdateFailed() override { + EXPECT_CALL(callbacks_, onConfigUpdateFailed(nullptr)); + } + + void expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds timeout) override { + init_timeout_timer_ = new Event::MockTimer(&dispatcher_); + EXPECT_CALL(*init_timeout_timer_, enableTimer(std::chrono::milliseconds(timeout))); + } + + void expectDisableInitFetchTimeoutTimer() override { + EXPECT_CALL(*init_timeout_timer_, disableTimer()); + } + + void callInitFetchTimeoutCb() override { init_timeout_timer_->callback_(); } + std::string version_; const Protobuf::MethodDescriptor* method_descriptor_; Grpc::MockAsyncClient* async_client_; @@ -139,11 +157,12 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { envoy::api::v2::core::Node node_; NiceMock> callbacks_; Grpc::MockAsyncStream async_stream_; - std::unique_ptr subscription_; + std::unique_ptr subscription_; std::string last_response_nonce_; - std::vector last_cluster_names_; + std::set last_cluster_names_; NiceMock local_info_; Envoy::Config::RateLimitSettings rate_limit_settings_; + Event::MockTimer* init_timeout_timer_; }; // TODO(danielhochman): test with RDS and ensure version_info is same as what API returned diff --git a/test/common/config/http_subscription_impl_test.cc b/test/common/config/http_subscription_impl_test.cc index 1b1a5f830c0e5..eb5f9df87b480 100644 --- a/test/common/config/http_subscription_impl_test.cc +++ b/test/common/config/http_subscription_impl_test.cc @@ -1,13 +1,14 @@ #include #include "test/common/config/http_subscription_test_harness.h" -#include "test/test_common/test_base.h" + +#include "gtest/gtest.h" namespace Envoy { namespace Config 
{ namespace { -class HttpSubscriptionImplTest : public TestBase, public HttpSubscriptionTestHarness {}; +class HttpSubscriptionImplTest : public testing::Test, public HttpSubscriptionTestHarness {}; // Validate that the client can recover from a remote fetch failure. TEST_F(HttpSubscriptionImplTest, OnRequestReset) { diff --git a/test/common/config/http_subscription_test_harness.h b/test/common/config/http_subscription_test_harness.h index d847994b081f2..7e6cfc828092d 100644 --- a/test/common/config/http_subscription_test_harness.h +++ b/test/common/config/http_subscription_test_harness.h @@ -10,6 +10,7 @@ #include "common/config/utility.h" #include "common/http/message_impl.h" #include "common/protobuf/protobuf.h" +#include "common/protobuf/utility.h" #include "test/common/config/subscription_test_harness.h" #include "test/mocks/config/mocks.h" @@ -17,10 +18,10 @@ #include "test/mocks/local_info/mocks.h" #include "test/mocks/runtime/mocks.h" #include "test/mocks/upstream/mocks.h" -#include "test/test_common/test_base.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" using testing::_; using testing::Invoke; @@ -29,11 +30,11 @@ using testing::Return; namespace Envoy { namespace Config { -typedef HttpSubscriptionImpl HttpEdsSubscriptionImpl; - class HttpSubscriptionTestHarness : public SubscriptionTestHarness { public: - HttpSubscriptionTestHarness() + HttpSubscriptionTestHarness() : HttpSubscriptionTestHarness(std::chrono::milliseconds(0)) {} + + HttpSubscriptionTestHarness(std::chrono::milliseconds init_fetch_timeout) : method_descriptor_(Protobuf::DescriptorPool::generated_pool()->FindMethodByName( "envoy.api.v2.EndpointDiscoveryService.FetchEndpoints")), timer_(new Event::MockTimer()), http_request_(&cm_.async_client_) { @@ -43,9 +44,9 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { timer_cb_ = timer_cb; return timer_; })); - subscription_ = std::make_unique( + subscription_ = std::make_unique( 
local_info_, cm_, "eds_cluster", dispatcher_, random_gen_, std::chrono::milliseconds(1), - std::chrono::milliseconds(1000), *method_descriptor_, stats_); + std::chrono::milliseconds(1000), *method_descriptor_, stats_, init_fetch_timeout); } ~HttpSubscriptionTestHarness() { @@ -55,7 +56,7 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { } } - void expectSendMessage(const std::vector& cluster_names, + void expectSendMessage(const std::set& cluster_names, const std::string& version) override { EXPECT_CALL(cm_, httpAsyncClientForCluster("eds_cluster")); EXPECT_CALL(cm_.async_client_, send_(_, _, _)) @@ -63,38 +64,46 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { Http::AsyncClient::Callbacks& callbacks, const Http::AsyncClient::RequestOptions&) { http_callbacks_ = &callbacks; - EXPECT_EQ("POST", std::string(request->headers().Method()->value().c_str())); + EXPECT_EQ("POST", std::string(request->headers().Method()->value().getStringView())); EXPECT_EQ(Http::Headers::get().ContentTypeValues.Json, - std::string(request->headers().ContentType()->value().c_str())); - EXPECT_EQ("eds_cluster", std::string(request->headers().Host()->value().c_str())); + std::string(request->headers().ContentType()->value().getStringView())); + EXPECT_EQ("eds_cluster", std::string(request->headers().Host()->value().getStringView())); EXPECT_EQ("/v2/discovery:endpoints", - std::string(request->headers().Path()->value().c_str())); + std::string(request->headers().Path()->value().getStringView())); std::string expected_request = "{"; if (!version_.empty()) { expected_request += "\"version_info\":\"" + version + "\","; } expected_request += "\"node\":{\"id\":\"fo0\"},"; if (!cluster_names.empty()) { - expected_request += - "\"resource_names\":[\"" + StringUtil::join(cluster_names, "\",\"") + "\"]"; + std::string joined_cluster_names; + { + std::string delimiter = "\",\""; + std::ostringstream buf; + std::copy(cluster_names.begin(), cluster_names.end(), 
+ std::ostream_iterator(buf, delimiter.c_str())); + std::string with_comma = buf.str(); + joined_cluster_names = with_comma.substr(0, with_comma.length() - delimiter.length()); + } + expected_request += "\"resource_names\":[\"" + joined_cluster_names + "\"]"; } expected_request += "}"; EXPECT_EQ(expected_request, request->bodyAsString()); EXPECT_EQ(fmt::format_int(expected_request.size()).str(), - std::string(request->headers().ContentLength()->value().c_str())); + std::string(request->headers().ContentLength()->value().getStringView())); request_in_progress_ = true; return &http_request_; })); } - void startSubscription(const std::vector& cluster_names) override { + void startSubscription(const std::set& cluster_names) override { version_ = ""; cluster_names_ = cluster_names; expectSendMessage(cluster_names, ""); subscription_->start(cluster_names, callbacks_); } - void updateResources(const std::vector& cluster_names) override { + void updateResources(const std::set& cluster_names) override { cluster_names_ = cluster_names; expectSendMessage(cluster_names, version_); subscription_->updateResources(cluster_names); @@ -112,16 +121,11 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { response_json.pop_back(); response_json += "]}"; envoy::api::v2::DiscoveryResponse response_pb; - EXPECT_TRUE(Protobuf::util::JsonStringToMessage(response_json, &response_pb).ok()); + MessageUtil::loadFromJson(response_json, response_pb); Http::HeaderMapPtr response_headers{new Http::TestHeaderMapImpl{{":status", "200"}}}; Http::MessagePtr message{new Http::ResponseMessageImpl(std::move(response_headers))}; message->body() = std::make_unique(response_json); - EXPECT_CALL(callbacks_, - onConfigUpdate( - RepeatedProtoEq( - Config::Utility::getTypedResources( - response_pb)), - version)) + EXPECT_CALL(callbacks_, onConfigUpdate(RepeatedProtoEq(response_pb.resources()), version)) .WillOnce(ThrowOnRejectedConfig(accept)); if (!accept) { EXPECT_CALL(callbacks_, 
onConfigUpdateFailed(_)); @@ -136,6 +140,21 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { timerTick(); } + void expectConfigUpdateFailed() override { + EXPECT_CALL(callbacks_, onConfigUpdateFailed(nullptr)); + } + + void expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds timeout) override { + init_timeout_timer_ = new Event::MockTimer(&dispatcher_); + EXPECT_CALL(*init_timeout_timer_, enableTimer(std::chrono::milliseconds(timeout))); + } + + void expectDisableInitFetchTimeoutTimer() override { + EXPECT_CALL(*init_timeout_timer_, disableTimer()); + } + + void callInitFetchTimeoutCb() override { init_timeout_timer_->callback_(); } + void timerTick() { expectSendMessage(cluster_names_, version_); timer_cb_(); @@ -143,7 +162,7 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { bool request_in_progress_{}; std::string version_; - std::vector cluster_names_; + std::set cluster_names_; const Protobuf::MethodDescriptor* method_descriptor_; Upstream::MockClusterManager cm_; Event::MockDispatcher dispatcher_; @@ -154,8 +173,9 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { Http::MockAsyncClientRequest http_request_; Http::AsyncClient::Callbacks* http_callbacks_; Config::MockSubscriptionCallbacks callbacks_; - std::unique_ptr subscription_; + std::unique_ptr subscription_; NiceMock local_info_; + Event::MockTimer* init_timeout_timer_; }; } // namespace Config diff --git a/test/common/config/metadata_test.cc b/test/common/config/metadata_test.cc index bfc208841fcfa..28d5cb67b4a83 100644 --- a/test/common/config/metadata_test.cc +++ b/test/common/config/metadata_test.cc @@ -5,9 +5,10 @@ #include "common/protobuf/utility.h" #include "test/test_common/registry.h" -#include "test/test_common/test_base.h" #include "test/test_common/utility.h" +#include "gtest/gtest.h" + namespace Envoy { namespace Config { namespace { @@ -47,7 +48,7 @@ TEST(MetadataTest, MetadataValuePath) { 
ProtobufWkt::Value::KindCase::KIND_NOT_SET); } -class TypedMetadataTest : public TestBase { +class TypedMetadataTest : public testing::Test { public: TypedMetadataTest() : registered_factory_(foo_factory_) {} diff --git a/test/common/config/rds_json_test.cc b/test/common/config/rds_json_test.cc index 262871c750b72..5a6d57547398a 100644 --- a/test/common/config/rds_json_test.cc +++ b/test/common/config/rds_json_test.cc @@ -3,14 +3,14 @@ #include "common/config/rds_json.h" #include "common/json/json_loader.h" -#include "test/test_common/test_base.h" - #include "gmock/gmock.h" +#include "gtest/gtest.h" using testing::_; namespace Envoy { namespace Config { +namespace { TEST(RdsJsonTest, TestRuntimeFractionTranslation) { const std::string json_string = R"EOF( @@ -40,5 +40,38 @@ TEST(RdsJsonTest, TestRuntimeFractionTranslation) { EXPECT_EQ(route.match().runtime_fraction().runtime_key(), "some_key"); } +TEST(RdsJsonTest, TestWeightedClusterTranslation) { + const std::string json_string = R"EOF( + { + "prefix": "/new_endpoint", + "prefix_rewrite": "/api/new_endpoint", + "weighted_clusters": { + "clusters": [ + { + "name": "foo", + "weight": 80 + }, + { + "name": "bar", + "weight": 20 + } + ] + } + } + )EOF"; + envoy::api::v2::route::Route route; + auto json_object_ptr = Json::Factory::loadFromString(json_string); + Envoy::Config::RdsJson::translateRoute(*json_object_ptr, route); + + EXPECT_TRUE(route.has_route()); + EXPECT_TRUE(route.route().has_weighted_clusters()); + EXPECT_EQ(2, route.route().weighted_clusters().clusters_size()); + EXPECT_EQ("foo", route.route().weighted_clusters().clusters(0).name()); + EXPECT_EQ(80, route.route().weighted_clusters().clusters(0).weight().value()); + EXPECT_EQ("bar", route.route().weighted_clusters().clusters(1).name()); + EXPECT_EQ(20, route.route().weighted_clusters().clusters(1).weight().value()); +} + +} // namespace } // namespace Config } // namespace Envoy diff --git a/test/common/config/subscription_factory_test.cc 
b/test/common/config/subscription_factory_test.cc index 0daf82b123fca..3497f16a1826e 100644 --- a/test/common/config/subscription_factory_test.cc +++ b/test/common/config/subscription_factory_test.cc @@ -14,10 +14,10 @@ #include "test/mocks/stats/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/environment.h" -#include "test/test_common/test_base.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" using ::testing::_; using ::testing::Invoke; @@ -25,18 +25,20 @@ using ::testing::Return; namespace Envoy { namespace Config { +namespace { -class SubscriptionFactoryTest : public TestBase { +class SubscriptionFactoryTest : public testing::Test { public: SubscriptionFactoryTest() : http_request_(&cm_.async_client_), api_(Api::createApiForTest(stats_store_)) {} - std::unique_ptr> + std::unique_ptr subscriptionFromConfigSource(const envoy::api::v2::core::ConfigSource& config) { - return SubscriptionFactory::subscriptionFromConfigSource( + return SubscriptionFactory::subscriptionFromConfigSource( config, local_info_, dispatcher_, cm_, random_, stats_store_, "envoy.api.v2.EndpointDiscoveryService.FetchEndpoints", - "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints", *api_); + "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints", + Config::TypeUrl::get().ClusterLoadAssignment, *api_); } Upstream::MockClusterManager cm_; @@ -85,7 +87,7 @@ TEST_F(SubscriptionFactoryTest, GrpcClusterEmpty) { TEST_F(SubscriptionFactoryTest, RestClusterSingleton) { envoy::api::v2::core::ConfigSource config; Upstream::ClusterManager::ClusterInfoMap cluster_map; - NiceMock cluster; + NiceMock cluster; config.mutable_api_config_source()->set_api_type(envoy::api::v2::core::ApiConfigSource::REST); config.mutable_api_config_source()->mutable_refresh_delay()->set_seconds(1); @@ -102,7 +104,7 @@ TEST_F(SubscriptionFactoryTest, RestClusterSingleton) { TEST_F(SubscriptionFactoryTest, GrpcClusterSingleton) { 
envoy::api::v2::core::ConfigSource config; Upstream::ClusterManager::ClusterInfoMap cluster_map; - NiceMock cluster; + NiceMock cluster; config.mutable_api_config_source()->set_api_type(envoy::api::v2::core::ApiConfigSource::GRPC); config.mutable_api_config_source()->mutable_refresh_delay()->set_seconds(1); @@ -134,7 +136,7 @@ TEST_F(SubscriptionFactoryTest, GrpcClusterSingleton) { TEST_F(SubscriptionFactoryTest, RestClusterMultiton) { envoy::api::v2::core::ConfigSource config; Upstream::ClusterManager::ClusterInfoMap cluster_map; - NiceMock cluster; + NiceMock cluster; config.mutable_api_config_source()->set_api_type(envoy::api::v2::core::ApiConfigSource::REST); @@ -155,7 +157,7 @@ TEST_F(SubscriptionFactoryTest, RestClusterMultiton) { TEST_F(SubscriptionFactoryTest, GrpcClusterMultiton) { envoy::api::v2::core::ConfigSource config; Upstream::ClusterManager::ClusterInfoMap cluster_map; - NiceMock cluster; + NiceMock cluster; config.mutable_api_config_source()->set_api_type(envoy::api::v2::core::ApiConfigSource::GRPC); @@ -171,9 +173,9 @@ TEST_F(SubscriptionFactoryTest, GrpcClusterMultiton) { EXPECT_CALL(*cluster.info_, addedViaApi()).WillRepeatedly(Return(false)); EXPECT_CALL(*cluster.info_, type()).WillRepeatedly(Return(envoy::api::v2::Cluster::STATIC)); - EXPECT_THROW_WITH_REGEX( - subscriptionFromConfigSource(config), EnvoyException, - "envoy::api::v2::core::ConfigSource::GRPC must have a single gRPC service specified:"); + EXPECT_THROW_WITH_REGEX(subscriptionFromConfigSource(config), EnvoyException, + "envoy::api::v2::core::ConfigSource::.DELTA_.GRPC must have a " + "single gRPC service specified:"); } TEST_F(SubscriptionFactoryTest, FilesystemSubscription) { @@ -202,7 +204,7 @@ TEST_F(SubscriptionFactoryTest, LegacySubscription) { api_config_source->set_api_type(envoy::api::v2::core::ApiConfigSource::UNSUPPORTED_REST_LEGACY); api_config_source->add_cluster_names("static_cluster"); Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockCluster 
cluster; + Upstream::MockClusterMockPrioritySet cluster; cluster_map.emplace("static_cluster", cluster); EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); EXPECT_CALL(cluster, info()).Times(2); @@ -220,7 +222,7 @@ TEST_F(SubscriptionFactoryTest, HttpSubscriptionCustomRequestTimeout) { api_config_source->mutable_refresh_delay()->set_seconds(1); api_config_source->mutable_request_timeout()->set_seconds(5); Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockCluster cluster; + Upstream::MockClusterMockPrioritySet cluster; cluster_map.emplace("static_cluster", cluster); EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); EXPECT_CALL(cluster, info()).Times(2); @@ -240,7 +242,7 @@ TEST_F(SubscriptionFactoryTest, HttpSubscription) { api_config_source->add_cluster_names("static_cluster"); api_config_source->mutable_refresh_delay()->set_seconds(1); Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockCluster cluster; + Upstream::MockClusterMockPrioritySet cluster; cluster_map.emplace("static_cluster", cluster); EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); EXPECT_CALL(cluster, info()).Times(2); @@ -250,10 +252,11 @@ TEST_F(SubscriptionFactoryTest, HttpSubscription) { EXPECT_CALL(cm_.async_client_, send_(_, _, _)) .WillOnce(Invoke([this](Http::MessagePtr& request, Http::AsyncClient::Callbacks&, const Http::AsyncClient::RequestOptions&) { - EXPECT_EQ("POST", std::string(request->headers().Method()->value().c_str())); - EXPECT_EQ("static_cluster", std::string(request->headers().Host()->value().c_str())); + EXPECT_EQ("POST", std::string(request->headers().Method()->value().getStringView())); + EXPECT_EQ("static_cluster", + std::string(request->headers().Host()->value().getStringView())); EXPECT_EQ("/v2/discovery:endpoints", - std::string(request->headers().Path()->value().c_str())); + std::string(request->headers().Path()->value().getStringView())); return &http_request_; })); EXPECT_CALL(http_request_, 
cancel()); @@ -267,7 +270,7 @@ TEST_F(SubscriptionFactoryTest, HttpSubscriptionNoRefreshDelay) { api_config_source->set_api_type(envoy::api::v2::core::ApiConfigSource::REST); api_config_source->add_cluster_names("static_cluster"); Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockCluster cluster; + Upstream::MockClusterMockPrioritySet cluster; cluster_map.emplace("static_cluster", cluster); EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); EXPECT_CALL(cluster, info()).Times(2); @@ -285,7 +288,7 @@ TEST_F(SubscriptionFactoryTest, GrpcSubscription) { envoy::api::v2::core::GrpcService expected_grpc_service; expected_grpc_service.mutable_envoy_grpc()->set_cluster_name("static_cluster"); Upstream::ClusterManager::ClusterInfoMap cluster_map; - NiceMock cluster; + NiceMock cluster; cluster_map.emplace("static_cluster", cluster); EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); EXPECT_CALL(cm_, grpcAsyncClientManager()).WillOnce(ReturnRef(cm_.async_client_manager_)); @@ -338,7 +341,7 @@ TEST_P(SubscriptionFactoryTestApiConfigSource, DynamicCluster) { api_config_source->add_cluster_names("static_cluster"); } Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockCluster cluster; + Upstream::MockClusterMockPrioritySet cluster; cluster_map.emplace("static_cluster", cluster); EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); EXPECT_CALL(cluster, info()); @@ -360,7 +363,7 @@ TEST_P(SubscriptionFactoryTestApiConfigSource, EDSClusterBackingEDSCluster) { api_config_source->add_cluster_names("static_cluster"); } Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockCluster cluster; + Upstream::MockClusterMockPrioritySet cluster; cluster_map.emplace("static_cluster", cluster); EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); EXPECT_CALL(cluster, info()).Times(2); @@ -372,5 +375,6 @@ TEST_P(SubscriptionFactoryTestApiConfigSource, EDSClusterBackingEDSCluster) { "non-EDS cluster: 
'static_cluster' does not exist, was added via api, or is an EDS cluster"); } +} // namespace } // namespace Config } // namespace Envoy diff --git a/test/common/config/subscription_impl_test.cc b/test/common/config/subscription_impl_test.cc index 503f469fe8bb1..67a9566619c2a 100644 --- a/test/common/config/subscription_impl_test.cc +++ b/test/common/config/subscription_impl_test.cc @@ -1,29 +1,37 @@ #include +#include "test/common/config/delta_subscription_test_harness.h" #include "test/common/config/filesystem_subscription_test_harness.h" #include "test/common/config/grpc_subscription_test_harness.h" #include "test/common/config/http_subscription_test_harness.h" #include "test/common/config/subscription_test_harness.h" +using testing::InSequence; + namespace Envoy { namespace Config { namespace { enum class SubscriptionType { Grpc, + DeltaGrpc, Http, Filesystem, }; -class SubscriptionImplTest : public TestBaseWithParam { +class SubscriptionImplTest : public testing::TestWithParam { public: - SubscriptionImplTest() { + SubscriptionImplTest() : SubscriptionImplTest(std::chrono::milliseconds(0)) {} + SubscriptionImplTest(std::chrono::milliseconds init_fetch_timeout) { switch (GetParam()) { case SubscriptionType::Grpc: - test_harness_ = std::make_unique(); + test_harness_ = std::make_unique(init_fetch_timeout); + break; + case SubscriptionType::DeltaGrpc: + test_harness_ = std::make_unique(init_fetch_timeout); break; case SubscriptionType::Http: - test_harness_ = std::make_unique(); + test_harness_ = std::make_unique(init_fetch_timeout); break; case SubscriptionType::Filesystem: test_harness_ = std::make_unique(); @@ -31,16 +39,15 @@ class SubscriptionImplTest : public TestBaseWithParam { } } - void startSubscription(const std::vector& cluster_names) { + void startSubscription(const std::set& cluster_names) { test_harness_->startSubscription(cluster_names); } - void updateResources(const std::vector& cluster_names) { + void updateResources(const std::set& 
cluster_names) { test_harness_->updateResources(cluster_names); } - void expectSendMessage(const std::vector& cluster_names, - const std::string& version) { + void expectSendMessage(const std::set& cluster_names, const std::string& version) { test_harness_->expectSendMessage(cluster_names, version); } @@ -54,12 +61,29 @@ class SubscriptionImplTest : public TestBaseWithParam { test_harness_->deliverConfigUpdate(cluster_names, version, accept); } + void expectConfigUpdateFailed() { test_harness_->expectConfigUpdateFailed(); } + + void expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds timeout) { + test_harness_->expectEnableInitFetchTimeoutTimer(timeout); + } + + void expectDisableInitFetchTimeoutTimer() { test_harness_->expectDisableInitFetchTimeoutTimer(); } + + void callInitFetchTimeoutCb() { test_harness_->callInitFetchTimeoutCb(); } + std::unique_ptr test_harness_; }; -INSTANTIATE_TEST_SUITE_P(SubscriptionImplTest, SubscriptionImplTest, - testing::ValuesIn({SubscriptionType::Grpc, SubscriptionType::Http, - SubscriptionType::Filesystem})); +class SubscriptionImplInitFetchTimeoutTest : public SubscriptionImplTest { +public: + SubscriptionImplInitFetchTimeoutTest() : SubscriptionImplTest(std::chrono::milliseconds(1000)) {} +}; + +SubscriptionType types[] = {SubscriptionType::Grpc, SubscriptionType::DeltaGrpc, + SubscriptionType::Http, SubscriptionType::Filesystem}; +INSTANTIATE_TEST_SUITE_P(SubscriptionImplTest, SubscriptionImplTest, testing::ValuesIn(types)); +INSTANTIATE_TEST_SUITE_P(SubscriptionImplTest, SubscriptionImplInitFetchTimeoutTest, + testing::ValuesIn(types)); // Validate basic request-response succeeds. 
TEST_P(SubscriptionImplTest, InitialRequestResponse) { @@ -117,6 +141,37 @@ TEST_P(SubscriptionImplTest, UpdateResources) { verifyStats(3, 1, 0, 0, 7148434200721666028); } +// Validate that initial fetch timer is created and calls callback on timeout +TEST_P(SubscriptionImplInitFetchTimeoutTest, InitialFetchTimeout) { + InSequence s; + expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds(1000)); + startSubscription({"cluster0", "cluster1"}); + verifyStats(1, 0, 0, 0, 0); + expectConfigUpdateFailed(); + callInitFetchTimeoutCb(); + verifyStats(1, 0, 0, 0, 0); +} + +// Validate that initial fetch timer is disabled on config update +TEST_P(SubscriptionImplInitFetchTimeoutTest, DisableInitTimeoutOnSuccess) { + InSequence s; + expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds(1000)); + startSubscription({"cluster0", "cluster1"}); + verifyStats(1, 0, 0, 0, 0); + expectDisableInitFetchTimeoutTimer(); + deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); +} + +// Validate that initial fetch timer is disabled on config update failed +TEST_P(SubscriptionImplInitFetchTimeoutTest, DisableInitTimeoutOnFail) { + InSequence s; + expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds(1000)); + startSubscription({"cluster0", "cluster1"}); + verifyStats(1, 0, 0, 0, 0); + expectDisableInitFetchTimeoutTimer(); + deliverConfigUpdate({"cluster0", "cluster1"}, "0", false); +} + } // namespace } // namespace Config } // namespace Envoy diff --git a/test/common/config/subscription_test_harness.h b/test/common/config/subscription_test_harness.h index 16d1841217898..4a4c494bce1b5 100644 --- a/test/common/config/subscription_test_harness.h +++ b/test/common/config/subscription_test_harness.h @@ -3,9 +3,9 @@ #include "common/config/utility.h" #include "test/mocks/stats/mocks.h" -#include "test/test_common/test_base.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" namespace Envoy { namespace Config { @@ -24,20 +24,20 @@ class SubscriptionTestHarness { * Start 
subscription and set related expectations. * @param cluster_names initial cluster names to request via EDS. */ - virtual void startSubscription(const std::vector& cluster_names) PURE; + virtual void startSubscription(const std::set& cluster_names) PURE; /** * Update cluster names to be delivered via EDS. * @param cluster_names cluster names. */ - virtual void updateResources(const std::vector& cluster_names) PURE; + virtual void updateResources(const std::set& cluster_names) PURE; /** * Expect that an update request is sent by the Subscription implementation. * @param cluster_names cluster names to expect in the request. * @param version version_info to expect in the request. */ - virtual void expectSendMessage(const std::vector& cluster_names, + virtual void expectSendMessage(const std::set& cluster_names, const std::string& version) PURE; /** @@ -51,7 +51,9 @@ class SubscriptionTestHarness { virtual void verifyStats(uint32_t attempt, uint32_t success, uint32_t rejected, uint32_t failure, uint64_t version) { - EXPECT_EQ(attempt, stats_.update_attempt_.value()); + // TODO(fredlas) rework update_success_ to make sense across all xDS carriers. Its value in + // verifyStats() calls in many tests will probably have to be changed. 
+ UNREFERENCED_PARAMETER(attempt); EXPECT_EQ(success, stats_.update_success_.value()); EXPECT_EQ(rejected, stats_.update_rejected_.value()); EXPECT_EQ(failure, stats_.update_failure_.value()); @@ -62,6 +64,14 @@ class SubscriptionTestHarness { EXPECT_EQ(connected_state, stats_store_.gauge("control_plane.connected_state").value()); } + virtual void expectConfigUpdateFailed() PURE; + + virtual void expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds timeout) PURE; + + virtual void expectDisableInitFetchTimeoutTimer() PURE; + + virtual void callInitFetchTimeoutCb() PURE; + Stats::IsolatedStoreImpl stats_store_; SubscriptionStats stats_; }; diff --git a/test/common/config/utility_test.cc b/test/common/config/utility_test.cc index c3ebc97e5d235..c576c8f9fd25f 100644 --- a/test/common/config/utility_test.cc +++ b/test/common/config/utility_test.cc @@ -14,10 +14,10 @@ #include "test/mocks/stats/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/environment.h" -#include "test/test_common/test_base.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" using testing::_; using testing::AtLeast; @@ -80,6 +80,18 @@ TEST(UtilityTest, ApiConfigSourceRequestTimeout) { EXPECT_EQ(1234, Utility::apiConfigSourceRequestTimeout(api_config_source).count()); } +TEST(UtilityTest, ConfigSourceDefaultInitFetchTimeout) { + envoy::api::v2::core::ConfigSource config_source; + EXPECT_EQ(0, Utility::configSourceInitialFetchTimeout(config_source).count()); +} + +TEST(UtilityTest, ConfigSourceInitFetchTimeout) { + envoy::api::v2::core::ConfigSource config_source; + config_source.mutable_initial_fetch_timeout()->CopyFrom( + Protobuf::util::TimeUtil::MillisecondsToDuration(654)); + EXPECT_EQ(654, Utility::configSourceInitialFetchTimeout(config_source).count()); +} + TEST(UtilityTest, TranslateApiConfigSource) { envoy::api::v2::core::ApiConfigSource api_config_source_rest_legacy; 
Utility::translateApiConfigSource("test_rest_legacy_cluster", 10000, @@ -117,65 +129,6 @@ TEST(UtilityTest, createTagProducer) { ASSERT_EQ(tags.size(), 1); } -TEST(UtilityTest, ObjNameLength) { - Stats::StatsOptionsImpl stats_options; - std::string name = "listenerwithareallyreallyreallyreallyreallyreallyreallyreallyreallyreallyreal" - "lyreallyreallyreallyreallyreallylongnamemorethanmaxcharsallowedbyschema"; - std::string err_prefix; - std::string err_suffix = fmt::format(": Length of {} ({}) exceeds allowed maximum length ({})", - name, name.length(), stats_options.maxNameLength()); - { - err_prefix = "test"; - EXPECT_THROW_WITH_MESSAGE(Utility::checkObjNameLength(err_prefix, name, stats_options), - EnvoyException, err_prefix + err_suffix); - } - - { - err_prefix = "Invalid listener name"; - std::string json = - R"EOF({ "name": ")EOF" + name + R"EOF(", "address": "foo", "filters":[]})EOF"; - auto json_object_ptr = Json::Factory::loadFromString(json); - - envoy::api::v2::Listener listener; - EXPECT_THROW_WITH_MESSAGE( - Config::LdsJson::translateListener(*json_object_ptr, listener, stats_options), - EnvoyException, err_prefix + err_suffix); - } - - { - err_prefix = "Invalid virtual host name"; - std::string json = R"EOF({ "name": ")EOF" + name + R"EOF(", "domains": [], "routes": []})EOF"; - auto json_object_ptr = Json::Factory::loadFromString(json); - envoy::api::v2::route::VirtualHost vhost; - EXPECT_THROW_WITH_MESSAGE( - Config::RdsJson::translateVirtualHost(*json_object_ptr, vhost, stats_options), - EnvoyException, err_prefix + err_suffix); - } - - { - err_prefix = "Invalid cluster name"; - std::string json = - R"EOF({ "name": ")EOF" + name + - R"EOF(", "type": "static", "lb_type": "random", "connect_timeout_ms" : 1})EOF"; - auto json_object_ptr = Json::Factory::loadFromString(json); - envoy::api::v2::Cluster cluster; - envoy::api::v2::core::ConfigSource eds_config; - EXPECT_THROW_WITH_MESSAGE( - Config::CdsJson::translateCluster(*json_object_ptr, eds_config, 
cluster, stats_options), - EnvoyException, err_prefix + err_suffix); - } - - { - err_prefix = "Invalid route_config name"; - std::string json = R"EOF({ "route_config_name": ")EOF" + name + R"EOF(", "cluster": "foo"})EOF"; - auto json_object_ptr = Json::Factory::loadFromString(json); - envoy::config::filter::network::http_connection_manager::v2::Rds rds; - EXPECT_THROW_WITH_MESSAGE( - Config::Utility::translateRdsConfig(*json_object_ptr, rds, stats_options), EnvoyException, - err_prefix + err_suffix); - } -} - TEST(UtilityTest, UnixClusterDns) { std::string cluster_type; @@ -186,10 +139,9 @@ TEST(UtilityTest, UnixClusterDns) { auto json_object_ptr = Json::Factory::loadFromString(json); envoy::api::v2::Cluster cluster; envoy::api::v2::core::ConfigSource eds_config; - Stats::StatsOptionsImpl stats_options; EXPECT_THROW_WITH_MESSAGE( - Config::CdsJson::translateCluster(*json_object_ptr, eds_config, cluster, stats_options), - EnvoyException, "unresolved URL must be TCP scheme, got: unix:///test.sock"); + Config::CdsJson::translateCluster(*json_object_ptr, eds_config, cluster), EnvoyException, + "unresolved URL must be TCP scheme, got: unix:///test.sock"); } TEST(UtilityTest, UnixClusterStatic) { @@ -202,14 +154,12 @@ TEST(UtilityTest, UnixClusterStatic) { auto json_object_ptr = Json::Factory::loadFromString(json); envoy::api::v2::Cluster cluster; envoy::api::v2::core::ConfigSource eds_config; - Stats::StatsOptionsImpl stats_options; - Config::CdsJson::translateCluster(*json_object_ptr, eds_config, cluster, stats_options); + Config::CdsJson::translateCluster(*json_object_ptr, eds_config, cluster); EXPECT_EQ("/test.sock", cluster.hosts(0).pipe().path()); } TEST(UtilityTest, CheckFilesystemSubscriptionBackingPath) { - Stats::MockIsolatedStatsStore stats_store; - Api::ApiPtr api = Api::createApiForTest(stats_store); + Api::ApiPtr api = Api::createApiForTest(); EXPECT_THROW_WITH_MESSAGE( Utility::checkFilesystemSubscriptionBackingPath("foo", *api), EnvoyException, @@ -269,7 
+219,8 @@ TEST(UtilityTest, FactoryForGrpcApiConfigSource) { EXPECT_THROW_WITH_REGEX( Utility::factoryForGrpcApiConfigSource(async_client_manager, api_config_source, scope), EnvoyException, - "envoy::api::v2::core::ConfigSource::GRPC must have a single gRPC service specified:"); + "envoy::api::v2::core::ConfigSource::.DELTA_.GRPC must have a single gRPC service " + "specified:"); } { @@ -280,7 +231,8 @@ TEST(UtilityTest, FactoryForGrpcApiConfigSource) { EXPECT_THROW_WITH_REGEX( Utility::factoryForGrpcApiConfigSource(async_client_manager, api_config_source, scope), EnvoyException, - "envoy::api::v2::core::ConfigSource::GRPC must not have a cluster name specified:"); + "envoy::api::v2::core::ConfigSource::.DELTA_.GRPC must not have a cluster name " + "specified:"); } { @@ -291,7 +243,8 @@ TEST(UtilityTest, FactoryForGrpcApiConfigSource) { EXPECT_THROW_WITH_REGEX( Utility::factoryForGrpcApiConfigSource(async_client_manager, api_config_source, scope), EnvoyException, - "envoy::api::v2::core::ConfigSource::GRPC must not have a cluster name specified:"); + "envoy::api::v2::core::ConfigSource::.DELTA_.GRPC must not have a cluster name " + "specified:"); } { @@ -302,7 +255,7 @@ TEST(UtilityTest, FactoryForGrpcApiConfigSource) { EXPECT_THROW_WITH_REGEX( Utility::factoryForGrpcApiConfigSource(async_client_manager, api_config_source, scope), EnvoyException, - "envoy::api::v2::core::ConfigSource, if not of type gRPC, must not have a gRPC service " + "envoy::api::v2::core::ConfigSource, if not a gRPC type, must not have a gRPC service " "specified:"); } @@ -312,7 +265,7 @@ TEST(UtilityTest, FactoryForGrpcApiConfigSource) { api_config_source.add_cluster_names("foo"); EXPECT_THROW_WITH_REGEX( Utility::factoryForGrpcApiConfigSource(async_client_manager, api_config_source, scope), - EnvoyException, "envoy::api::v2::core::ConfigSource type must be GRPC:"); + EnvoyException, "envoy::api::v2::core::ConfigSource type must be gRPC:"); } { @@ -358,7 +311,7 @@ 
TEST(CheckApiConfigSourceSubscriptionBackingClusterTest, GrpcClusterTestAcrossTy "'foo_cluster' does not exist, was added via api, or is an EDS cluster"); // Dynamic Cluster. - Upstream::MockCluster cluster; + Upstream::MockClusterMockPrioritySet cluster; cluster_map.emplace("foo_cluster", cluster); EXPECT_CALL(cluster, info()); EXPECT_CALL(*cluster.info_, addedViaApi()).WillOnce(Return(true)); @@ -389,7 +342,8 @@ TEST(CheckApiConfigSourceSubscriptionBackingClusterTest, GrpcClusterTestAcrossTy EXPECT_THROW_WITH_REGEX( Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source), EnvoyException, - "envoy::api::v2::core::ConfigSource::GRPC must not have a cluster name specified:"); + "envoy::api::v2::core::ConfigSource::.DELTA_.GRPC must not have a cluster name " + "specified:"); } TEST(CheckApiConfigSourceSubscriptionBackingClusterTest, RestClusterTestAcrossTypes) { @@ -407,7 +361,7 @@ TEST(CheckApiConfigSourceSubscriptionBackingClusterTest, RestClusterTestAcrossTy "'foo_cluster' does not exist, was added via api, or is an EDS cluster"); // Dynamic Cluster. 
- Upstream::MockCluster cluster; + Upstream::MockClusterMockPrioritySet cluster; cluster_map.emplace("foo_cluster", cluster); EXPECT_CALL(cluster, info()); EXPECT_CALL(*cluster.info_, addedViaApi()).WillOnce(Return(true)); diff --git a/test/common/crypto/utility_test.cc b/test/common/crypto/utility_test.cc index e3730caab9593..bea2f8f9e0d54 100644 --- a/test/common/crypto/utility_test.cc +++ b/test/common/crypto/utility_test.cc @@ -2,11 +2,12 @@ #include "common/common/hex.h" #include "common/crypto/utility.h" -#include "test/test_common/test_base.h" +#include "gtest/gtest.h" namespace Envoy { namespace Common { namespace Crypto { +namespace { TEST(UtilityTest, TestSha256Digest) { const Buffer::OwnedImpl buffer("test data"); @@ -49,6 +50,7 @@ TEST(UtilityTest, TestSha256HmacWithEmptyArguments) { EXPECT_EQ("b613679a0814d9ec772f95d778c35fc5ff1697c493715653c6c712144292c5ad", Hex::encode(hmac)); } +} // namespace } // namespace Crypto } // namespace Common } // namespace Envoy diff --git a/test/common/decompressor/zlib_decompressor_impl_test.cc b/test/common/decompressor/zlib_decompressor_impl_test.cc index 3dfa3600966a4..bb715643e1239 100644 --- a/test/common/decompressor/zlib_decompressor_impl_test.cc +++ b/test/common/decompressor/zlib_decompressor_impl_test.cc @@ -3,14 +3,15 @@ #include "common/compressor/zlib_compressor_impl.h" #include "common/decompressor/zlib_decompressor_impl.h" -#include "test/test_common/test_base.h" #include "test/test_common/utility.h" +#include "gtest/gtest.h" + namespace Envoy { namespace Decompressor { namespace { -class ZlibDecompressorImplTest : public TestBase { +class ZlibDecompressorImplTest : public testing::Test { protected: void drainBuffer(Buffer::OwnedImpl& buffer) { buffer.drain(buffer.length()); } diff --git a/test/common/event/BUILD b/test/common/event/BUILD index 0a5ad582e66b2..4e432fb57625e 100644 --- a/test/common/event/BUILD +++ b/test/common/event/BUILD @@ -17,6 +17,7 @@ envoy_cc_test( 
"//source/common/event:dispatcher_lib", "//source/common/stats:isolated_store_lib", "//test/mocks:common_lib", + "//test/mocks/stats:stats_mocks", "//test/test_common:utility_lib", ], ) diff --git a/test/common/event/dispatched_thread_impl_test.cc b/test/common/event/dispatched_thread_impl_test.cc index fc9ba00435fbd..704fbc354512d 100644 --- a/test/common/event/dispatched_thread_impl_test.cc +++ b/test/common/event/dispatched_thread_impl_test.cc @@ -9,18 +9,19 @@ #include "test/mocks/common.h" #include "test/mocks/server/mocks.h" #include "test/mocks/stats/mocks.h" -#include "test/test_common/test_base.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" using testing::InSequence; using testing::NiceMock; namespace Envoy { namespace Event { +namespace { -class DispatchedThreadTest : public TestBase { +class DispatchedThreadTest : public testing::Test { protected: DispatchedThreadTest() : config_(1000, 1000, 1000, 1000), api_(Api::createApiForTest(fakestats_)), thread_(*api_), @@ -45,5 +46,6 @@ TEST_F(DispatchedThreadTest, PostCallbackTest) { thread_.exit(); } +} // namespace } // namespace Event } // namespace Envoy diff --git a/test/common/event/dispatcher_impl_test.cc b/test/common/event/dispatcher_impl_test.cc index 6677fbfe679d9..ba06d0a4be8dd 100644 --- a/test/common/event/dispatcher_impl_test.cc +++ b/test/common/event/dispatcher_impl_test.cc @@ -8,15 +8,21 @@ #include "common/stats/isolated_store_impl.h" #include "test/mocks/common.h" -#include "test/test_common/test_base.h" +#include "test/mocks/stats/mocks.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" +using testing::_; using testing::InSequence; +using testing::NiceMock; +using testing::Return; +using testing::StartsWith; namespace Envoy { namespace Event { +namespace { class TestDeferredDeletable : public DeferredDeletable { public: @@ -29,41 +35,40 @@ class TestDeferredDeletable : public DeferredDeletable { 
TEST(DeferredDeleteTest, DeferredDelete) { InSequence s; - Stats::IsolatedStoreImpl stats_store; - Api::ApiPtr api = Api::createApiForTest(stats_store); - DispatcherImpl dispatcher(*api); + Api::ApiPtr api = Api::createApiForTest(); + DispatcherPtr dispatcher(api->allocateDispatcher()); ReadyWatcher watcher1; - dispatcher.deferredDelete( + dispatcher->deferredDelete( DeferredDeletablePtr{new TestDeferredDeletable([&]() -> void { watcher1.ready(); })}); // The first one will get deleted inline. EXPECT_CALL(watcher1, ready()); - dispatcher.clearDeferredDeleteList(); + dispatcher->clearDeferredDeleteList(); // This one does a nested deferred delete. We should need two clear calls to actually get // rid of it with the vector swapping. We also test that inline clear() call does nothing. ReadyWatcher watcher2; ReadyWatcher watcher3; - dispatcher.deferredDelete(DeferredDeletablePtr{new TestDeferredDeletable([&]() -> void { + dispatcher->deferredDelete(DeferredDeletablePtr{new TestDeferredDeletable([&]() -> void { watcher2.ready(); - dispatcher.deferredDelete( + dispatcher->deferredDelete( DeferredDeletablePtr{new TestDeferredDeletable([&]() -> void { watcher3.ready(); })}); - dispatcher.clearDeferredDeleteList(); + dispatcher->clearDeferredDeleteList(); })}); EXPECT_CALL(watcher2, ready()); - dispatcher.clearDeferredDeleteList(); + dispatcher->clearDeferredDeleteList(); EXPECT_CALL(watcher3, ready()); - dispatcher.clearDeferredDeleteList(); + dispatcher->clearDeferredDeleteList(); } -class DispatcherImplTest : public TestBase { +class DispatcherImplTest : public testing::Test { protected: DispatcherImplTest() - : api_(Api::createApiForTest(stat_store_)), - dispatcher_(std::make_unique(*api_)), work_finished_(false) { + : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher()), + work_finished_(false) { dispatcher_thread_ = api_->threadFactory().createThread([this]() { // Must create a keepalive timer to keep the dispatcher from exiting. 
std::chrono::milliseconds time_interval(500); @@ -80,7 +85,7 @@ class DispatcherImplTest : public TestBase { dispatcher_thread_->join(); } - Stats::IsolatedStoreImpl stat_store_; + NiceMock scope_; // Used in InitializeStats, must outlive dispatcher_->exit(). Api::ApiPtr api_; Thread::ThreadPtr dispatcher_thread_; DispatcherPtr dispatcher_; @@ -91,6 +96,14 @@ class DispatcherImplTest : public TestBase { TimerPtr keepalive_timer_; }; +// TODO(mergeconflict): We also need integration testing to validate that the expected histograms +// are written when `enable_dispatcher_stats` is true. See issue #6582. +TEST_F(DispatcherImplTest, InitializeStats) { + EXPECT_CALL(scope_, histogram("test.dispatcher.loop_duration_us")); + EXPECT_CALL(scope_, histogram("test.dispatcher.poll_delay_us")); + dispatcher_->initializeStats(scope_, "test."); +} + TEST_F(DispatcherImplTest, Post) { dispatcher_->post([this]() { { @@ -157,6 +170,7 @@ TEST_F(DispatcherImplTest, Timer) { } cv_.notifyOne(); }); + EXPECT_FALSE(timer->enabled()); } cv_.notifyOne(); }); @@ -172,5 +186,17 @@ TEST_F(DispatcherImplTest, Timer) { } } +TEST(TimerImplTest, TimerEnabledDisabled) { + Api::ApiPtr api = Api::createApiForTest(); + DispatcherPtr dispatcher(api->allocateDispatcher()); + Event::TimerPtr timer = dispatcher->createTimer([] {}); + EXPECT_FALSE(timer->enabled()); + timer->enableTimer(std::chrono::milliseconds(0)); + EXPECT_TRUE(timer->enabled()); + dispatcher->run(Dispatcher::RunType::NonBlock); + EXPECT_FALSE(timer->enabled()); +} + +} // namespace } // namespace Event } // namespace Envoy diff --git a/test/common/event/file_event_impl_test.cc b/test/common/event/file_event_impl_test.cc index d43dbeef0bc40..3794f06788848 100644 --- a/test/common/event/file_event_impl_test.cc +++ b/test/common/event/file_event_impl_test.cc @@ -7,15 +7,17 @@ #include "test/mocks/common.h" #include "test/test_common/environment.h" -#include "test/test_common/test_base.h" #include "test/test_common/utility.h" +#include 
"gtest/gtest.h" + namespace Envoy { namespace Event { +namespace { -class FileEventImplTest : public TestBase { +class FileEventImplTest : public testing::Test { public: - FileEventImplTest() : api_(Api::createApiForTest(stats_store_)), dispatcher_(*api_) {} + FileEventImplTest() : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher()) {} void SetUp() override { int rc = socketpair(AF_UNIX, SOCK_DGRAM, 0, fds_); @@ -32,12 +34,11 @@ class FileEventImplTest : public TestBase { protected: int fds_[2]; - Stats::IsolatedStoreImpl stats_store_; Api::ApiPtr api_; - DispatcherImpl dispatcher_; + DispatcherPtr dispatcher_; }; -class FileEventImplActivateTest : public TestBaseWithParam {}; +class FileEventImplActivateTest : public testing::TestWithParam {}; INSTANTIATE_TEST_SUITE_P(IpVersions, FileEventImplActivateTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), @@ -52,9 +53,8 @@ TEST_P(FileEventImplActivateTest, Activate) { } ASSERT_NE(-1, fd); - Stats::IsolatedStoreImpl stats_store; - Api::ApiPtr api = Api::createApiForTest(stats_store); - DispatcherImpl dispatcher(*api); + Api::ApiPtr api = Api::createApiForTest(); + DispatcherPtr dispatcher(api->allocateDispatcher()); ReadyWatcher read_event; EXPECT_CALL(read_event, ready()).Times(1); ReadyWatcher write_event; @@ -62,7 +62,7 @@ TEST_P(FileEventImplActivateTest, Activate) { ReadyWatcher closed_event; EXPECT_CALL(closed_event, ready()).Times(1); - Event::FileEventPtr file_event = dispatcher.createFileEvent( + Event::FileEventPtr file_event = dispatcher->createFileEvent( fd, [&](uint32_t events) -> void { if (events & FileReadyType::Read) { @@ -80,7 +80,7 @@ TEST_P(FileEventImplActivateTest, Activate) { FileTriggerType::Edge, FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed); file_event->activate(FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed); - dispatcher.run(Event::Dispatcher::RunType::NonBlock); + 
dispatcher->run(Event::Dispatcher::RunType::NonBlock); close(fd); } @@ -91,7 +91,7 @@ TEST_F(FileEventImplTest, EdgeTrigger) { ReadyWatcher write_event; EXPECT_CALL(write_event, ready()).Times(1); - Event::FileEventPtr file_event = dispatcher_.createFileEvent( + Event::FileEventPtr file_event = dispatcher_->createFileEvent( fds_[0], [&](uint32_t events) -> void { if (events & FileReadyType::Read) { @@ -104,7 +104,7 @@ TEST_F(FileEventImplTest, EdgeTrigger) { }, FileTriggerType::Edge, FileReadyType::Read | FileReadyType::Write); - dispatcher_.run(Event::Dispatcher::RunType::NonBlock); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); } TEST_F(FileEventImplTest, LevelTrigger) { @@ -114,11 +114,11 @@ TEST_F(FileEventImplTest, LevelTrigger) { EXPECT_CALL(write_event, ready()).Times(2); int count = 2; - Event::FileEventPtr file_event = dispatcher_.createFileEvent( + Event::FileEventPtr file_event = dispatcher_->createFileEvent( fds_[0], [&](uint32_t events) -> void { if (count-- == 0) { - dispatcher_.exit(); + dispatcher_->exit(); return; } if (events & FileReadyType::Read) { @@ -131,7 +131,7 @@ TEST_F(FileEventImplTest, LevelTrigger) { }, FileTriggerType::Level, FileReadyType::Read | FileReadyType::Write); - dispatcher_.run(Event::Dispatcher::RunType::Block); + dispatcher_->run(Event::Dispatcher::RunType::Block); } TEST_F(FileEventImplTest, SetEnabled) { @@ -140,7 +140,7 @@ TEST_F(FileEventImplTest, SetEnabled) { ReadyWatcher write_event; EXPECT_CALL(write_event, ready()).Times(2); - Event::FileEventPtr file_event = dispatcher_.createFileEvent( + Event::FileEventPtr file_event = dispatcher_->createFileEvent( fds_[0], [&](uint32_t events) -> void { if (events & FileReadyType::Read) { @@ -154,17 +154,18 @@ TEST_F(FileEventImplTest, SetEnabled) { FileTriggerType::Edge, FileReadyType::Read | FileReadyType::Write); file_event->setEnabled(FileReadyType::Read); - dispatcher_.run(Event::Dispatcher::RunType::NonBlock); + 
dispatcher_->run(Event::Dispatcher::RunType::NonBlock); file_event->setEnabled(FileReadyType::Write); - dispatcher_.run(Event::Dispatcher::RunType::NonBlock); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); file_event->setEnabled(0); - dispatcher_.run(Event::Dispatcher::RunType::NonBlock); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); file_event->setEnabled(FileReadyType::Read | FileReadyType::Write); - dispatcher_.run(Event::Dispatcher::RunType::NonBlock); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); } +} // namespace } // namespace Event } // namespace Envoy diff --git a/test/common/filesystem/BUILD b/test/common/filesystem/BUILD index c34f8969df993..6f385036615a6 100644 --- a/test/common/filesystem/BUILD +++ b/test/common/filesystem/BUILD @@ -12,20 +12,8 @@ envoy_cc_test( name = "filesystem_impl_test", srcs = ["filesystem_impl_test.cc"], deps = [ - "//source/common/api:api_lib", - "//source/common/api:os_sys_calls_lib", - "//source/common/common:thread_lib", - "//source/common/event:dispatcher_includes", - "//source/common/event:dispatcher_lib", "//source/common/filesystem:filesystem_lib", - "//source/common/stats:isolated_store_lib", - "//source/common/stats:stats_lib", - "//test/mocks/api:api_mocks", - "//test/mocks/event:event_mocks", - "//test/mocks/filesystem:filesystem_mocks", "//test/test_common:environment_lib", - "//test/test_common:threadsafe_singleton_injector_lib", - "//test/test_common:utility_lib", ], ) @@ -46,7 +34,6 @@ envoy_cc_test( "//source/common/event:dispatcher_includes", "//source/common/event:dispatcher_lib", "//source/common/filesystem:watcher_lib", - "//source/common/stats:isolated_store_lib", "//test/test_common:environment_lib", ], ) diff --git a/test/common/filesystem/directory_test.cc b/test/common/filesystem/directory_test.cc index 9eb3f0d3ddd73..5536f77d72863 100644 --- a/test/common/filesystem/directory_test.cc +++ b/test/common/filesystem/directory_test.cc @@ -6,10 +6,10 @@ #include 
"common/filesystem/directory.h" #include "test/test_common/environment.h" -#include "test/test_common/test_base.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" namespace Envoy { namespace Filesystem { @@ -18,7 +18,7 @@ namespace Filesystem { // as it looks like some versions of libstdc++ have a bug in // std::experimental::filesystem::remove_all where it fails with nested directories: // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=71313 -class DirectoryTest : public TestBase { +class DirectoryTest : public testing::Test { public: DirectoryTest() : dir_path_(TestEnvironment::temporaryPath("envoy_test")) { files_to_remove_.push(dir_path_); diff --git a/test/common/filesystem/filesystem_impl_test.cc b/test/common/filesystem/filesystem_impl_test.cc index 3f675d9696165..47a657d220a80 100644 --- a/test/common/filesystem/filesystem_impl_test.cc +++ b/test/common/filesystem/filesystem_impl_test.cc @@ -1,66 +1,68 @@ #include #include -#include "common/api/api_impl.h" -#include "common/api/os_sys_calls_impl.h" -#include "common/common/lock_guard.h" -#include "common/common/thread.h" -#include "common/event/dispatcher_impl.h" +#include "common/common/assert.h" #include "common/filesystem/filesystem_impl.h" -#include "common/stats/isolated_store_impl.h" -#include "test/mocks/api/mocks.h" -#include "test/mocks/event/mocks.h" -#include "test/mocks/filesystem/mocks.h" #include "test/test_common/environment.h" -#include "test/test_common/test_base.h" -#include "test/test_common/threadsafe_singleton_injector.h" -#include "test/test_common/utility.h" #include "gmock/gmock.h" - -using testing::_; -using testing::InSequence; -using testing::Invoke; -using testing::NiceMock; -using testing::Return; -using testing::SaveArg; -using testing::Sequence; -using testing::Throw; +#include "gtest/gtest.h" namespace Envoy { +namespace Filesystem { -class FileSystemImplTest : public TestBase { +class FileSystemImplTest : public testing::Test { 
protected: - FileSystemImplTest() - : file_system_(std::chrono::milliseconds(10000), Thread::threadFactoryForTest(), - stats_store_) {} - - const std::chrono::milliseconds timeout_40ms_{40}; - Stats::IsolatedStoreImpl stats_store_; - Filesystem::InstanceImpl file_system_; + int getFd(File* file) { +#ifdef WIN32 + auto file_impl = dynamic_cast(file); +#else + auto file_impl = dynamic_cast(file); +#endif + RELEASE_ASSERT(file_impl != nullptr, "failed to cast File* to FileImpl*"); + return file_impl->fd_; + } +#ifdef WIN32 + InstanceImplWin32 file_system_; +#else + Api::SysCallStringResult canonicalPath(const std::string& path) { + return file_system_.canonicalPath(path); + } + InstanceImplPosix file_system_; +#endif }; -TEST_F(FileSystemImplTest, BadFile) { - Event::MockDispatcher dispatcher; - Thread::MutexBasicLockable lock; - EXPECT_CALL(dispatcher, createTimer_(_)); - EXPECT_THROW(file_system_.createFile("", dispatcher, lock), EnvoyException); -} - TEST_F(FileSystemImplTest, fileExists) { - EXPECT_TRUE(file_system_.fileExists("/dev/null")); EXPECT_FALSE(file_system_.fileExists("/dev/blahblahblah")); +#ifdef WIN32 + const std::string file_path = TestEnvironment::writeStringToFileForTest("test_envoy", "x"); + EXPECT_TRUE(file_system_.fileExists(file_path)); + EXPECT_TRUE(file_system_.fileExists("c:/windows")); +#else + EXPECT_TRUE(file_system_.fileExists("/dev/null")); + EXPECT_TRUE(file_system_.fileExists("/dev")); +#endif } TEST_F(FileSystemImplTest, directoryExists) { - EXPECT_TRUE(file_system_.directoryExists("/dev")); - EXPECT_FALSE(file_system_.directoryExists("/dev/null")); EXPECT_FALSE(file_system_.directoryExists("/dev/blahblah")); +#ifdef WIN32 + const std::string file_path = TestEnvironment::writeStringToFileForTest("test_envoy", "x"); + EXPECT_FALSE(file_system_.directoryExists(file_path)); + EXPECT_TRUE(file_system_.directoryExists("c:/windows")); +#else + EXPECT_FALSE(file_system_.directoryExists("/dev/null")); + 
EXPECT_TRUE(file_system_.directoryExists("/dev")); +#endif } TEST_F(FileSystemImplTest, fileSize) { +#ifdef WIN32 + EXPECT_EQ(0, file_system_.fileSize("NUL")); +#else EXPECT_EQ(0, file_system_.fileSize("/dev/null")); +#endif EXPECT_EQ(-1, file_system_.fileSize("/dev/blahblahblah")); const std::string data = "test string\ntest"; const std::string file_path = TestEnvironment::writeStringToFileForTest("test_envoy", data); @@ -103,18 +105,30 @@ TEST_F(FileSystemImplTest, fileReadToEndBlacklisted) { EXPECT_THROW(file_system_.fileReadToEnd("/sys/block/sda/dev"), EnvoyException); } -TEST_F(FileSystemImplTest, CanonicalPathSuccess) { - EXPECT_EQ("/", file_system_.canonicalPath("//").rc_); -} +#ifndef WIN32 +TEST_F(FileSystemImplTest, CanonicalPathSuccess) { EXPECT_EQ("/", canonicalPath("//").rc_); } +#endif +#ifndef WIN32 TEST_F(FileSystemImplTest, CanonicalPathFail) { - const Api::SysCallStringResult result = file_system_.canonicalPath("/_some_non_existent_file"); + const Api::SysCallStringResult result = canonicalPath("/_some_non_existent_file"); EXPECT_TRUE(result.rc_.empty()); EXPECT_STREQ("No such file or directory", ::strerror(result.errno_)); } +#endif TEST_F(FileSystemImplTest, IllegalPath) { EXPECT_FALSE(file_system_.illegalPath("/")); + EXPECT_FALSE(file_system_.illegalPath("//")); +#ifdef WIN32 + EXPECT_FALSE(file_system_.illegalPath("/dev")); + EXPECT_FALSE(file_system_.illegalPath("/dev/")); + EXPECT_FALSE(file_system_.illegalPath("/proc")); + EXPECT_FALSE(file_system_.illegalPath("/proc/")); + EXPECT_FALSE(file_system_.illegalPath("/sys")); + EXPECT_FALSE(file_system_.illegalPath("/sys/")); + EXPECT_FALSE(file_system_.illegalPath("/_some_non_existent_file")); +#else EXPECT_TRUE(file_system_.illegalPath("/dev")); EXPECT_TRUE(file_system_.illegalPath("/dev/")); EXPECT_TRUE(file_system_.illegalPath("/proc")); @@ -122,290 +136,114 @@ TEST_F(FileSystemImplTest, IllegalPath) { EXPECT_TRUE(file_system_.illegalPath("/sys")); 
EXPECT_TRUE(file_system_.illegalPath("/sys/")); EXPECT_TRUE(file_system_.illegalPath("/_some_non_existent_file")); +#endif } -TEST_F(FileSystemImplTest, flushToLogFilePeriodically) { - NiceMock dispatcher; - NiceMock* timer = new NiceMock(&dispatcher); - - Thread::MutexBasicLockable mutex; - NiceMock os_sys_calls; - TestThreadsafeSingletonInjector os_calls(&os_sys_calls); - - EXPECT_CALL(os_sys_calls, open_(_, _, _)).WillOnce(Return(5)); - Filesystem::FileSharedPtr file = file_system_.createFile("", dispatcher, mutex, timeout_40ms_); +TEST_F(FileSystemImplTest, ConstructedFileNotOpen) { + const std::string new_file_path = TestEnvironment::temporaryPath("envoy_this_not_exist"); + ::unlink(new_file_path.c_str()); - EXPECT_CALL(*timer, enableTimer(timeout_40ms_)); - EXPECT_CALL(os_sys_calls, write_(_, _, _)) - .WillOnce(Invoke([](int fd, const void* buffer, size_t num_bytes) -> ssize_t { - std::string written = std::string(reinterpret_cast(buffer), num_bytes); - EXPECT_EQ("test", written); - EXPECT_EQ(5, fd); - - return num_bytes; - })); - - file->write("test"); - - { - Thread::LockGuard lock(os_sys_calls.write_mutex_); - while (os_sys_calls.num_writes_ != 1) { - os_sys_calls.write_event_.wait(os_sys_calls.write_mutex_); - } - } - - EXPECT_CALL(os_sys_calls, write_(_, _, _)) - .WillOnce(Invoke([](int fd, const void* buffer, size_t num_bytes) -> ssize_t { - std::string written = std::string(reinterpret_cast(buffer), num_bytes); - EXPECT_EQ("test2", written); - EXPECT_EQ(5, fd); - - return num_bytes; - })); - - // make sure timer is re-enabled on callback call - file->write("test2"); - EXPECT_CALL(*timer, enableTimer(timeout_40ms_)); - timer->callback_(); - - { - Thread::LockGuard lock(os_sys_calls.write_mutex_); - while (os_sys_calls.num_writes_ != 2) { - os_sys_calls.write_event_.wait(os_sys_calls.write_mutex_); - } - } + FilePtr file = file_system_.createFile(new_file_path); + EXPECT_FALSE(file->isOpen()); } -TEST_F(FileSystemImplTest, flushToLogFileOnDemand) { - 
NiceMock dispatcher; - NiceMock* timer = new NiceMock(&dispatcher); - - Thread::MutexBasicLockable mutex; - NiceMock os_sys_calls; - TestThreadsafeSingletonInjector os_calls(&os_sys_calls); - - EXPECT_CALL(os_sys_calls, open_(_, _, _)).WillOnce(Return(5)); - Filesystem::FileSharedPtr file = file_system_.createFile("", dispatcher, mutex, timeout_40ms_); +TEST_F(FileSystemImplTest, Open) { + const std::string new_file_path = TestEnvironment::temporaryPath("envoy_this_not_exist"); + ::unlink(new_file_path.c_str()); - EXPECT_CALL(*timer, enableTimer(timeout_40ms_)); - - // The first write to a given file will start the flush thread, which can flush - // immediately (race on whether it will or not). So do a write and flush to - // get that state out of the way, then test that small writes don't trigger a flush. - EXPECT_CALL(os_sys_calls, write_(_, _, _)) - .WillOnce(Invoke([](int, const void*, size_t num_bytes) -> ssize_t { return num_bytes; })); - file->write("prime-it"); - file->flush(); - uint32_t expected_writes = 1; - { - Thread::LockGuard lock(os_sys_calls.write_mutex_); - EXPECT_EQ(expected_writes, os_sys_calls.num_writes_); - } - - EXPECT_CALL(os_sys_calls, write_(_, _, _)) - .WillOnce(Invoke([](int fd, const void* buffer, size_t num_bytes) -> ssize_t { - std::string written = std::string(reinterpret_cast(buffer), num_bytes); - EXPECT_EQ("test", written); - EXPECT_EQ(5, fd); - - return num_bytes; - })); - - file->write("test"); - - { - Thread::LockGuard lock(os_sys_calls.write_mutex_); - EXPECT_EQ(expected_writes, os_sys_calls.num_writes_); - } - - file->flush(); - expected_writes++; - { - Thread::LockGuard lock(os_sys_calls.write_mutex_); - EXPECT_EQ(expected_writes, os_sys_calls.num_writes_); - } - - EXPECT_CALL(os_sys_calls, write_(_, _, _)) - .WillOnce(Invoke([](int fd, const void* buffer, size_t num_bytes) -> ssize_t { - std::string written = std::string(reinterpret_cast(buffer), num_bytes); - EXPECT_EQ("test2", written); - EXPECT_EQ(5, fd); - - return 
num_bytes; - })); - - // make sure timer is re-enabled on callback call - file->write("test2"); - EXPECT_CALL(*timer, enableTimer(timeout_40ms_)); - timer->callback_(); - expected_writes++; - - { - Thread::LockGuard lock(os_sys_calls.write_mutex_); - while (os_sys_calls.num_writes_ != expected_writes) { - os_sys_calls.write_event_.wait(os_sys_calls.write_mutex_); - } - } + FilePtr file = file_system_.createFile(new_file_path); + const Api::IoCallBoolResult result = file->open(); + EXPECT_TRUE(result.rc_); + EXPECT_TRUE(file->isOpen()); } -TEST_F(FileSystemImplTest, reopenFile) { - NiceMock dispatcher; - NiceMock* timer = new NiceMock(&dispatcher); +TEST_F(FileSystemImplTest, OpenTwice) { + const std::string new_file_path = TestEnvironment::temporaryPath("envoy_this_not_exist"); + ::unlink(new_file_path.c_str()); - Thread::MutexBasicLockable mutex; - NiceMock os_sys_calls; - TestThreadsafeSingletonInjector os_calls(&os_sys_calls); + FilePtr file = file_system_.createFile(new_file_path); + EXPECT_EQ(getFd(file.get()), -1); - Sequence sq; - EXPECT_CALL(os_sys_calls, open_(_, _, _)).InSequence(sq).WillOnce(Return(5)); - Filesystem::FileSharedPtr file = file_system_.createFile("", dispatcher, mutex, timeout_40ms_); + const Api::IoCallBoolResult result1 = file->open(); + const int initial_fd = getFd(file.get()); + EXPECT_TRUE(result1.rc_); + EXPECT_TRUE(file->isOpen()); - EXPECT_CALL(os_sys_calls, write_(_, _, _)) - .InSequence(sq) - .WillOnce(Invoke([](int fd, const void* buffer, size_t num_bytes) -> ssize_t { - std::string written = std::string(reinterpret_cast(buffer), num_bytes); - EXPECT_EQ("before", written); - EXPECT_EQ(5, fd); + // check that we don't leak a file descriptor + const Api::IoCallBoolResult result2 = file->open(); + EXPECT_EQ(initial_fd, getFd(file.get())); + EXPECT_TRUE(result2.rc_); + EXPECT_TRUE(file->isOpen()); +} - return num_bytes; - })); +TEST_F(FileSystemImplTest, OpenBadFilePath) { + FilePtr file = file_system_.createFile(""); + const 
Api::IoCallBoolResult result = file->open(); + EXPECT_FALSE(result.rc_); +} - file->write("before"); - timer->callback_(); +TEST_F(FileSystemImplTest, ExistingFile) { + const std::string file_path = + TestEnvironment::writeStringToFileForTest("test_envoy", "existing file"); { - Thread::LockGuard lock(os_sys_calls.write_mutex_); - while (os_sys_calls.num_writes_ != 1) { - os_sys_calls.write_event_.wait(os_sys_calls.write_mutex_); - } + FilePtr file = file_system_.createFile(file_path); + const Api::IoCallBoolResult open_result = file->open(); + EXPECT_TRUE(open_result.rc_); + std::string data(" new data"); + const Api::IoCallSizeResult result = file->write(data); + EXPECT_EQ(data.length(), result.rc_); } - EXPECT_CALL(os_sys_calls, close(5)).InSequence(sq); - EXPECT_CALL(os_sys_calls, open_(_, _, _)).InSequence(sq).WillOnce(Return(10)); - - EXPECT_CALL(os_sys_calls, write_(_, _, _)) - .InSequence(sq) - .WillOnce(Invoke([](int fd, const void* buffer, size_t num_bytes) -> ssize_t { - std::string written = std::string(reinterpret_cast(buffer), num_bytes); - EXPECT_EQ("reopened", written); - EXPECT_EQ(10, fd); - - return num_bytes; - })); - - EXPECT_CALL(os_sys_calls, close(10)).InSequence(sq); - - file->reopen(); - file->write("reopened"); - timer->callback_(); - - { - Thread::LockGuard lock(os_sys_calls.write_mutex_); - while (os_sys_calls.num_writes_ != 2) { - os_sys_calls.write_event_.wait(os_sys_calls.write_mutex_); - } - } + auto contents = TestEnvironment::readFileToStringForTest(file_path); + EXPECT_EQ("existing file new data", contents); } -TEST_F(FileSystemImplTest, reopenThrows) { - NiceMock dispatcher; - NiceMock* timer = new NiceMock(&dispatcher); - - Thread::MutexBasicLockable mutex; - Stats::IsolatedStoreImpl stats_store; - NiceMock os_sys_calls; - TestThreadsafeSingletonInjector os_calls(&os_sys_calls); - - EXPECT_CALL(os_sys_calls, write_(_, _, _)) - .WillRepeatedly(Invoke([](int fd, const void* buffer, size_t num_bytes) -> ssize_t { - 
UNREFERENCED_PARAMETER(fd); - UNREFERENCED_PARAMETER(buffer); - - return num_bytes; - })); - - Sequence sq; - EXPECT_CALL(os_sys_calls, open_(_, _, _)).InSequence(sq).WillOnce(Return(5)); - - Filesystem::FileSharedPtr file = file_system_.createFile("", dispatcher, mutex, timeout_40ms_); - EXPECT_CALL(os_sys_calls, close(5)).InSequence(sq); - EXPECT_CALL(os_sys_calls, open_(_, _, _)).InSequence(sq).WillOnce(Return(-1)); - - file->write("test write"); - timer->callback_(); - { - Thread::LockGuard lock(os_sys_calls.write_mutex_); - while (os_sys_calls.num_writes_ != 1) { - os_sys_calls.write_event_.wait(os_sys_calls.write_mutex_); - } - } - file->reopen(); - - file->write("this is to force reopen"); - timer->callback_(); +TEST_F(FileSystemImplTest, NonExistingFile) { + const std::string new_file_path = TestEnvironment::temporaryPath("envoy_this_not_exist"); + ::unlink(new_file_path.c_str()); { - Thread::LockGuard lock(os_sys_calls.open_mutex_); - while (os_sys_calls.num_open_ != 2) { - os_sys_calls.open_event_.wait(os_sys_calls.open_mutex_); - } + FilePtr file = file_system_.createFile(new_file_path); + const Api::IoCallBoolResult open_result = file->open(); + EXPECT_TRUE(open_result.rc_); + std::string data(" new data"); + const Api::IoCallSizeResult result = file->write(data); + EXPECT_EQ(data.length(), result.rc_); } - // write call should not cause any exceptions - file->write("random data"); - timer->callback_(); + auto contents = TestEnvironment::readFileToStringForTest(new_file_path); + EXPECT_EQ(" new data", contents); } -TEST_F(FileSystemImplTest, bigDataChunkShouldBeFlushedWithoutTimer) { - NiceMock dispatcher; - Thread::MutexBasicLockable mutex; - Stats::IsolatedStoreImpl stats_store; - NiceMock os_sys_calls; - TestThreadsafeSingletonInjector os_calls(&os_sys_calls); - - Filesystem::FileSharedPtr file = file_system_.createFile("", dispatcher, mutex, timeout_40ms_); - - EXPECT_CALL(os_sys_calls, write_(_, _, _)) - .WillOnce(Invoke([](int fd, const void* 
buffer, size_t num_bytes) -> ssize_t { - UNREFERENCED_PARAMETER(fd); - - std::string written = std::string(reinterpret_cast(buffer), num_bytes); - std::string expected("a"); - EXPECT_EQ(expected, written); - - return num_bytes; - })); +TEST_F(FileSystemImplTest, Close) { + const std::string new_file_path = TestEnvironment::temporaryPath("envoy_this_not_exist"); + ::unlink(new_file_path.c_str()); - file->write("a"); + FilePtr file = file_system_.createFile(new_file_path); + const Api::IoCallBoolResult result1 = file->open(); + EXPECT_TRUE(result1.rc_); + EXPECT_TRUE(file->isOpen()); - { - Thread::LockGuard lock(os_sys_calls.write_mutex_); - while (os_sys_calls.num_writes_ != 1) { - os_sys_calls.write_event_.wait(os_sys_calls.write_mutex_); - } - } - - // First write happens without waiting on thread_flush_. Now make a big string and it should be - // flushed even when timer is not enabled - EXPECT_CALL(os_sys_calls, write_(_, _, _)) - .WillOnce(Invoke([](int fd, const void* buffer, size_t num_bytes) -> ssize_t { - UNREFERENCED_PARAMETER(fd); - - std::string written = std::string(reinterpret_cast(buffer), num_bytes); - std::string expected(1024 * 64 + 1, 'b'); - EXPECT_EQ(expected, written); - - return num_bytes; - })); + const Api::IoCallBoolResult result2 = file->close(); + EXPECT_TRUE(result2.rc_); + EXPECT_FALSE(file->isOpen()); +} - std::string big_string(1024 * 64 + 1, 'b'); - file->write(big_string); +TEST_F(FileSystemImplTest, WriteAfterClose) { + const std::string new_file_path = TestEnvironment::temporaryPath("envoy_this_not_exist"); + ::unlink(new_file_path.c_str()); - { - Thread::LockGuard lock(os_sys_calls.write_mutex_); - while (os_sys_calls.num_writes_ != 2) { - os_sys_calls.write_event_.wait(os_sys_calls.write_mutex_); - } - } + FilePtr file = file_system_.createFile(new_file_path); + const Api::IoCallBoolResult bool_result1 = file->open(); + EXPECT_TRUE(bool_result1.rc_); + const Api::IoCallBoolResult bool_result2 = file->close(); + 
EXPECT_TRUE(bool_result2.rc_); + const Api::IoCallSizeResult size_result = file->write(" new data"); + EXPECT_EQ(-1, size_result.rc_); + EXPECT_EQ(IoFileError::IoErrorCode::UnknownError, size_result.err_->getErrorCode()); + EXPECT_EQ("Bad file descriptor", size_result.err_->getErrorDetails()); } + +} // namespace Filesystem } // namespace Envoy diff --git a/test/common/filesystem/watcher_impl_test.cc b/test/common/filesystem/watcher_impl_test.cc index da6b5fd80cddf..c911d46efe782 100644 --- a/test/common/filesystem/watcher_impl_test.cc +++ b/test/common/filesystem/watcher_impl_test.cc @@ -4,24 +4,22 @@ #include "common/common/assert.h" #include "common/event/dispatcher_impl.h" #include "common/filesystem/watcher_impl.h" -#include "common/stats/isolated_store_impl.h" #include "test/test_common/environment.h" -#include "test/test_common/test_base.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" namespace Envoy { namespace Filesystem { -class WatcherImplTest : public TestBase { +class WatcherImplTest : public testing::Test { protected: - WatcherImplTest() : api_(Api::createApiForTest(stats_store_)), dispatcher_(*api_) {} + WatcherImplTest() : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher()) {} - Stats::IsolatedStoreImpl stats_store_; Api::ApiPtr api_; - Event::DispatcherImpl dispatcher_; + Event::DispatcherPtr dispatcher_; }; class WatchCallback { @@ -30,7 +28,7 @@ class WatchCallback { }; TEST_F(WatcherImplTest, All) { - Filesystem::WatcherPtr watcher = dispatcher_.createFilesystemWatcher(); + Filesystem::WatcherPtr watcher = dispatcher_->createFilesystemWatcher(); unlink(TestEnvironment::temporaryPath("envoy_test/watcher_target").c_str()); unlink(TestEnvironment::temporaryPath("envoy_test/watcher_link").c_str()); @@ -51,21 +49,21 @@ TEST_F(WatcherImplTest, All) { watcher->addWatch(TestEnvironment::temporaryPath("envoy_test/watcher_link"), Watcher::Events::MovedTo, [&](uint32_t events) -> void { 
callback.called(events); - dispatcher_.exit(); + dispatcher_->exit(); }); TestUtility::renameFile(TestEnvironment::temporaryPath("envoy_test/watcher_new_link"), TestEnvironment::temporaryPath("envoy_test/watcher_link")); - dispatcher_.run(Event::Dispatcher::RunType::Block); + dispatcher_->run(Event::Dispatcher::RunType::Block); TestUtility::createSymlink(TestEnvironment::temporaryPath("envoy_test/watcher_new_target"), TestEnvironment::temporaryPath("envoy_test/watcher_new_link")); TestUtility::renameFile(TestEnvironment::temporaryPath("envoy_test/watcher_new_link"), TestEnvironment::temporaryPath("envoy_test/watcher_link")); - dispatcher_.run(Event::Dispatcher::RunType::Block); + dispatcher_->run(Event::Dispatcher::RunType::Block); } TEST_F(WatcherImplTest, Create) { - Filesystem::WatcherPtr watcher = dispatcher_.createFilesystemWatcher(); + Filesystem::WatcherPtr watcher = dispatcher_->createFilesystemWatcher(); unlink(TestEnvironment::temporaryPath("envoy_test/watcher_target").c_str()); unlink(TestEnvironment::temporaryPath("envoy_test/watcher_link").c_str()); @@ -80,21 +78,40 @@ TEST_F(WatcherImplTest, Create) { watcher->addWatch(TestEnvironment::temporaryPath("envoy_test/watcher_link"), Watcher::Events::MovedTo, [&](uint32_t events) -> void { callback.called(events); - dispatcher_.exit(); + dispatcher_->exit(); }); { std::ofstream file(TestEnvironment::temporaryPath("envoy_test/other_file")); } - dispatcher_.run(Event::Dispatcher::RunType::NonBlock); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); TestUtility::createSymlink(TestEnvironment::temporaryPath("envoy_test/watcher_target"), TestEnvironment::temporaryPath("envoy_test/watcher_new_link")); TestUtility::renameFile(TestEnvironment::temporaryPath("envoy_test/watcher_new_link"), TestEnvironment::temporaryPath("envoy_test/watcher_link")); - dispatcher_.run(Event::Dispatcher::RunType::Block); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +TEST_F(WatcherImplTest, Modify) { + 
Filesystem::WatcherPtr watcher = dispatcher_->createFilesystemWatcher(); + + TestUtility::createDirectory(TestEnvironment::temporaryPath("envoy_test")); + std::ofstream file(TestEnvironment::temporaryPath("envoy_test/watcher_target")); + + WatchCallback callback; + watcher->addWatch(TestEnvironment::temporaryPath("envoy_test/watcher_target"), + Watcher::Events::Modified, [&](uint32_t events) -> void { + callback.called(events); + dispatcher_->exit(); + }); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + + file << "text" << std::flush; + EXPECT_CALL(callback, called(Watcher::Events::Modified)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); } TEST_F(WatcherImplTest, BadPath) { - Filesystem::WatcherPtr watcher = dispatcher_.createFilesystemWatcher(); + Filesystem::WatcherPtr watcher = dispatcher_->createFilesystemWatcher(); EXPECT_THROW( watcher->addWatch("this_is_not_a_file", Watcher::Events::MovedTo, [&](uint32_t) -> void {}), @@ -106,7 +123,7 @@ TEST_F(WatcherImplTest, BadPath) { } TEST_F(WatcherImplTest, ParentDirectoryRemoved) { - Filesystem::WatcherPtr watcher = dispatcher_.createFilesystemWatcher(); + Filesystem::WatcherPtr watcher = dispatcher_->createFilesystemWatcher(); TestUtility::createDirectory(TestEnvironment::temporaryPath("envoy_test_empty")); @@ -120,11 +137,11 @@ TEST_F(WatcherImplTest, ParentDirectoryRemoved) { int rc = rmdir(TestEnvironment::temporaryPath("envoy_test_empty").c_str()); EXPECT_EQ(0, rc); - dispatcher_.run(Event::Dispatcher::RunType::NonBlock); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); } TEST_F(WatcherImplTest, RootDirectoryPath) { - Filesystem::WatcherPtr watcher = dispatcher_.createFilesystemWatcher(); + Filesystem::WatcherPtr watcher = dispatcher_->createFilesystemWatcher(); EXPECT_NO_THROW(watcher->addWatch("/", Watcher::Events::MovedTo, [&](uint32_t) -> void {})); } diff --git a/test/common/grpc/BUILD b/test/common/grpc/BUILD index 3d262d90ee43b..0d880ea03ff56 100644 --- 
a/test/common/grpc/BUILD +++ b/test/common/grpc/BUILD @@ -58,6 +58,18 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "google_grpc_utils_test", + srcs = envoy_select_google_grpc(["google_grpc_utils_test.cc"]), + deps = [ + "//source/common/grpc:common_lib", + "//source/common/http:headers_lib", + "//test/mocks/upstream:upstream_mocks", + "//test/proto:helloworld_proto_cc", + "//test/test_common:utility_lib", + ] + envoy_select_google_grpc(["//source/common/grpc:google_grpc_utils_lib"]), +) + envoy_cc_test( name = "google_async_client_impl_test", srcs = envoy_select_google_grpc(["google_async_client_impl_test.cc"]), @@ -78,6 +90,7 @@ envoy_cc_test( envoy_cc_test( name = "google_grpc_creds_test", srcs = envoy_select_google_grpc(["google_grpc_creds_test.cc"]), + data = [":service_key.json"], deps = [ ":utility_lib", "//test/mocks/stats:stats_mocks", @@ -108,6 +121,7 @@ envoy_cc_test_library( "//test/mocks/local_info:local_info_mocks", "//test/mocks/server:server_mocks", "//test/proto:helloworld_proto_cc", + "//test/test_common:global_lib", "//test/test_common:test_time_lib", "//test/test_common:utility_lib", ], diff --git a/test/common/grpc/async_client_impl_test.cc b/test/common/grpc/async_client_impl_test.cc index f12ff49921378..2b7df6d179113 100644 --- a/test/common/grpc/async_client_impl_test.cc +++ b/test/common/grpc/async_client_impl_test.cc @@ -4,12 +4,13 @@ #include "test/mocks/tracing/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/proto/helloworld.pb.h" -#include "test/test_common/test_base.h" #include "test/test_common/test_time.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" using testing::_; +using testing::Eq; using testing::Invoke; using testing::Return; using testing::ReturnRef; @@ -19,7 +20,7 @@ namespace Envoy { namespace Grpc { namespace { -class EnvoyAsyncClientImplTest : public TestBase { +class EnvoyAsyncClientImplTest : public testing::Test { public: EnvoyAsyncClientImplTest() : 
method_descriptor_(helloworld::Greeter::descriptor()->FindMethodByName("SayHello")) { @@ -58,10 +59,11 @@ TEST_F(EnvoyAsyncClientImplTest, RequestHttpStartFail) { Tracing::MockSpan* child_span{new Tracing::MockSpan()}; EXPECT_CALL(active_span, spawnChild_(_, "async test_cluster egress", _)) .WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, setTag(Tracing::Tags::get().COMPONENT, Tracing::Tags::get().PROXY)); - EXPECT_CALL(*child_span, setTag(Tracing::Tags::get().UPSTREAM_CLUSTER, "test_cluster")); - EXPECT_CALL(*child_span, setTag(Tracing::Tags::get().GRPC_STATUS_CODE, "14")); - EXPECT_CALL(*child_span, setTag(Tracing::Tags::get().ERROR, Tracing::Tags::get().TRUE)); + EXPECT_CALL(*child_span, + setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy))); + EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq("test_cluster"))); + EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq("14"))); + EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); EXPECT_CALL(*child_span, finishSpan()); EXPECT_CALL(*child_span, injectContext(_)).Times(0); @@ -123,11 +125,12 @@ TEST_F(EnvoyAsyncClientImplTest, RequestHttpSendHeadersFail) { Tracing::MockSpan* child_span{new Tracing::MockSpan()}; EXPECT_CALL(active_span, spawnChild_(_, "async test_cluster egress", _)) .WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, setTag(Tracing::Tags::get().COMPONENT, Tracing::Tags::get().PROXY)); - EXPECT_CALL(*child_span, setTag(Tracing::Tags::get().UPSTREAM_CLUSTER, "test_cluster")); + EXPECT_CALL(*child_span, + setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy))); + EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq("test_cluster"))); EXPECT_CALL(*child_span, injectContext(_)); - EXPECT_CALL(*child_span, setTag(Tracing::Tags::get().GRPC_STATUS_CODE, "13")); - EXPECT_CALL(*child_span, setTag(Tracing::Tags::get().ERROR, 
Tracing::Tags::get().TRUE)); + EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq("13"))); + EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); EXPECT_CALL(*child_span, finishSpan()); auto* grpc_request = grpc_client_->send(*method_descriptor_, request_msg, grpc_callbacks, diff --git a/test/common/grpc/async_client_manager_impl_test.cc b/test/common/grpc/async_client_manager_impl_test.cc index 8d3c15f0a9fdc..7678dfef41fea 100644 --- a/test/common/grpc/async_client_manager_impl_test.cc +++ b/test/common/grpc/async_client_manager_impl_test.cc @@ -4,11 +4,11 @@ #include "test/mocks/stats/mocks.h" #include "test/mocks/thread_local/mocks.h" #include "test/mocks/upstream/mocks.h" -#include "test/test_common/test_base.h" #include "test/test_common/test_time.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" using ::testing::Return; @@ -16,13 +16,12 @@ namespace Envoy { namespace Grpc { namespace { -class AsyncClientManagerImplTest : public TestBase { +class AsyncClientManagerImplTest : public testing::Test { public: - AsyncClientManagerImplTest() : api_(Api::createApiForTest(api_stats_store_)) {} + AsyncClientManagerImplTest() : api_(Api::createApiForTest()) {} Upstream::MockClusterManager cm_; NiceMock tls_; - Stats::IsolatedStoreImpl api_stats_store_; Stats::MockStore scope_; DangerousDeprecatedTestTime test_time_; Api::ApiPtr api_; @@ -34,7 +33,7 @@ TEST_F(AsyncClientManagerImplTest, EnvoyGrpcOk) { grpc_service.mutable_envoy_grpc()->set_cluster_name("foo"); Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockCluster cluster; + Upstream::MockClusterMockPrioritySet cluster; cluster_map.emplace("foo", cluster); EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); EXPECT_CALL(cluster, info()); @@ -59,7 +58,7 @@ TEST_F(AsyncClientManagerImplTest, EnvoyGrpcDynamicCluster) { grpc_service.mutable_envoy_grpc()->set_cluster_name("foo"); 
Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockCluster cluster; + Upstream::MockClusterMockPrioritySet cluster; cluster_map.emplace("foo", cluster); EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); EXPECT_CALL(cluster, info()); diff --git a/test/common/grpc/codec_test.cc b/test/common/grpc/codec_test.cc index e0e6be57502d5..a8d6658933121 100644 --- a/test/common/grpc/codec_test.cc +++ b/test/common/grpc/codec_test.cc @@ -8,10 +8,12 @@ #include "test/proto/helloworld.pb.h" #include "test/test_common/printers.h" -#include "test/test_common/test_base.h" + +#include "gtest/gtest.h" namespace Envoy { namespace Grpc { +namespace { TEST(GrpcCodecTest, encodeHeader) { Encoder encoder; @@ -167,5 +169,6 @@ TEST(GrpcCodecTest, decodeMultipleFrame) { } } +} // namespace } // namespace Grpc } // namespace Envoy diff --git a/test/common/grpc/common_test.cc b/test/common/grpc/common_test.cc index 722e30519acd4..c7ed8e7e1a30c 100644 --- a/test/common/grpc/common_test.cc +++ b/test/common/grpc/common_test.cc @@ -1,3 +1,5 @@ +#include + #include "common/grpc/common.h" #include "common/http/headers.h" #include "common/http/message_impl.h" @@ -5,9 +7,10 @@ #include "test/mocks/upstream/mocks.h" #include "test/proto/helloworld.pb.h" -#include "test/test_common/test_base.h" #include "test/test_common/utility.h" +#include "gtest/gtest.h" + namespace Envoy { namespace Grpc { @@ -78,25 +81,25 @@ TEST(GrpcCommonTest, ToGrpcTimeout) { Http::HeaderString value; Common::toGrpcTimeout(std::chrono::milliseconds(0UL), value); - EXPECT_STREQ("0m", value.c_str()); + EXPECT_EQ("0m", value.getStringView()); Common::toGrpcTimeout(std::chrono::milliseconds(1UL), value); - EXPECT_STREQ("1m", value.c_str()); + EXPECT_EQ("1m", value.getStringView()); Common::toGrpcTimeout(std::chrono::milliseconds(100000000UL), value); - EXPECT_STREQ("100000S", value.c_str()); + EXPECT_EQ("100000S", value.getStringView()); Common::toGrpcTimeout(std::chrono::milliseconds(100000000000UL), 
value); - EXPECT_STREQ("1666666M", value.c_str()); + EXPECT_EQ("1666666M", value.getStringView()); Common::toGrpcTimeout(std::chrono::milliseconds(9000000000000UL), value); - EXPECT_STREQ("2500000H", value.c_str()); + EXPECT_EQ("2500000H", value.getStringView()); Common::toGrpcTimeout(std::chrono::milliseconds(360000000000000UL), value); - EXPECT_STREQ("99999999H", value.c_str()); + EXPECT_EQ("99999999H", value.getStringView()); Common::toGrpcTimeout(std::chrono::milliseconds(UINT64_MAX), value); - EXPECT_STREQ("99999999H", value.c_str()); + EXPECT_EQ("99999999H", value.getStringView()); } TEST(GrpcCommonTest, ChargeStats) { @@ -134,71 +137,71 @@ TEST(GrpcCommonTest, PrepareHeaders) { Http::MessagePtr message = Common::prepareHeaders("cluster", "service_name", "method_name", absl::nullopt); - EXPECT_STREQ("POST", message->headers().Method()->value().c_str()); - EXPECT_STREQ("/service_name/method_name", message->headers().Path()->value().c_str()); - EXPECT_STREQ("cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/grpc", message->headers().ContentType()->value().c_str()); + EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); + EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); + EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); } { Http::MessagePtr message = Common::prepareHeaders("cluster", "service_name", "method_name", absl::optional(1)); - EXPECT_STREQ("POST", message->headers().Method()->value().c_str()); - EXPECT_STREQ("/service_name/method_name", message->headers().Path()->value().c_str()); - EXPECT_STREQ("cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/grpc", message->headers().ContentType()->value().c_str()); - EXPECT_STREQ("1m", message->headers().GrpcTimeout()->value().c_str()); + EXPECT_EQ("POST", 
message->headers().Method()->value().getStringView()); + EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); + EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); + EXPECT_EQ("1m", message->headers().GrpcTimeout()->value().getStringView()); } { Http::MessagePtr message = Common::prepareHeaders("cluster", "service_name", "method_name", absl::optional(1)); - EXPECT_STREQ("POST", message->headers().Method()->value().c_str()); - EXPECT_STREQ("/service_name/method_name", message->headers().Path()->value().c_str()); - EXPECT_STREQ("cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/grpc", message->headers().ContentType()->value().c_str()); - EXPECT_STREQ("1000m", message->headers().GrpcTimeout()->value().c_str()); + EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); + EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); + EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); + EXPECT_EQ("1000m", message->headers().GrpcTimeout()->value().getStringView()); } { Http::MessagePtr message = Common::prepareHeaders("cluster", "service_name", "method_name", absl::optional(1)); - EXPECT_STREQ("POST", message->headers().Method()->value().c_str()); - EXPECT_STREQ("/service_name/method_name", message->headers().Path()->value().c_str()); - EXPECT_STREQ("cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/grpc", message->headers().ContentType()->value().c_str()); - EXPECT_STREQ("60000m", message->headers().GrpcTimeout()->value().c_str()); + EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); + EXPECT_EQ("/service_name/method_name", 
message->headers().Path()->value().getStringView()); + EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); + EXPECT_EQ("60000m", message->headers().GrpcTimeout()->value().getStringView()); } { Http::MessagePtr message = Common::prepareHeaders("cluster", "service_name", "method_name", absl::optional(1)); - EXPECT_STREQ("POST", message->headers().Method()->value().c_str()); - EXPECT_STREQ("/service_name/method_name", message->headers().Path()->value().c_str()); - EXPECT_STREQ("cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/grpc", message->headers().ContentType()->value().c_str()); - EXPECT_STREQ("3600000m", message->headers().GrpcTimeout()->value().c_str()); + EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); + EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); + EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); + EXPECT_EQ("3600000m", message->headers().GrpcTimeout()->value().getStringView()); } { Http::MessagePtr message = Common::prepareHeaders( "cluster", "service_name", "method_name", absl::optional(100000000)); - EXPECT_STREQ("POST", message->headers().Method()->value().c_str()); - EXPECT_STREQ("/service_name/method_name", message->headers().Path()->value().c_str()); - EXPECT_STREQ("cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/grpc", message->headers().ContentType()->value().c_str()); - EXPECT_STREQ("99999999H", message->headers().GrpcTimeout()->value().c_str()); + EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); + EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); + EXPECT_EQ("cluster", 
message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); + EXPECT_EQ("99999999H", message->headers().GrpcTimeout()->value().getStringView()); } { Http::MessagePtr message = Common::prepareHeaders("cluster", "service_name", "method_name", absl::optional(100000000000)); - EXPECT_STREQ("POST", message->headers().Method()->value().c_str()); - EXPECT_STREQ("/service_name/method_name", message->headers().Path()->value().c_str()); - EXPECT_STREQ("cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/grpc", message->headers().ContentType()->value().c_str()); - EXPECT_STREQ("1666666M", message->headers().GrpcTimeout()->value().c_str()); + EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); + EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); + EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); + EXPECT_EQ("1666666M", message->headers().GrpcTimeout()->value().getStringView()); } } @@ -358,5 +361,18 @@ TEST(GrpcCommonTest, MakeByteBuffer) { Common::makeByteBuffer(std::move(buffer)); } +// Ensure that the correct gPRC header is constructed for a Buffer::Instance. 
+TEST(GrpcCommonTest, PrependGrpcFrameHeader) { + auto buffer = std::make_unique(); + buffer->add("test", 4); + std::array expected_header; + expected_header[0] = 0; // flags + const uint32_t nsize = htonl(4); + std::memcpy(&expected_header[1], reinterpret_cast(&nsize), sizeof(uint32_t)); + std::string header_string(&expected_header[0], 5); + Common::prependGrpcFrameHeader(*buffer); + EXPECT_EQ(buffer->toString(), header_string + "test"); +} + } // namespace Grpc } // namespace Envoy diff --git a/test/common/grpc/google_async_client_impl_test.cc b/test/common/grpc/google_async_client_impl_test.cc index 73f69db13b9de..3389e40a0220c 100644 --- a/test/common/grpc/google_async_client_impl_test.cc +++ b/test/common/grpc/google_async_client_impl_test.cc @@ -8,13 +8,14 @@ #include "test/mocks/grpc/mocks.h" #include "test/mocks/tracing/mocks.h" #include "test/proto/helloworld.pb.h" -#include "test/test_common/test_base.h" #include "test/test_common/test_time.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" +#include "gtest/gtest.h" using testing::_; +using testing::Eq; using testing::Return; namespace Envoy { @@ -44,25 +45,25 @@ class MockStubFactory : public GoogleStubFactory { std::shared_ptr shared_stub_{stub_}; }; -class EnvoyGoogleAsyncClientImplTest : public TestBase { +class EnvoyGoogleAsyncClientImplTest : public testing::Test { public: EnvoyGoogleAsyncClientImplTest() : stats_store_(new Stats::IsolatedStoreImpl), api_(Api::createApiForTest(*stats_store_)), - dispatcher_(*api_), scope_(stats_store_), + dispatcher_(api_->allocateDispatcher()), scope_(stats_store_), method_descriptor_(helloworld::Greeter::descriptor()->FindMethodByName("SayHello")) { envoy::api::v2::core::GrpcService config; auto* google_grpc = config.mutable_google_grpc(); google_grpc->set_target_uri("fake_address"); google_grpc->set_stat_prefix("test_cluster"); tls_ = std::make_unique(*api_); - grpc_client_ = std::make_unique(dispatcher_, *tls_, stub_factory_, + grpc_client_ = 
std::make_unique(*dispatcher_, *tls_, stub_factory_, scope_, config, *api_); } DangerousDeprecatedTestTime test_time_; Stats::IsolatedStoreImpl* stats_store_; // Ownership transferred to scope_. Api::ApiPtr api_; - Event::DispatcherImpl dispatcher_; + Event::DispatcherPtr dispatcher_; Stats::ScopeSharedPtr scope_; std::unique_ptr tls_; MockStubFactory stub_factory_; @@ -95,10 +96,11 @@ TEST_F(EnvoyGoogleAsyncClientImplTest, RequestHttpStartFail) { Tracing::MockSpan* child_span{new Tracing::MockSpan()}; EXPECT_CALL(active_span, spawnChild_(_, "async test_cluster egress", _)) .WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, setTag(Tracing::Tags::get().COMPONENT, Tracing::Tags::get().PROXY)); - EXPECT_CALL(*child_span, setTag(Tracing::Tags::get().UPSTREAM_CLUSTER, "test_cluster")); - EXPECT_CALL(*child_span, setTag(Tracing::Tags::get().GRPC_STATUS_CODE, "14")); - EXPECT_CALL(*child_span, setTag(Tracing::Tags::get().ERROR, Tracing::Tags::get().TRUE)); + EXPECT_CALL(*child_span, + setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy))); + EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq("test_cluster"))); + EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq("14"))); + EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); EXPECT_CALL(*child_span, finishSpan()); EXPECT_CALL(*child_span, injectContext(_)); diff --git a/test/common/grpc/google_grpc_creds_test.cc b/test/common/grpc/google_grpc_creds_test.cc index 1bbb3bd353401..819b758e1614c 100644 --- a/test/common/grpc/google_grpc_creds_test.cc +++ b/test/common/grpc/google_grpc_creds_test.cc @@ -1,10 +1,14 @@ +#include + #include "common/grpc/google_grpc_creds_impl.h" #include "test/common/grpc/utility.h" #include "test/mocks/stats/mocks.h" -#include "test/test_common/test_base.h" +#include "test/test_common/environment.h" #include "test/test_common/utility.h" +#include "gtest/gtest.h" + namespace 
Envoy { namespace Grpc { namespace { @@ -14,11 +18,10 @@ namespace { // of getting at the underlying state, we can at best just make sure we don't // crash, compare with nullptr and/or look at vector lengths. -class CredsUtilityTest : public TestBase { +class CredsUtilityTest : public testing::Test { public: - CredsUtilityTest() : api_(Api::createApiForTest(stats_store_)) {} + CredsUtilityTest() : api_(Api::createApiForTest()) {} - Stats::MockIsolatedStatsStore stats_store_; Api::ApiPtr api_; }; @@ -31,6 +34,14 @@ TEST_F(CredsUtilityTest, GetChannelCredentials) { EXPECT_NE(nullptr, CredsUtility::getChannelCredentials(config, *api_)); creds->mutable_local_credentials(); EXPECT_NE(nullptr, CredsUtility::getChannelCredentials(config, *api_)); + + const char var_name[] = "GOOGLE_APPLICATION_CREDENTIALS"; + EXPECT_EQ(nullptr, ::getenv(var_name)); + const auto creds_path = TestEnvironment::runfilesPath("test/common/grpc/service_key.json"); + ::setenv(var_name, creds_path.c_str(), 0); + creds->mutable_google_default(); + EXPECT_NE(nullptr, CredsUtility::getChannelCredentials(config, *api_)); + ::unsetenv(var_name); } TEST_F(CredsUtilityTest, DefaultSslChannelCredentials) { diff --git a/test/common/grpc/google_grpc_utils_test.cc b/test/common/grpc/google_grpc_utils_test.cc new file mode 100644 index 0000000000000..6f7a96a497c50 --- /dev/null +++ b/test/common/grpc/google_grpc_utils_test.cc @@ -0,0 +1,88 @@ +#include + +#include "common/grpc/google_grpc_utils.h" + +#include "test/mocks/upstream/mocks.h" +#include "test/proto/helloworld.pb.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Grpc { +namespace { + +TEST(GoogleGrpcUtilsTest, MakeBufferInstanceEmpty) { + grpc::ByteBuffer byte_buffer; + GoogleGrpcUtils::makeBufferInstance(byte_buffer); +} + +TEST(GoogleGrpcUtilsTest, MakeByteBufferEmpty) { + auto buffer = std::make_unique(); + GoogleGrpcUtils::makeByteBuffer(std::move(buffer)); + buffer = nullptr; + 
// Test building a grpc::ByteBuffer from a Buffer::Instance with 3 slices.
+TEST(GoogleGrpcUtilsTest, ByteBufferInstanceRoundTrip) { + std::array slices = {grpc::string("test"), grpc::string(" "), + grpc::string("this")}; + grpc::ByteBuffer byte_buffer(&slices[0], 3); + auto buffer_instance1 = GoogleGrpcUtils::makeBufferInstance(byte_buffer); + auto byte_buffer2 = GoogleGrpcUtils::makeByteBuffer(std::move(buffer_instance1)); + auto buffer_instance2 = GoogleGrpcUtils::makeBufferInstance(byte_buffer2); + EXPECT_EQ(buffer_instance2->toString(), "test this"); +} + +} // namespace +} // namespace Grpc +} // namespace Envoy diff --git a/test/common/grpc/grpc_client_integration.h b/test/common/grpc/grpc_client_integration.h index 782e1579aed7e..a2215a2edae93 100644 --- a/test/common/grpc/grpc_client_integration.h +++ b/test/common/grpc/grpc_client_integration.h @@ -2,9 +2,10 @@ #include "common/common/assert.h" -#include "test/test_common/test_base.h" #include "test/test_common/utility.h" +#include "gtest/gtest.h" + namespace Envoy { namespace Grpc { @@ -38,7 +39,7 @@ class BaseGrpcClientIntegrationParamTest { class GrpcClientIntegrationParamTest : public BaseGrpcClientIntegrationParamTest, - public TestBaseWithParam> { + public testing::TestWithParam> { public: ~GrpcClientIntegrationParamTest() override = default; static std::string protocolTestParamsToString( diff --git a/test/common/grpc/grpc_client_integration_test.cc b/test/common/grpc/grpc_client_integration_test.cc index a3e1cdaefcef6..7e55827dae9a5 100644 --- a/test/common/grpc/grpc_client_integration_test.cc +++ b/test/common/grpc/grpc_client_integration_test.cc @@ -7,6 +7,8 @@ #include "test/common/grpc/grpc_client_integration_test_harness.h" +using testing::Eq; + namespace Envoy { namespace Grpc { namespace { @@ -188,7 +190,7 @@ TEST_P(GrpcClientIntegrationTest, ReplyNoTrailers) { dispatcher_helper_.setStreamEventPending(); stream->expectTrailingMetadata(empty_metadata_); stream->expectGrpcStatus(Status::GrpcStatus::InvalidCode); - auto serialized_response = 
Grpc::Common::serializeBody(reply); + auto serialized_response = Grpc::Common::serializeToGrpcFrame(reply); stream->fake_stream_->encodeData(*serialized_response, true); stream->fake_stream_->encodeResetStream(); dispatcher_helper_.runDispatcher(); @@ -272,8 +274,9 @@ TEST_P(GrpcClientIntegrationTest, RequestTrailersOnly) { initialize(); auto request = createRequest(empty_metadata_); const Http::TestHeaderMapImpl reply_headers{{":status", "200"}, {"grpc-status", "0"}}; - EXPECT_CALL(*request->child_span_, setTag(Tracing::Tags::get().GRPC_STATUS_CODE, "0")); - EXPECT_CALL(*request->child_span_, setTag(Tracing::Tags::get().ERROR, Tracing::Tags::get().TRUE)); + EXPECT_CALL(*request->child_span_, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq("0"))); + EXPECT_CALL(*request->child_span_, + setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); EXPECT_CALL(*request, onFailure(Status::Internal, "", _)).WillExitIfNeeded(); dispatcher_helper_.setStreamEventPending(); EXPECT_CALL(*request->child_span_, finishSpan()); @@ -340,7 +343,7 @@ TEST_P(GrpcClientIntegrationTest, CancelRequest) { initialize(); auto request = createRequest(empty_metadata_); EXPECT_CALL(*request->child_span_, - setTag(Tracing::Tags::get().STATUS, Tracing::Tags::get().CANCELED)); + setTag(Eq(Tracing::Tags::get().Status), Eq(Tracing::Tags::get().Canceled))); EXPECT_CALL(*request->child_span_, finishSpan()); request->grpc_request_->cancel(); dispatcher_helper_.dispatcher_.run(Event::Dispatcher::RunType::NonBlock); diff --git a/test/common/grpc/grpc_client_integration_test_harness.h b/test/common/grpc/grpc_client_integration_test_harness.h index 950c087a163c2..4d4a94e288e79 100644 --- a/test/common/grpc/grpc_client_integration_test_harness.h +++ b/test/common/grpc/grpc_client_integration_test_harness.h @@ -13,6 +13,7 @@ #include "common/http/async_client_impl.h" #include "common/http/codes.h" #include "common/http/http2/conn_pool.h" +#include "common/stats/fake_symbol_table_impl.h" 
#include "common/network/connection_impl.h" #include "common/network/raw_buffer_socket.h" @@ -29,10 +30,12 @@ #include "test/mocks/upstream/mocks.h" #include "test/proto/helloworld.pb.h" #include "test/test_common/environment.h" +#include "test/test_common/global.h" #include "test/test_common/test_time.h" #include "test/test_common/utility.h" using testing::_; +using testing::Eq; using testing::Invoke; using testing::InvokeWithoutArgs; using testing::NiceMock; @@ -200,7 +203,7 @@ class HelloworldRequest : public MockAsyncRequestCallbacksstartGrpcStream(); helloworld::HelloReply reply; reply.set_message(HELLO_REPLY); - EXPECT_CALL(*child_span_, setTag(Tracing::Tags::get().GRPC_STATUS_CODE, "0")); + EXPECT_CALL(*child_span_, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq("0"))); EXPECT_CALL(*this, onSuccess_(HelloworldReplyEq(HELLO_REPLY), _)).WillExitIfNeeded(); EXPECT_CALL(*child_span_, finishSpan()); dispatcher_helper_.setStreamEventPending(); @@ -218,7 +221,8 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { public: GrpcClientIntegrationTest() : method_descriptor_(helloworld::Greeter::descriptor()->FindMethodByName("SayHello")), - api_(Api::createApiForTest(*stats_store_, test_time_.timeSystem())), dispatcher_(*api_) {} + api_(Api::createApiForTest(*stats_store_, test_time_.timeSystem())), + dispatcher_(api_->allocateDispatcher()) {} virtual void initialize() { if (fake_upstream_ == nullptr) { @@ -236,9 +240,9 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { } // Setup a test timeout (also needed to maintain an active event in the dispatcher so that // .run() will block until timeout rather than exit immediately). 
- timeout_timer_ = dispatcher_.createTimer([this] { + timeout_timer_ = dispatcher_->createTimer([this] { FAIL() << "Test timeout"; - dispatcher_.exit(); + dispatcher_->exit(); }); timeout_timer_->enableTimer(std::chrono::milliseconds(10000)); } @@ -265,10 +269,10 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { // infrastructure to initiate a loopback TCP connection to fake_upstream_. AsyncClientPtr createAsyncClientImpl() { client_connection_ = std::make_unique( - dispatcher_, fake_upstream_->localAddress(), nullptr, + *dispatcher_, fake_upstream_->localAddress(), nullptr, std::move(async_client_transport_socket_), nullptr); ON_CALL(*mock_cluster_info_, connectTimeout()) - .WillByDefault(Return(std::chrono::milliseconds(1000))); + .WillByDefault(Return(std::chrono::milliseconds(10000))); EXPECT_CALL(*mock_cluster_info_, name()).WillRepeatedly(ReturnRef(fake_cluster_name_)); EXPECT_CALL(cm_, get(_)).WillRepeatedly(Return(&thread_local_cluster_)); EXPECT_CALL(thread_local_cluster_, info()).WillRepeatedly(Return(cluster_info_ptr_)); @@ -278,11 +282,11 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { EXPECT_CALL(*mock_host_, cluster()).WillRepeatedly(ReturnRef(*cluster_info_ptr_)); EXPECT_CALL(*mock_host_description_, locality()).WillRepeatedly(ReturnRef(host_locality_)); http_conn_pool_ = std::make_unique( - dispatcher_, host_ptr_, Upstream::ResourcePriority::Default, nullptr); + *dispatcher_, host_ptr_, Upstream::ResourcePriority::Default, nullptr); EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)) .WillRepeatedly(Return(http_conn_pool_.get())); http_async_client_ = std::make_unique( - cluster_info_ptr_, *stats_store_, dispatcher_, local_info_, cm_, runtime_, random_, + cluster_info_ptr_, *stats_store_, *dispatcher_, local_info_, cm_, runtime_, random_, std::move(shadow_writer_ptr_), http_context_); EXPECT_CALL(cm_, httpAsyncClientForCluster(fake_cluster_name_)) 
.WillRepeatedly(ReturnRef(*http_async_client_)); @@ -290,7 +294,7 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { envoy::api::v2::core::GrpcService config; config.mutable_envoy_grpc()->set_cluster_name(fake_cluster_name_); fillServiceWideInitialMetadata(config); - return std::make_unique(cm_, config, dispatcher_.timeSystem()); + return std::make_unique(cm_, config, dispatcher_->timeSource()); } virtual envoy::api::v2::core::GrpcService createGoogleGrpcConfig() { @@ -306,7 +310,7 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { #ifdef ENVOY_GOOGLE_GRPC google_tls_ = std::make_unique(*api_); GoogleGenericStubFactory stub_factory; - return std::make_unique(dispatcher_, *google_tls_, stub_factory, + return std::make_unique(*dispatcher_, *google_tls_, stub_factory, stats_scope_, createGoogleGrpcConfig(), *api_); #else NOT_REACHED_GCOVR_EXCL_LINE; @@ -346,9 +350,9 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { EXPECT_CALL(active_span, spawnChild_(_, "async fake_cluster egress", _)) .WillOnce(Return(request->child_span_)); EXPECT_CALL(*request->child_span_, - setTag(Tracing::Tags::get().UPSTREAM_CLUSTER, fake_cluster_name_)); + setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(fake_cluster_name_))); EXPECT_CALL(*request->child_span_, - setTag(Tracing::Tags::get().COMPONENT, Tracing::Tags::get().PROXY)); + setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy))); EXPECT_CALL(*request->child_span_, injectContext(_)); request->grpc_request_ = @@ -357,11 +361,12 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { EXPECT_NE(request->grpc_request_, nullptr); if (!fake_connection_) { - AssertionResult result = fake_upstream_->waitForHttpConnection(dispatcher_, fake_connection_); + AssertionResult result = + fake_upstream_->waitForHttpConnection(*dispatcher_, fake_connection_); RELEASE_ASSERT(result, result.message()); } 
fake_streams_.emplace_back(); - AssertionResult result = fake_connection_->waitForNewStream(dispatcher_, fake_streams_.back()); + AssertionResult result = fake_connection_->waitForNewStream(*dispatcher_, fake_streams_.back()); RELEASE_ASSERT(result, result.message()); auto& fake_stream = *fake_streams_.back(); request->fake_stream_ = &fake_stream; @@ -370,7 +375,7 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { expectExtraHeaders(fake_stream); helloworld::HelloRequest received_msg; - result = fake_stream.waitForGrpcMessage(dispatcher_, received_msg); + result = fake_stream.waitForGrpcMessage(*dispatcher_, received_msg); RELEASE_ASSERT(result, result.message()); EXPECT_THAT(request_msg, ProtoEq(received_msg)); @@ -390,11 +395,12 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { EXPECT_NE(stream->grpc_stream_, nullptr); if (!fake_connection_) { - AssertionResult result = fake_upstream_->waitForHttpConnection(dispatcher_, fake_connection_); + AssertionResult result = + fake_upstream_->waitForHttpConnection(*dispatcher_, fake_connection_); RELEASE_ASSERT(result, result.message()); } fake_streams_.emplace_back(); - AssertionResult result = fake_connection_->waitForNewStream(dispatcher_, fake_streams_.back()); + AssertionResult result = fake_connection_->waitForNewStream(*dispatcher_, fake_streams_.back()); RELEASE_ASSERT(result, result.message()); auto& fake_stream = *fake_streams_.back(); stream->fake_stream_ = &fake_stream; @@ -410,10 +416,11 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { FakeHttpConnectionPtr fake_connection_; std::vector fake_streams_; const Protobuf::MethodDescriptor* method_descriptor_; - Stats::IsolatedStoreImpl* stats_store_ = new Stats::IsolatedStoreImpl(); + Envoy::Test::Global symbol_table_; + Stats::IsolatedStoreImpl* stats_store_ = new Stats::IsolatedStoreImpl(*symbol_table_); Api::ApiPtr api_; - Event::DispatcherImpl dispatcher_; - DispatcherHelper 
  "project_id": "test-project",