diff --git a/.azure-pipelines/bazel.yml b/.azure-pipelines/bazel.yml new file mode 100644 index 0000000000000..fd1b724864c20 --- /dev/null +++ b/.azure-pipelines/bazel.yml @@ -0,0 +1,60 @@ +parameters: + - name: ciTarget + displayName: "CI target" + type: string + default: bazel.release + +steps: + - task: CacheBeta@1 + inputs: + key: '"${{ parameters.ciTarget }}" | ./WORKSPACE | **/*.bzl' + path: $(Build.StagingDirectory)/repository_cache + + - bash: .azure-pipelines/cleanup.sh + displayName: "Removing tools from agent" + + - bash: | + echo "disk space at beginning of build:" + df -h + displayName: "Check disk space at beginning" + + - bash: | + sudo mkdir -p /etc/docker + echo '{ + "ipv6": true, + "fixed-cidr-v6": "2001:db8:1::/64" + }' | sudo tee /etc/docker/daemon.json + sudo service docker restart + displayName: "Enable IPv6" + + - script: ci/run_envoy_docker.sh 'ci/do_ci.sh ${{ parameters.ciTarget }}' + workingDirectory: $(Build.SourcesDirectory) + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + ENVOY_RBE: "true" + # Use https://docs.bazel.build/versions/master/command-line-reference.html#flag--experimental_repository_cache_hardlinks + # to save disk space. 
+ BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --config=remote --jobs=$(RbeJobs) --curses=no --experimental_repository_cache_hardlinks" + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + displayName: "Run CI script" + + - bash: | + echo "disk space at end of build:" + df -h + displayName: "Check disk space at end" + condition: always() + + - task: PublishTestResults@2 + inputs: + testResultsFiles: "**/bazel-out/**/testlogs/**/test.xml" + testRunTitle: "${{ parameters.ciTarget }}" + searchFolder: $(Build.StagingDirectory)/tmp + condition: always() + + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: "$(Build.StagingDirectory)/envoy" + artifactName: ${{ parameters.ciTarget }} + condition: always() diff --git a/.azure-pipelines/cleanup.sh b/.azure-pipelines/cleanup.sh new file mode 100755 index 0000000000000..72a9bbf9fa185 --- /dev/null +++ b/.azure-pipelines/cleanup.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +set -e + +# Temporary script to remove tools from Azure pipelines agent to create more disk space room. +sudo apt-get -y update +sudo apt-get purge -y 'ghc-*' 'zulu-*-azure-jdk' 'libllvm*' 'mysql-*' 'dotnet-*' 'cpp-*' + +dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -rn diff --git a/.azure-pipelines/linux.yml b/.azure-pipelines/linux.yml deleted file mode 100644 index 3d02d37ebf7f4..0000000000000 --- a/.azure-pipelines/linux.yml +++ /dev/null @@ -1,175 +0,0 @@ -trigger: - branches: - include: - - 'master' - tags: - include: - - 'v*' - -# PR build config is manually overridden in Azure pipelines UI with different secrets -pr: none - -jobs: -- job: format - dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel. 
- pool: - vmImage: 'Ubuntu 16.04' - steps: - - task: CacheBeta@1 - inputs: - key: 'format | ./WORKSPACE | **/*.bzl' - path: $(Build.StagingDirectory)/repository_cache - - - script: ci/run_envoy_docker.sh 'ci/check_and_fix_format.sh' - workingDirectory: $(Build.SourcesDirectory) - env: - ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) - BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com - BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - displayName: "Run check format scripts" - - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: "$(Build.StagingDirectory)/fix_format.diff" - artifactName: format - condition: failed() - -- job: bazel - dependsOn: ["format"] - # For master builds, continue even if format fails - condition: or(succeeded(), ne(variables['Build.Reason'], 'PullRequest')) - strategy: - maxParallel: 3 - matrix: - release: - CI_TARGET: 'bazel.release' - asan: - CI_TARGET: 'bazel.asan' - tsan: - CI_TARGET: 'bazel.tsan' - clang_tidy: - CI_TARGET: 'bazel.clang_tidy' - gcc: - CI_TARGET: 'bazel.gcc' - compile_time_options: - CI_TARGET: 'bazel.compile_time_options' - # This will run on every commit/PR and will make sure the corpus generated by the fuzzers as well as fixed crashes - # (on Fuzzit) is not crashing envoy. This will help find bugs BEFORE merging and not after. 
- fuzzit: - CI_TARGET: 'bazel.fuzzit_regression' - timeoutInMinutes: 360 - pool: - vmImage: 'Ubuntu 16.04' - steps: - - task: CacheBeta@1 - inputs: - key: '"$(CI_TARGET)" | ./WORKSPACE | **/*.bzl' - path: $(Build.StagingDirectory)/repository_cache - - - bash: | - echo "disk space at beginning of build:" - df -h - displayName: "Check disk space at beginning" - - - bash: | - sudo mkdir -p /etc/docker - echo '{ - "ipv6": true, - "fixed-cidr-v6": "2001:db8:1::/64" - }' | sudo tee /etc/docker/daemon.json - sudo service docker restart - displayName: "Enable IPv6" - - - script: ci/run_envoy_docker.sh 'ci/do_ci.sh $(CI_TARGET)' - workingDirectory: $(Build.SourcesDirectory) - env: - ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) - ENVOY_RBE: "true" - BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --config=remote --jobs=$(RbeJobs) --curses=no" - BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com - BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - displayName: "Run CI script" - - - bash: | - echo "disk space at end of build:" - df -h - displayName: "Check disk space at end" - condition: always() - - - task: PublishTestResults@2 - inputs: - testResultsFiles: '**/bazel-out/**/testlogs/**/test.xml' - testRunTitle: '$(CI_TARGET)' - searchFolder: $(Build.StagingDirectory)/tmp - condition: always() - - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: "$(Build.StagingDirectory)/envoy" - artifactName: $(CI_TARGET) - condition: always() - -- job: docker - dependsOn: ["bazel"] - condition: and(succeeded(), eq(variables['PostSubmit'], 'true'), ne(variables['Build.Reason'], 'PullRequest')) - pool: - vmImage: 'Ubuntu 16.04' - steps: - - task: DownloadBuildArtifacts@0 - inputs: - buildType: current - artifactName: "bazel.release" - itemPattern: "bazel.release/envoy_binary.tar.gz" - downloadType: single - targetPath: $(Build.StagingDirectory) - - - bash: | - set -e - tar zxf 
$(Build.StagingDirectory)/bazel.release/envoy_binary.tar.gz - ci/docker_build.sh - ci/docker_push.sh - ci/docker_tag.sh - workingDirectory: $(Build.SourcesDirectory) - env: - AZP_BRANCH: $(Build.SourceBranch) - CIRCLE_SHA1: $(Build.SourceVersion) - DOCKERHUB_USERNAME: $(DockerUsername) - DOCKERHUB_PASSWORD: $(DockerPassword) - -- job: fuzzit_fuzzing - dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel. - timeoutInMinutes: 360 - # this runs on every push to master / merge to master. this will build the fuzzers and will update them on Fuzzit where - # they will run asynchronously. Essentially this will make sure that the latest master version is always being fuzzed - # continuously. - condition: and(succeeded(), ne(variables['FuzzitApiKey'], ''), eq(variables['Build.SourceBranch'], 'refs/heads/master'), ne(variables['Build.Reason'], 'PullRequest')) - pool: - vmImage: 'Ubuntu 16.04' - steps: - - bash: | - echo "disk space at beginning of build:" - df -h - displayName: "Check disk space at beginning" - - - bash: | - sudo mkdir -p /etc/docker - echo '{ - "ipv6": true, - "fixed-cidr-v6": "2001:db8:1::/64" - }' | sudo tee /etc/docker/daemon.json - sudo service docker restart - displayName: "Enable IPv6" - - - script: ci/run_envoy_docker.sh 'ci/do_ci.sh bazel.fuzzit_fuzzing' - workingDirectory: $(Build.SourcesDirectory) - env: - FUZZIT_API_KEY: $(FuzzitApiKey) - ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) - ENVOY_RBE: "true" - BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --config=remote --jobs=$(RbeJobs) --curses=no" - BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com - BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - displayName: "Fuzzit Regression" diff --git a/.azure-pipelines/macos.yml b/.azure-pipelines/macos.yml deleted file mode 100644 index 20655177351d3..0000000000000 --- a/.azure-pipelines/macos.yml +++ /dev/null @@ 
-1,26 +0,0 @@ -# Azure Pipelines -trigger: -- master - -jobs: -- job: macOS - timeoutInMinutes: 360 - pool: - vmImage: 'macos-latest' - - steps: - - script: ./ci/mac_ci_setup.sh - displayName: 'Install dependencies' - - - script: ./ci/mac_ci_steps.sh - displayName: 'Run Mac CI' - env: - BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com - BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - - - task: PublishTestResults@2 - inputs: - testResultsFiles: '**/bazel-testlogs/**/test.xml' - testRunTitle: 'macOS' - condition: always() diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml new file mode 100644 index 0000000000000..d4622def0762c --- /dev/null +++ b/.azure-pipelines/pipelines.yml @@ -0,0 +1,123 @@ +trigger: + branches: + include: + - "master" + - "release/v*" + tags: + include: + - "v*" + +# PR build config is manually overridden in Azure pipelines UI with different secrets +pr: none + +jobs: + - job: format + dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel. 
+ pool: + vmImage: "ubuntu-16.04" + steps: + - task: CacheBeta@1 + inputs: + key: "format | ./WORKSPACE | **/*.bzl" + path: $(Build.StagingDirectory)/repository_cache + + - script: ci/run_envoy_docker.sh 'ci/check_and_fix_format.sh' + workingDirectory: $(Build.SourcesDirectory) + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + displayName: "Run check format scripts" + + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: "$(Build.StagingDirectory)/fix_format.diff" + artifactName: format + condition: failed() + + - job: release + displayName: "Linux-x64 release" + dependsOn: ["format"] + # For master builds, continue even if format fails + condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) + pool: + vmImage: "ubuntu-16.04" + steps: + - template: bazel.yml + parameters: + ciTarget: bazel.release + + - job: bazel + displayName: "Linux-x64" + dependsOn: ["release"] + # For master builds, continue even if format fails + condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) + strategy: + maxParallel: 3 + matrix: + gcc: + CI_TARGET: "bazel.gcc" + clang_tidy: + CI_TARGET: "bazel.clang_tidy" + asan: + CI_TARGET: "bazel.asan" + tsan: + CI_TARGET: "bazel.tsan" + compile_time_options: + CI_TARGET: "bazel.compile_time_options" + timeoutInMinutes: 360 + pool: + vmImage: "Ubuntu 16.04" + steps: + - template: bazel.yml + parameters: + ciTarget: $(CI_TARGET) + + - job: docker + displayName: "Linux-x64 docker" + dependsOn: ["release"] + condition: and(succeeded(), eq(variables['PostSubmit'], 'true'), ne(variables['Build.Reason'], 'PullRequest')) + pool: + vmImage: "ubuntu-16.04" + steps: + - task: DownloadBuildArtifacts@0 + inputs: + buildType: current + artifactName: "bazel.release" + itemPattern: 
"bazel.release/envoy_binary.tar.gz" + downloadType: single + targetPath: $(Build.StagingDirectory) + + - bash: | + set -e + tar zxf $(Build.StagingDirectory)/bazel.release/envoy_binary.tar.gz + ci/docker_ci.sh + workingDirectory: $(Build.SourcesDirectory) + env: + AZP_BRANCH: $(Build.SourceBranch) + AZP_SHA1: $(Build.SourceVersion) + DOCKERHUB_USERNAME: $(DockerUsername) + DOCKERHUB_PASSWORD: $(DockerPassword) + + - job: macOS + dependsOn: ["format"] + timeoutInMinutes: 360 + pool: + vmImage: "macos-latest" + steps: + - script: ./ci/mac_ci_setup.sh + displayName: "Install dependencies" + + - script: ./ci/mac_ci_steps.sh + displayName: "Run Mac CI" + env: + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + + - task: PublishTestResults@2 + inputs: + testResultsFiles: "**/bazel-testlogs/**/test.xml" + testRunTitle: "macOS" + condition: always() diff --git a/.azure-pipelines/windows.yml b/.azure-pipelines/windows.yml deleted file mode 100644 index 8877ce28d0f85..0000000000000 --- a/.azure-pipelines/windows.yml +++ /dev/null @@ -1,21 +0,0 @@ -# Azure Pipelines -# TODO(lizan): Consider rolling all presubmit jobs into one file. 
-trigger: -- master - -jobs: -- job: Windows - timeoutInMinutes: 360 - pool: - vmImage: 'windows-latest' - - steps: - - powershell: | - .\ci\windows_ci_setup.ps1 - Write-Host "##vso[task.prependpath]$env:TOOLS_BIN_DIR" - displayName: 'Install dependencies' - env: - TOOLS_BIN_DIR: $(Pipeline.Workspace)\bin - - - powershell: .\ci\windows_ci_steps.ps1 - displayName: 'Run Windows CI' diff --git a/.bazelrc b/.bazelrc index 8cffa9f703a8b..5209caf1716ce 100644 --- a/.bazelrc +++ b/.bazelrc @@ -140,7 +140,8 @@ build:remote-clang --config=remote build:remote-clang --config=rbe-toolchain-clang # Docker sandbox -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build@sha256:3ca8acc35fdb57ab26e1bb5f9488f37095f45acd77a12602510410dbefa00b58 +# NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L7 +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu@sha256:3ca8acc35fdb57ab26e1bb5f9488f37095f45acd77a12602510410dbefa00b58 build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker diff --git a/VERSION b/VERSION index 0eed1a29efd64..868b3aa824516 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.12.0 +1.12.5-dev diff --git a/api/envoy/api/v2/core/protocol.proto b/api/envoy/api/v2/core/protocol.proto index af5e20cd73677..c6eb0ba731966 100644 --- a/api/envoy/api/v2/core/protocol.proto +++ b/api/envoy/api/v2/core/protocol.proto @@ -17,7 +17,28 @@ import "validate/validate.proto"; message TcpProtocolOptions { } +// [#next-free-field: 6] message HttpProtocolOptions { + // Action to take when Envoy receives client request with header names containing underscore + // characters. + // Underscore character is allowed in header names by the RFC-7230 and this behavior is + // implemented as a security measure due to systems that treat '_' and '-' as interchangeable. 
+ // Envoy by default allows client request headers with underscore characters. + enum HeadersWithUnderscoresAction { + // Allow headers with underscores. This is the default behavior. + ALLOW = 0; + + // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests + // end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter + // is incremented for each rejected request. + REJECT_REQUEST = 1; + + // Drop the header with name containing underscores. The header is dropped before the filter + // chain is invoked and as such filters will not see dropped headers. The + // "httpN.dropped_headers_with_underscores" is incremented for each dropped header. + DROP_HEADER = 2; + } + // The idle timeout for connections. The idle timeout is defined as the // period in which there are no active requests. If not set, there is no idle timeout. When the // idle timeout is reached the connection will be closed. If the connection is an HTTP/2 @@ -44,6 +65,11 @@ message HttpProtocolOptions { // maximum number of request headers allowed is 100. Requests that exceed this limit will receive // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}]; + + // Action to take when a client request with a header name containing underscore characters is + // received. If this setting is not specified, the value defaults to ALLOW. Note: upstream + // responses are not affected by this setting. 
+ HeadersWithUnderscoresAction headers_with_underscores_action = 5; } message Http1ProtocolOptions { diff --git a/api/envoy/api/v3alpha/core/protocol.proto b/api/envoy/api/v3alpha/core/protocol.proto index 41246f17675af..1f91eae1603dc 100644 --- a/api/envoy/api/v3alpha/core/protocol.proto +++ b/api/envoy/api/v3alpha/core/protocol.proto @@ -17,7 +17,28 @@ import "validate/validate.proto"; message TcpProtocolOptions { } +// [#next-free-field: 6] message HttpProtocolOptions { + // Action to take when Envoy receives client request with header names containing underscore + // characters. + // Underscore character is allowed in header names by the RFC-7230 and this behavior is + // implemented as a security measure due to systems that treat '_' and '-' as interchangeable. + // Envoy by default allows client request headers with underscore characters. + enum HeadersWithUnderscoresAction { + // Allow headers with underscores. This is the default behavior. + ALLOW = 0; + + // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests + // end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter + // is incremented for each rejected request. + REJECT_REQUEST = 1; + + // Drop the header with name containing underscores. The header is dropped before the filter + // chain is invoked and as such filters will not see dropped headers. The + // "httpN.dropped_headers_with_underscores" is incremented for each dropped header. + DROP_HEADER = 2; + } + // The idle timeout for connections. The idle timeout is defined as the // period in which there are no active requests. If not set, there is no idle timeout. When the // idle timeout is reached the connection will be closed. If the connection is an HTTP/2 @@ -44,6 +65,11 @@ message HttpProtocolOptions { // maximum number of request headers allowed is 100. Requests that exceed this limit will receive // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. 
google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}]; + + // Action to take when a client request with a header name containing underscore characters is + // received. If this setting is not specified, the value defaults to ALLOW. Note: upstream + // responses are not affected by this setting. + HeadersWithUnderscoresAction headers_with_underscores_action = 5; } message Http1ProtocolOptions { diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 19f3fe3dd37d7..7f4ea0c73f7c1 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -300,6 +300,14 @@ message HttpConnectionManager { // is terminated with a 408 Request Timeout error code if no upstream response // header has been received, otherwise a stream reset occurs. // + // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough + // window to write any remaining stream data once the entirety of stream data (local end stream is + // true) has been buffered pending available window. In other words, this timeout defends against + // a peer that does not release enough window to completely write the stream, even though all + // data has been proxied within available flow control windows. If the timeout is hit in this + // case, the :ref:`tx_flush_timeout ` counter will be + // incremented. + // // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due // to the granularity of events presented to the connection manager. 
For example, while receiving // very large request headers, it may be the case that there is traffic regularly arriving on the diff --git a/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto index f96b590d7130a..040895af45948 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto @@ -287,6 +287,14 @@ message HttpConnectionManager { // is terminated with a 408 Request Timeout error code if no upstream response // header has been received, otherwise a stream reset occurs. // + // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough + // window to write any remaining stream data once the entirety of stream data (local end stream is + // true) has been buffered pending available window. In other words, this timeout defends against + // a peer that does not release enough window to completely write the stream, even though all + // data has been proxied within available flow control windows. If the timeout is hit in this + // case, the :ref:`tx_flush_timeout ` counter will be + // incremented. + // // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due // to the granularity of events presented to the connection manager. 
For example, while receiving // very large request headers, it may be the case that there is traffic regularly arriving on the diff --git a/api/envoy/config/rbac/v2/rbac.proto b/api/envoy/config/rbac/v2/rbac.proto index 90dfb07bb1dad..dbf7d669a0fe1 100644 --- a/api/envoy/config/rbac/v2/rbac.proto +++ b/api/envoy/config/rbac/v2/rbac.proto @@ -9,6 +9,7 @@ option java_package = "io.envoyproxy.envoy.config.rbac.v2"; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/route/route.proto"; import "envoy/type/matcher/metadata.proto"; +import "envoy/type/matcher/path.proto"; import "envoy/type/matcher/string.proto"; import "google/api/expr/v1alpha1/syntax.proto"; @@ -48,7 +49,8 @@ import "validate/validate.proto"; // - and_rules: // rules: // - header: { name: ":method", exact_match: "GET" } -// - header: { name: ":path", regex_match: "/products(/.*)?" } +// - url_path: +// path: { prefix: "/products" } // - or_rules: // rules: // - destination_port: 80 @@ -99,7 +101,7 @@ message Policy { } // Permission defines an action (or actions) that a principal can take. -// [#next-free-field: 10] +// [#next-free-field: 11] message Permission { // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context, // each are applied with the associated behavior. @@ -121,8 +123,13 @@ message Permission { // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. + // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` + // field if you want to match the URL path without the query and fragment string. api.v2.route.HeaderMatcher header = 4; + // A URL path on the incoming HTTP request. Only available for HTTP. + type.matcher.PathMatcher url_path = 10; + // A CIDR block that describes the destination IP. 
api.v2.core.CidrRange destination_ip = 5; @@ -161,7 +168,7 @@ message Permission { } // Principal defines an identity or a group of identities for a downstream subject. -// [#next-free-field: 9] +// [#next-free-field: 10] message Principal { // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context, // each are applied with the associated behavior. @@ -199,8 +206,13 @@ message Principal { // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. + // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` + // field if you want to match the URL path without the query and fragment string. api.v2.route.HeaderMatcher header = 6; + // A URL path on the incoming HTTP request. Only available for HTTP. + type.matcher.PathMatcher url_path = 9; + // Metadata that describes additional information about the principal. type.matcher.MetadataMatcher metadata = 7; diff --git a/api/envoy/config/rbac/v3alpha/rbac.proto b/api/envoy/config/rbac/v3alpha/rbac.proto index 398b5f099f72f..c84c1faab323d 100644 --- a/api/envoy/config/rbac/v3alpha/rbac.proto +++ b/api/envoy/config/rbac/v3alpha/rbac.proto @@ -9,6 +9,7 @@ option java_package = "io.envoyproxy.envoy.config.rbac.v3alpha"; import "envoy/api/v3alpha/core/address.proto"; import "envoy/api/v3alpha/route/route.proto"; import "envoy/type/matcher/v3alpha/metadata.proto"; +import "envoy/type/matcher/v3alpha/path.proto"; import "envoy/type/matcher/v3alpha/string.proto"; import "google/api/expr/v1alpha1/syntax.proto"; @@ -48,7 +49,8 @@ import "validate/validate.proto"; // - and_rules: // rules: // - header: { name: ":method", exact_match: "GET" } -// - header: { name: ":path", regex_match: "/products(/.*)?" 
} +// - url_path: +// path: { prefix: "/products" } // - or_rules: // rules: // - destination_port: 80 @@ -99,7 +101,7 @@ message Policy { } // Permission defines an action (or actions) that a principal can take. -// [#next-free-field: 10] +// [#next-free-field: 11] message Permission { // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context, // each are applied with the associated behavior. @@ -121,8 +123,13 @@ message Permission { // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. + // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` + // field if you want to match the URL path without the query and fragment string. api.v3alpha.route.HeaderMatcher header = 4; + // A URL path on the incoming HTTP request. Only available for HTTP. + type.matcher.v3alpha.PathMatcher url_path = 10; + // A CIDR block that describes the destination IP. api.v3alpha.core.CidrRange destination_ip = 5; @@ -161,7 +168,7 @@ message Permission { } // Principal defines an identity or a group of identities for a downstream subject. -// [#next-free-field: 9] +// [#next-free-field: 10] message Principal { // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context, // each are applied with the associated behavior. @@ -199,8 +206,13 @@ message Principal { // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. + // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` + // field if you want to match the URL path without the query and fragment string. api.v3alpha.route.HeaderMatcher header = 6; + // A URL path on the incoming HTTP request. Only available for HTTP. + type.matcher.v3alpha.PathMatcher url_path = 9; + // Metadata that describes additional information about the principal. 
type.matcher.v3alpha.MetadataMatcher metadata = 7; diff --git a/api/envoy/type/matcher/path.proto b/api/envoy/type/matcher/path.proto new file mode 100644 index 0000000000000..d88d42bc902b2 --- /dev/null +++ b/api/envoy/type/matcher/path.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package envoy.type.matcher; + +option java_outer_classname = "PathProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.type.matcher"; + +import "envoy/type/matcher/string.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: Path matcher] + +// Specifies the way to match a path on HTTP request. +message PathMatcher { + oneof rule { + option (validate.required) = true; + + // The `path` must match the URL path portion of the :path header. The query and fragment + // string (if present) are removed in the URL path portion. + // For example, the path */data* will match the *:path* header */data#fragment?param=value*. + StringMatcher path = 1 [(validate.rules).message = {required: true}]; + } +} diff --git a/api/envoy/type/matcher/string.proto b/api/envoy/type/matcher/string.proto index 8837fab8ee710..fd4e596d15c5d 100644 --- a/api/envoy/type/matcher/string.proto +++ b/api/envoy/type/matcher/string.proto @@ -13,7 +13,7 @@ import "validate/validate.proto"; // [#protodoc-title: StringMatcher] // Specifies the way to match a string. -// [#next-free-field: 6] +// [#next-free-field: 7] message StringMatcher { oneof match_pattern { option (validate.required) = true; @@ -59,6 +59,11 @@ message StringMatcher { // The input string must match the regular expression specified here. RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; } + + // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no + // effect for the safe_regex match. + // For example, the matcher *data* will match both input string *Data* and *data* if set to true. 
+ bool ignore_case = 6; } // Specifies a list of ways to match a string. diff --git a/api/envoy/type/matcher/v3alpha/path.proto b/api/envoy/type/matcher/v3alpha/path.proto new file mode 100644 index 0000000000000..bd076853ee3db --- /dev/null +++ b/api/envoy/type/matcher/v3alpha/path.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package envoy.type.matcher.v3alpha; + +option java_outer_classname = "PathProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.type.matcher.v3alpha"; + +import "envoy/type/matcher/v3alpha/string.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: Path matcher] + +// Specifies the way to match a path on HTTP request. +message PathMatcher { + oneof rule { + option (validate.required) = true; + + // The `path` must match the URL path portion of the :path header. The query and fragment + // string (if present) are removed in the URL path portion. + // For example, the path */data* will match the *:path* header */data#fragment?param=value*. + StringMatcher path = 1 [(validate.rules).message = {required: true}]; + } +} diff --git a/api/envoy/type/matcher/v3alpha/string.proto b/api/envoy/type/matcher/v3alpha/string.proto index 99d83433e7fa7..b732e509e213f 100644 --- a/api/envoy/type/matcher/v3alpha/string.proto +++ b/api/envoy/type/matcher/v3alpha/string.proto @@ -13,7 +13,7 @@ import "validate/validate.proto"; // [#protodoc-title: StringMatcher] // Specifies the way to match a string. -// [#next-free-field: 6] +// [#next-free-field: 7] message StringMatcher { reserved 4; @@ -48,6 +48,11 @@ message StringMatcher { // The input string must match the regular expression specified here. RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; } + + // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no + // effect for the safe_regex match. + // For example, the matcher *data* will match both input string *Data* and *data* if set to true. 
+ bool ignore_case = 6; } // Specifies a list of ways to match a string. diff --git a/ci/Dockerfile-envoy-image b/ci/Dockerfile-envoy similarity index 100% rename from ci/Dockerfile-envoy-image rename to ci/Dockerfile-envoy diff --git a/ci/do_ci.sh b/ci/do_ci.sh index dc12befccec8a..63761567f45ad 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -56,6 +56,10 @@ function cp_binary_for_image_build() { # Copy for azp which doesn't preserve permissions, creating a tar archive tar czf "${ENVOY_BUILD_DIR}"/envoy_binary.tar.gz -C "${ENVOY_SRCDIR}" build_"$1" build_"$1"_stripped + + # Remove binaries to save space, only if BUILD_REASON exists (running in AZP) + [[ -z "${BUILD_REASON}" ]] || \ + rm -rf "${ENVOY_SRCDIR}"/build_"$1" "${ENVOY_SRCDIR}"/build_"$1"_stripped "${ENVOY_DELIVERY_DIR}"/envoy } function bazel_binary_build() { @@ -145,7 +149,7 @@ elif [[ "$CI_TARGET" == "bazel.debug.server_only" ]]; then exit 0 elif [[ "$CI_TARGET" == "bazel.asan" ]]; then setup_clang_toolchain - BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} -c dbg --config=clang-asan" + BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} -c dbg --config=clang-asan --build_tests_only" echo "bazel ASAN/UBSAN debug build with tests" echo "Building and testing envoy tests ${TEST_TARGETS}" bazel_with_collection test ${BAZEL_BUILD_OPTIONS} ${TEST_TARGETS} @@ -174,7 +178,7 @@ elif [[ "$CI_TARGET" == "bazel.tsan" ]]; then setup_clang_toolchain echo "bazel TSAN debug build with tests" echo "Building and testing envoy tests ${TEST_TARGETS}" - bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c dbg --config=clang-tsan ${TEST_TARGETS} + bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c dbg --config=clang-tsan --build_tests_only ${TEST_TARGETS} echo "Building and testing envoy-filter-example tests..." 
cd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c dbg --config=clang-tsan \ diff --git a/ci/docker_build.sh b/ci/docker_build.sh deleted file mode 100755 index 4623d44a7b203..0000000000000 --- a/ci/docker_build.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -set -ex - -DOCKER_IMAGE_PREFIX="${DOCKER_IMAGE_PREFIX:-envoyproxy/envoy}" - -docker build -f ci/Dockerfile-envoy-image -t "${DOCKER_IMAGE_PREFIX}-dev:latest" . -docker build -f ci/Dockerfile-envoy-alpine -t "${DOCKER_IMAGE_PREFIX}-alpine-dev:latest" . -docker build -f ci/Dockerfile-envoy-alpine-debug -t "${DOCKER_IMAGE_PREFIX}-alpine-debug-dev:latest" . diff --git a/ci/docker_ci.sh b/ci/docker_ci.sh new file mode 100755 index 0000000000000..fb2d575bd31db --- /dev/null +++ b/ci/docker_ci.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +# Do not ever set -x here, it is a security hazard as it will place the credentials below in the +# CI logs. +set -e + +# This prefix is altered for the private security images on setec builds. +DOCKER_IMAGE_PREFIX="${DOCKER_IMAGE_PREFIX:-envoyproxy/envoy}" + +# Test the docker build in all cases, but use a local tag that we will overwrite before push in the +# cases where we do push. +for BUILD_TYPE in "" "-alpine" "-alpine-debug"; do + docker build -f ci/Dockerfile-envoy"${BUILD_TYPE}" -t "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}:local" . +done + +MASTER_BRANCH="refs/heads/master" +RELEASE_BRANCH_REGEX="^refs/heads/release/v.*" +RELEASE_TAG_REGEX="^refs/tags/v.*" + +# Only push images for master builds, release branch builds, and tag builds. +if [[ "${AZP_BRANCH}" != "${MASTER_BRANCH}" ]] && \ + ! [[ "${AZP_BRANCH}" =~ ${RELEASE_BRANCH_REGEX} ]] && \ + ! [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then + echo 'Ignoring non-master branch or tag for docker push.' + exit 0 +fi + +# For master builds and release branch builds use the dev repo. Otherwise we assume it's a tag and +# we push to the primary repo. 
+if [[ "${AZP_BRANCH}" == "${MASTER_BRANCH}" ]] || \ + [[ "${AZP_BRANCH}" =~ ${RELEASE_BRANCH_REGEX} ]]; then + IMAGE_POSTFIX="-dev" + IMAGE_NAME="$AZP_SHA1" +else + IMAGE_POSTFIX="" + IMAGE_NAME="${AZP_BRANCH/refs\/tags\//}" +fi + +docker login -u "$DOCKERHUB_USERNAME" -p "$DOCKERHUB_PASSWORD" + +for BUILD_TYPE in "" "-alpine" "-alpine-debug"; do + docker tag "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}:local" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}" + docker push "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}" + + # Only push latest on master builds. + if [[ "${AZP_BRANCH}" == "${MASTER_BRANCH}" ]]; then + docker tag "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}:local" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:latest" + docker push "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:latest" + fi + + # Push vX.Y-latest to tag the latest image in a release line + if [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then + RELEASE_LINE=$(echo "$IMAGE_NAME" | sed -E 's/(v[0-9]+\.[0-9]+)\.[0-9]+/\1-latest/') + docker tag "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}:local" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${RELEASE_LINE}" + docker push "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${RELEASE_LINE}" + fi +done + + diff --git a/ci/docker_push.sh b/ci/docker_push.sh deleted file mode 100755 index 449cfc67b854c..0000000000000 --- a/ci/docker_push.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -# Do not ever set -x here, it is a security hazard as it will place the credentials below in the -# CircleCI logs. -set -e - -if [[ -n "$CIRCLE_PULL_REQUEST" ]]; then - echo 'Ignoring PR branch for docker push.' 
- exit 0 -fi - -DOCKER_IMAGE_PREFIX="${DOCKER_IMAGE_PREFIX:-envoyproxy/envoy}" - -# push the envoy image on tags or merge to master -if [[ -n "$CIRCLE_TAG" ]] || [[ "$AZP_BRANCH" = 'refs/heads/master' ]]; then - docker login -u "$DOCKERHUB_USERNAME" -p "$DOCKERHUB_PASSWORD" - - for BUILD_TYPE in "" "-alpine" "-alpine-debug"; do - docker push "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}-dev:latest" - docker tag "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}-dev:latest" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}-dev:${CIRCLE_SHA1}" - docker push "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}-dev:${CIRCLE_SHA1}" - done - - # This script tests the docker examples. - # TODO(mattklein123): This almost always times out on CircleCI. Do not run for now until we - # have a better CI setup. - #./ci/verify_examples.sh -else - echo 'Ignoring non-master branch for docker push.' -fi diff --git a/ci/docker_tag.sh b/ci/docker_tag.sh deleted file mode 100755 index f3a4a205ae625..0000000000000 --- a/ci/docker_tag.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -# Do not ever set -x here, it is a security hazard as it will place the credentials below in the -# CircleCI logs. -set -e - -DOCKER_IMAGE_PREFIX="${DOCKER_IMAGE_PREFIX:-envoyproxy/envoy}" - -if [[ "${AZP_BRANCH}" =~ ^refs/tags/v.* ]]; then - CIRCLE_TAG="${AZP_BRANCH/refs\/tags\//}" -fi - -if [[ -n "$CIRCLE_TAG" ]]; then - docker login -u "$DOCKERHUB_USERNAME" -p "$DOCKERHUB_PASSWORD" - - for BUILD_TYPE in "" "-alpine" "-alpine-debug"; do - docker pull "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}"-dev:"$CIRCLE_SHA1" - docker tag "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}"-dev:"$CIRCLE_SHA1" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}":"$CIRCLE_TAG" - docker push "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}":"$CIRCLE_TAG" - docker tag "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}"-dev:"$CIRCLE_SHA1" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}":latest - docker push "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}":latest - done -else - echo 'Ignoring non-tag event for docker tag.' 
-fi diff --git a/ci/envoy_build_sha.sh b/ci/envoy_build_sha.sh index 5783a2a637bfa..6ccd9848f8d19 100644 --- a/ci/envoy_build_sha.sh +++ b/ci/envoy_build_sha.sh @@ -1,2 +1,2 @@ -ENVOY_BUILD_SHA=$(grep envoyproxy/envoy-build-ubuntu@sha256 $(dirname $0)/../.circleci/config.yml | sed -e 's#.*envoyproxy/envoy-build-ubuntu@sha256:\(.*\)#\1#' | uniq) +ENVOY_BUILD_SHA=$(grep envoyproxy/envoy-build-ubuntu@sha256 $(dirname $0)/../.bazelrc | sed -e 's#.*envoyproxy/envoy-build-ubuntu@sha256:\(.*\)#\1#' | uniq) [[ $(wc -l <<< "${ENVOY_BUILD_SHA}" | awk '{$1=$1};1') == 1 ]] || (echo ".circleci/config.yml hashes are inconsistent!" && exit 1) diff --git a/ci/mac_ci_setup.sh b/ci/mac_ci_setup.sh index 42cb7c63faa57..832aecaf869b1 100755 --- a/ci/mac_ci_setup.sh +++ b/ci/mac_ci_setup.sh @@ -2,9 +2,9 @@ # Installs the dependencies required for a macOS build via homebrew. # Tools are not upgraded to new versions. - -# Setup bazelbuild tap -brew tap bazelbuild/tap +# See: +# https://github.com/actions/virtual-environments/blob/master/images/macos/macos-10.15-Readme.md for +# a list of pre-installed tools in the macOS image. function is_installed { brew ls --versions "$1" >/dev/null @@ -12,8 +12,7 @@ function is_installed { function install { echo "Installing $1" - if ! brew install "$1" - then + if ! brew install "$1"; then echo "Failed to install $1" exit 1 fi @@ -24,7 +23,7 @@ if ! brew update; then exit 1 fi -DEPS="automake bazelbuild/tap/bazelisk cmake coreutils go libtool wget ninja" +DEPS="automake cmake coreutils go libtool wget ninja" for DEP in ${DEPS} do is_installed "${DEP}" || install "${DEP}" @@ -35,3 +34,13 @@ if [ -n "$CIRCLECI" ]; then # convert https://github.com to ssh://git@github.com, which jgit does not support. 
mv ~/.gitconfig ~/.gitconfig_save fi + +# Required as bazel and a foreign bazelisk are installed in the latest macos vm image, we have +# to unlink/overwrite them to install bazelisk +echo "Installing bazelbuild/tap/bazelisk" +brew tap bazelbuild/tap +brew reinstall --force bazelbuild/tap/bazelisk +if ! brew link --overwrite bazelbuild/tap/bazelisk; then + echo "Failed to install and link bazelbuild/tap/bazelisk" + exit 1 +fi diff --git a/ci/run_fuzzit.sh b/ci/run_fuzzit.sh index 950090e278a27..44173fb7c30dd 100755 --- a/ci/run_fuzzit.sh +++ b/ci/run_fuzzit.sh @@ -35,7 +35,7 @@ wget -O fuzzit https://github.com/fuzzitdev/fuzzit/releases/latest/download/fuzz chmod a+x fuzzit PREFIX=$(realpath /build/tmp/_bazel_bazel/*/execroot/envoy/bazel-out/k8-fastbuild/bin) -SLOW_TARGETS=("access-log-formatter" "h1-capture" "h1-capture-direct-response" "response-header" "request-header") +SLOW_TARGETS=("access-log-formatter" "h1-capture" "h1-capture-direct-response" "response-header" "request-header" "config" "server") for t in ${FILTERED_FUZZER_TARGETS} do TARGET_BASE="$(expr "$t" : '.*/\(.*\)_fuzz_test')" diff --git a/docs/root/_static/multilevel_deployment.svg b/docs/root/_static/multilevel_deployment.svg new file mode 100644 index 0000000000000..ad2b3e12e806e --- /dev/null +++ b/docs/root/_static/multilevel_deployment.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/api-v2/types/types.rst b/docs/root/api-v2/types/types.rst index 63ddb5e827afe..b719a814683b2 100644 --- a/docs/root/api-v2/types/types.rst +++ b/docs/root/api-v2/types/types.rst @@ -11,6 +11,7 @@ Types ../type/range.proto ../type/matcher/metadata.proto ../type/matcher/number.proto + ../type/matcher/path.proto ../type/matcher/regex.proto ../type/matcher/string.proto ../type/matcher/value.proto diff --git a/docs/root/configuration/best_practices/best_practices.rst b/docs/root/configuration/best_practices/best_practices.rst index 259e6aefbd8ea..51e423b6f3034 100644 --- 
a/docs/root/configuration/best_practices/best_practices.rst +++ b/docs/root/configuration/best_practices/best_practices.rst @@ -5,3 +5,5 @@ Configuration best practices :maxdepth: 2 edge + level_two + diff --git a/docs/root/configuration/best_practices/edge.rst b/docs/root/configuration/best_practices/edge.rst index 94e1728eb1728..29ee298562fa4 100644 --- a/docs/root/configuration/best_practices/edge.rst +++ b/docs/root/configuration/best_practices/edge.rst @@ -23,6 +23,9 @@ HTTP proxies should additionally configure: * :ref:`HTTP/2 maximum concurrent streams limit ` to 100, * :ref:`HTTP/2 initial stream window size limit ` to 64 KiB, * :ref:`HTTP/2 initial connection window size limit ` to 1 MiB. +* :ref:`headers_with_underscores_action setting ` to REJECT_REQUEST, to protect upstream services that treat '_' and '-' as interchangeable. +* :ref:`Listener connection limits. ` +* :ref:`Global downstream connection limits `. The following is a YAML example of the above recommendation. @@ -108,3 +111,15 @@ The following is a YAML example of the above recommendation. http2_protocol_options: initial_stream_window_size: 65536 # 64 KiB initial_connection_window_size: 1048576 # 1 MiB + + layered_runtime: + layers: + - name: static_layer_0 + static_layer: + envoy: + resource_limits: + listener: + example_listener_name: + connection_limit: 10000 + overload: + global_downstream_max_connections: 50000 diff --git a/docs/root/configuration/best_practices/level_two.rst b/docs/root/configuration/best_practices/level_two.rst new file mode 100644 index 0000000000000..6bdf1c85e8bc1 --- /dev/null +++ b/docs/root/configuration/best_practices/level_two.rst @@ -0,0 +1,37 @@ +.. _best_practices_level2: + +Configuring Envoy as a level two proxy +====================================== + +Envoy is a production-ready proxy, however, the default settings that are tailored for the +edge use case may need to be adjusted when using Envoy in a multi-level deployment as a +"level two" HTTP/2 proxy. 
+ +.. image:: /_static/multilevel_deployment.svg + +**In summary, if you run level two Envoy version 1.11.1 or greater which terminates +HTTP/2, we strongly advise you to change the HTTP/2 configuration of your level +two Envoy, by setting its downstream +:ref:`validation of HTTP/2 messaging option ` +to true.** + +If there is an invalid HTTP/2 request and this option is not set, the Envoy in +question will reset the entire connection. This behavior was changed as part of +the 1.11.1 security release, to increase the security of Edge Envoys. Unfortunately, +because there are no guarantees that edge proxies will enforce HTTP/1 or HTTP/2 +standards compliance as rigorously as Envoy’s HTTP/2 stack does, this can result +in a problem as follows. If one client sends a request that for example passes +level one proxy's validation checks, and it is forwarded over an upstream multiplexed +HTTP/2 connection (potentially shared with other clients) the strict enforcement on +the level two Envoy HTTP/2 will reset all the streams on that connection, causing +a service disruption to the clients sharing that L1-L2 connection. If a malicious +user has insight into what traffic will bypass level one checks, they could spray +“bad” traffic across the level one fleet, causing serious disruption to other users’ +traffic. + +Please note that the +:ref:`validation of HTTP/2 messaging option ` +is planned to be deprecated and replaced with mandatory configuration in the HttpConnectionManager, to ensure +that what is now an easily overlooked option would need to be configured, ideally +appropriately for the given Envoy deployment. Please refer to the +https://github.com/envoyproxy/envoy/issues/9285 for more information. 
diff --git a/docs/root/configuration/http/http_conn_man/stats.rst b/docs/root/configuration/http/http_conn_man/stats.rst index 6d46fb937bd0b..f375832b8c6c6 100644 --- a/docs/root/configuration/http/http_conn_man/stats.rst +++ b/docs/root/configuration/http/http_conn_man/stats.rst @@ -110,8 +110,10 @@ All http1 statistics are rooted at *http1.* :header: Name, Type, Description :widths: 1, 1, 2 + dropped_headers_with_underscores, Counter, Total number of dropped headers with names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. metadata_not_supported_error, Counter, Total number of metadata dropped during HTTP/1 encoding response_flood, Counter, Total number of connections closed due to response flooding + requests_rejected_with_underscores_in_headers, Counter, Total numbers of rejected requests due to header names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. Http2 codec statistics ~~~~~~~~~~~~~~~~~~~~~~ @@ -122,6 +124,7 @@ All http2 statistics are rooted at *http2.* :header: Name, Type, Description :widths: 1, 1, 2 + dropped_headers_with_underscores, Counter, Total number of dropped headers with names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. header_overflow, Counter, Total number of connections reset due to the headers being larger than the :ref:`configured value `. headers_cb_no_stream, Counter, Total number of errors where a header callback is called without an associated stream. This tracks an unexpected occurrence due to an as yet undiagnosed bug inbound_empty_frames_flood, Counter, Total number of connections terminated for exceeding the limit on consecutive inbound frames with an empty payload and no end stream flag. The limit is configured by setting the :ref:`max_consecutive_inbound_frames_with_empty_payload config setting `. 
@@ -129,11 +132,21 @@ All http2 statistics are rooted at *http2.* inbound_window_update_frames_flood, Counter, Total number of connections terminated for exceeding the limit on inbound frames of type WINDOW_UPDATE. The limit is configured by setting the :ref:`max_inbound_window_updateframes_per_data_frame_sent config setting `. outbound_flood, Counter, Total number of connections terminated for exceeding the limit on outbound frames of all types. The limit is configured by setting the :ref:`max_outbound_frames config setting `. outbound_control_flood, Counter, "Total number of connections terminated for exceeding the limit on outbound frames of types PING, SETTINGS and RST_STREAM. The limit is configured by setting the :ref:`max_outbound_control_frames config setting `." + requests_rejected_with_underscores_in_headers, Counter, Total numbers of rejected requests due to header names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. rx_messaging_error, Counter, Total number of invalid received frames that violated `section 8 `_ of the HTTP/2 spec. This will result in a *tx_reset* rx_reset, Counter, Total number of reset stream frames received by Envoy too_many_header_frames, Counter, Total number of times an HTTP2 connection is reset due to receiving too many headers frames. 
Envoy currently supports proxying at most one header frame for 100-Continue one non-100 response code header frame and one frame with trailers trailers, Counter, Total number of trailers seen on requests coming from downstream + tx_flush_timeout, Counter, Total number of :ref:`stream idle timeouts ` waiting for open stream window to flush the remainder of a stream tx_reset, Counter, Total number of reset stream frames transmitted by Envoy + streams_active, Gauge, Active streams as observed by the codec + pending_send_bytes, Gauge, Currently buffered body data in bytes waiting to be written when stream/connection window is opened. + +.. attention:: + + The HTTP/2 `streams_active` gauge may be greater than the HTTP connection manager + `downstream_rq_active` gauge due to differences in stream accounting between the codec and the + HTTP connection manager. Tracing statistics ------------------ diff --git a/docs/root/configuration/listeners/listeners.rst b/docs/root/configuration/listeners/listeners.rst index 73605a853658f..3a8ea7112664c 100644 --- a/docs/root/configuration/listeners/listeners.rst +++ b/docs/root/configuration/listeners/listeners.rst @@ -8,6 +8,7 @@ Listeners overview stats + runtime listener_filters/listener_filters network_filters/network_filters lds diff --git a/docs/root/configuration/listeners/runtime.rst b/docs/root/configuration/listeners/runtime.rst new file mode 100644 index 0000000000000..b42b6aa5fa3ff --- /dev/null +++ b/docs/root/configuration/listeners/runtime.rst @@ -0,0 +1,8 @@ +.. _config_listeners_runtime: + +Runtime +------- +The following runtime settings are supported: + +envoy.resource_limits.listener..connection_limit + Sets a limit on the number of active connections to the specified listener. 
diff --git a/docs/root/configuration/listeners/stats.rst b/docs/root/configuration/listeners/stats.rst index 73be50156bea7..a144f2764df88 100644 --- a/docs/root/configuration/listeners/stats.rst +++ b/docs/root/configuration/listeners/stats.rst @@ -16,8 +16,10 @@ Every listener has a statistics tree rooted at *listener.
.* with the fo downstream_cx_destroy, Counter, Total destroyed connections downstream_cx_active, Gauge, Total active connections downstream_cx_length_ms, Histogram, Connection length milliseconds + downstream_cx_overflow, Counter, Total connections rejected due to enforcement of listener connection limit downstream_pre_cx_timeout, Counter, Sockets that timed out during listener filter processing downstream_pre_cx_active, Gauge, Sockets currently undergoing listener filter processing + global_cx_overflow, Counter, Total connections rejected due to enforcement of the global connection limit no_filter_chain_match, Counter, Total connections that didn't match any filter chain ssl.connection_error, Counter, Total TLS connection errors not including failed certificate verifications ssl.handshake, Counter, Total successful TLS connection handshakes diff --git a/docs/root/configuration/operations/overload_manager/overload_manager.rst b/docs/root/configuration/operations/overload_manager/overload_manager.rst index 06d298a8f22ac..b888e0f649ed8 100644 --- a/docs/root/configuration/operations/overload_manager/overload_manager.rst +++ b/docs/root/configuration/operations/overload_manager/overload_manager.rst @@ -53,6 +53,30 @@ The following overload actions are supported: envoy.overload_actions.stop_accepting_connections, Envoy will stop accepting new network connections on its configured listeners envoy.overload_actions.shrink_heap, Envoy will periodically try to shrink the heap by releasing free memory to the system +Limiting Active Connections +--------------------------- + +Currently, the only supported way to limit the total number of active connections allowed across all +listeners is via specifying an integer through the runtime key +``overload.global_downstream_max_connections``. The connection limit is recommended to be less than +half of the system's file descriptor limit, to account for upstream connections, files, and other +usage of file descriptors.
+If the value is unspecified, there is no global limit on the number of active downstream connections +and Envoy will emit a warning indicating this at startup. To disable the warning without setting a +limit on the number of active downstream connections, the runtime value may be set to a very large +limit (~2e9). + +If it is desired to only limit the number of downstream connections for a particular listener, +per-listener limits can be set via the :ref:`listener configuration `. + +One may simultaneously specify both per-listener and global downstream connection limits and the +conditions will be enforced independently. For instance, if it is known that a particular listener +should have a smaller number of open connections than others, one may specify a smaller connection +limit for that specific listener and allow the global limit to enforce resource utilization among +all listeners. + +An example configuration can be found in the :ref:`edge best practices document `. + Statistics ---------- diff --git a/docs/root/faq/configuration/level_two.rst b/docs/root/faq/configuration/level_two.rst new file mode 100644 index 0000000000000..72c46eecd3783 --- /dev/null +++ b/docs/root/faq/configuration/level_two.rst @@ -0,0 +1,7 @@ +.. _faq_level2: + +How do I configure Envoy as a level two proxy? +============================================== + +Refer to :ref:`configuring Envoy as a level two proxy ` +for an example of the level 2 proxy configuration. diff --git a/docs/root/faq/configuration/resource_limits.rst b/docs/root/faq/configuration/resource_limits.rst new file mode 100644 index 0000000000000..214096486eb6f --- /dev/null +++ b/docs/root/faq/configuration/resource_limits.rst @@ -0,0 +1,20 @@ +.. _faq_resource_limits: + +How does Envoy prevent file descriptor exhaustion? 
+================================================== + +:ref:`Per-listener connection limits ` may be configured as an upper bound +on the number of active connections a particular listener will accept. The listener may accept more +connections than the configured value on the order of the number of worker threads. + +In addition, one may configure a :ref:`global limit ` on the number of +connections that will apply across all listeners. + +On Unix-based systems, it is recommended to keep the sum of all connection limits less than half of +the system's file descriptor limit to account for upstream connections, files, and other usage of +file descriptors. + +.. note:: + + This per-listener connection limiting will eventually be handled by the :ref:`overload manager + `. diff --git a/docs/root/faq/configuration/timeouts.rst b/docs/root/faq/configuration/timeouts.rst index 46d927acf299d..955037607f349 100644 --- a/docs/root/faq/configuration/timeouts.rst +++ b/docs/root/faq/configuration/timeouts.rst @@ -52,7 +52,9 @@ context request/stream is interchangeable. ` is the amount of time that the connection manager will allow a stream to exist with no upstream or downstream activity. The default stream idle timeout is *5 minutes*. This timeout is strongly - recommended for streaming APIs (requests or responses that never end). + recommended for all requests (not just streaming requests/responses) as it additionally defends + against an HTTP/2 peer that does not open stream window once an entire response has been buffered + to be sent to a downstream client.
Route timeouts ^^^^^^^^^^^^^^ diff --git a/docs/root/faq/overview.rst b/docs/root/faq/overview.rst index b30be9823d974..9a484a57d75e5 100644 --- a/docs/root/faq/overview.rst +++ b/docs/root/faq/overview.rst @@ -27,11 +27,13 @@ Configuration :maxdepth: 2 configuration/edge + configuration/level_two configuration/sni configuration/zone_aware_routing configuration/zipkin_tracing configuration/flow_control configuration/timeouts + configuration/resource_limits Load balancing -------------- diff --git a/docs/root/install/building.rst b/docs/root/install/building.rst index 9541bc6316bda..a475a6958607a 100644 --- a/docs/root/install/building.rst +++ b/docs/root/install/building.rst @@ -41,13 +41,9 @@ be found in the following repositories: * `envoyproxy/envoy-alpine-debug `_: Release binary with debug symbols on top of a **glibc** alpine base. -In the above repositories, the *latest* tag points to the latest official release. - .. note:: - The above repositories used to contain the dev images described below. They remain to avoid - breaking existing users. New dev images are added to the repositories described in the following - section. + In the above repositories, we tag a *vX.Y-latest* image for each security/stable release line. On every master commit we additionally create a set of development Docker images. These images can be found in the following repositories: diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index a52195130cccb..f0c76944e7ce1 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -7,6 +7,25 @@ Version history * listeners: fixed issue where :ref:`TLS inspector listener filter ` could have been bypassed by a client using only TLS 1.3. * sds: fixed the SDS vulnerability that TLS validation context (e.g., subject alt name or hash) cannot be effectively validated in some cases. * http: added HTTP/1.1 flood protection. 
Can be temporarily disabled using the runtime feature `envoy.reloadable_features.http1_flood_protection`. +1.12.5 (Pending) +================ +* http: the :ref:`stream_idle_timeout ` + now also defends against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. +* listener: add runtime support for `per-listener limits ` on active/accepted connections. +* overload management: add runtime support for :ref:`global limits ` on active/accepted connections. + +1.12.4 (June 8, 2020) +===================== +* http: added :ref:`headers_with_underscores_action setting ` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior. +* http: fixed CVE-2020-11080 by rejecting HTTP/2 SETTINGS frames with too many parameters. + +1.12.3 (March 3, 2020) +====================== +* buffer: force copy when appending small slices to OwnedImpl buffer to avoid fragmentation. +* http: added HTTP/1.1 flood protection. Can be temporarily disabled using the runtime feature `envoy.reloadable_features.http1_flood_protection`. +* listeners: fixed issue where :ref:`TLS inspector listener filter ` could have been bypassed by a client using only TLS 1.3. +* rbac: added :ref:`url_path ` for matching URL path without the query and fragment string. +* sds: fixed the SDS vulnerability that TLS validation context (e.g., subject alt name or hash) cannot be effectively validated in some cases. 
1.12.2 (December 10, 2019) ========================== diff --git a/examples/front-proxy/front-envoy.yaml b/examples/front-proxy/front-envoy.yaml index 5bed8c8490176..e19e6109b28ef 100644 --- a/examples/front-proxy/front-envoy.yaml +++ b/examples/front-proxy/front-envoy.yaml @@ -4,6 +4,7 @@ static_resources: socket_address: address: 0.0.0.0 port_value: 80 + name: example_listener_name filter_chains: - filters: - name: envoy.http_connection_manager @@ -64,3 +65,12 @@ admin: socket_address: address: 0.0.0.0 port_value: 8001 +layered_runtime: + layers: + - name: static_layer_0 + static_layer: + envoy: + resource_limits: + listener: + example_listener_name: + connection_limit: 10000 diff --git a/include/envoy/buffer/buffer.h b/include/envoy/buffer/buffer.h index 801a5c0de9ee9..687cd9ec4f83f 100644 --- a/include/envoy/buffer/buffer.h +++ b/include/envoy/buffer/buffer.h @@ -57,6 +57,15 @@ class Instance { public: virtual ~Instance() = default; + /** + * Register function to call when the last byte in the last slice of this + * buffer has fully drained. Note that slices may be transferred to + * downstream buffers, drain trackers are transferred along with the bytes + * they track so the function is called only after the last byte is drained + * from all buffers. + */ + virtual void addDrainTracker(std::function drain_tracker) PURE; + /** * Copy data into the buffer (deprecated, use absl::string_view variant * instead). 
diff --git a/include/envoy/common/BUILD b/include/envoy/common/BUILD index 105f8b4374b7c..0565076813c7b 100644 --- a/include/envoy/common/BUILD +++ b/include/envoy/common/BUILD @@ -24,6 +24,11 @@ envoy_cc_library( hdrs = ["mutex_tracer.h"], ) +envoy_cc_library( + name = "resource_interface", + hdrs = ["resource.h"], +) + envoy_cc_library( name = "time_interface", hdrs = ["time.h"], diff --git a/include/envoy/common/resource.h b/include/envoy/common/resource.h new file mode 100644 index 0000000000000..6b04afcfdf4be --- /dev/null +++ b/include/envoy/common/resource.h @@ -0,0 +1,47 @@ +#include + +#include "envoy/common/pure.h" + +#pragma once + +namespace Envoy { + +/** + * A handle for use by any resource managers. + */ +class ResourceLimit { +public: + virtual ~ResourceLimit() = default; + + /** + * @return true if the resource can be created. + */ + virtual bool canCreate() PURE; + + /** + * Increment the resource count. + */ + virtual void inc() PURE; + + /** + * Decrement the resource count. + */ + virtual void dec() PURE; + + /** + * Decrement the resource count by a specific amount. + */ + virtual void decBy(uint64_t amount) PURE; + + /** + * @return the current maximum allowed number of this resource. + */ + virtual uint64_t max() PURE; + + /** + * @return the current resource count. + */ + virtual uint64_t count() const PURE; +}; + +} // namespace Envoy diff --git a/include/envoy/http/codec.h b/include/envoy/http/codec.h index 592635788c37a..5e4d55423d7dc 100644 --- a/include/envoy/http/codec.h +++ b/include/envoy/http/codec.h @@ -203,6 +203,13 @@ class Stream { * @return uint32_t the stream's configured buffer limits. */ virtual uint32_t bufferLimit() PURE; + + /** + * Set the flush timeout for the stream. At the codec level this is used to bound the amount of + * time the codec will wait to flush body data pending open stream window. It does *not* count + * small window updates as satisfying the idle timeout as this is a potential DoS vector. 
+ */ + virtual void setFlushTimeout(std::chrono::milliseconds timeout) PURE; }; /** diff --git a/include/envoy/network/BUILD b/include/envoy/network/BUILD index c1141b97c113a..1dd8fa954a731 100644 --- a/include/envoy/network/BUILD +++ b/include/envoy/network/BUILD @@ -108,6 +108,7 @@ envoy_cc_library( ":connection_balancer_interface", ":connection_interface", ":listen_socket_interface", + "//include/envoy/common:resource_interface", "//include/envoy/stats:stats_interface", "@envoy_api//envoy/api/v2:pkg_cc_proto", ], diff --git a/include/envoy/network/listener.h b/include/envoy/network/listener.h index fbb0ff7c17f97..ef8688cddaee3 100644 --- a/include/envoy/network/listener.h +++ b/include/envoy/network/listener.h @@ -6,6 +6,7 @@ #include "envoy/api/io_error.h" #include "envoy/common/exception.h" +#include "envoy/common/resource.h" #include "envoy/network/connection.h" #include "envoy/network/connection_balancer.h" #include "envoy/network/listen_socket.h" @@ -108,6 +109,11 @@ class ListenerConfig { * though the implementation may be a NOP balancer. */ virtual ConnectionBalancer& connectionBalancer() PURE; + + /** + * Open connection resources for this listener. + */ + virtual ResourceLimit& openConnections() PURE; }; /** @@ -122,6 +128,11 @@ class ListenerCallbacks { * @param socket supplies the socket that is moved into the callee. */ virtual void onAccept(ConnectionSocketPtr&& socket) PURE; + + /** + * Called when a new connection is rejected. 
+ */ + virtual void onReject() PURE; }; /** diff --git a/include/envoy/upstream/BUILD b/include/envoy/upstream/BUILD index fcdb98a90b11b..4d6f85181e903 100644 --- a/include/envoy/upstream/BUILD +++ b/include/envoy/upstream/BUILD @@ -112,6 +112,7 @@ envoy_cc_library( envoy_cc_library( name = "resource_manager_interface", hdrs = ["resource_manager.h"], + deps = ["//include/envoy/common:resource_interface"], ) envoy_cc_library( diff --git a/include/envoy/upstream/resource_manager.h b/include/envoy/upstream/resource_manager.h index 4fe7681aaaa3c..5cac59a1a0ad6 100644 --- a/include/envoy/upstream/resource_manager.h +++ b/include/envoy/upstream/resource_manager.h @@ -5,6 +5,7 @@ #include #include "envoy/common/pure.h" +#include "envoy/common/resource.h" namespace Envoy { namespace Upstream { @@ -16,49 +17,16 @@ namespace Upstream { enum class ResourcePriority { Default, High }; const size_t NumResourcePriorities = 2; -/** - * An individual resource tracked by the resource manager. - */ -class Resource { -public: - virtual ~Resource() = default; - - /** - * @return true if the resource can be created. - */ - virtual bool canCreate() PURE; - - /** - * Increment the resource count. - */ - virtual void inc() PURE; - - /** - * Decrement the resource count. - */ - virtual void dec() PURE; - - /** - * Decrement the resource count by a specific amount. - */ - virtual void decBy(uint64_t amount) PURE; - - /** - * @return the current maximum allowed number of this resource. - */ - virtual uint64_t max() PURE; -}; - /** * RAII wrapper that increments a resource on construction and decrements it on destruction. 
*/ class ResourceAutoIncDec { public: - ResourceAutoIncDec(Resource& resource) : resource_(resource) { resource_.inc(); } + ResourceAutoIncDec(ResourceLimit& resource) : resource_(resource) { resource_.inc(); } ~ResourceAutoIncDec() { resource_.dec(); } private: - Resource& resource_; + ResourceLimit& resource_; }; using ResourceAutoIncDecPtr = std::unique_ptr; @@ -73,31 +41,31 @@ class ResourceManager { virtual ~ResourceManager() = default; /** - * @return Resource& active TCP connections. + * @return ResourceLimit& active TCP connections and UDP sessions. */ - virtual Resource& connections() PURE; + virtual ResourceLimit& connections() PURE; /** - * @return Resource& active pending requests (requests that have not yet been attached to a + * @return ResourceLimit& active pending requests (requests that have not yet been attached to a * connection pool connection). */ - virtual Resource& pendingRequests() PURE; + virtual ResourceLimit& pendingRequests() PURE; /** - * @return Resource& active requests (requests that are currently bound to a connection pool + * @return ResourceLimit& active requests (requests that are currently bound to a connection pool * connection and are awaiting response). */ - virtual Resource& requests() PURE; + virtual ResourceLimit& requests() PURE; /** - * @return Resource& active retries. + * @return ResourceLimit& active retries. */ - virtual Resource& retries() PURE; + virtual ResourceLimit& retries() PURE; /** - * @return Resource& active connection pools. + * @return ResourceLimit& active connection pools. 
*/ - virtual Resource& connectionPools() PURE; + virtual ResourceLimit& connectionPools() PURE; }; } // namespace Upstream diff --git a/source/common/buffer/BUILD b/source/common/buffer/BUILD index ea7d6654f68bf..7d68beb6beee3 100644 --- a/source/common/buffer/BUILD +++ b/source/common/buffer/BUILD @@ -28,6 +28,7 @@ envoy_cc_library( "//source/common/common:stack_array", "//source/common/common:utility_lib", "//source/common/event:libevent_lib", + "//source/server:backtrace_lib", ], ) diff --git a/source/common/buffer/buffer_impl.cc b/source/common/buffer/buffer_impl.cc index bc7c6fbff48db..0629b1fff76b0 100644 --- a/source/common/buffer/buffer_impl.cc +++ b/source/common/buffer/buffer_impl.cc @@ -37,6 +37,12 @@ void OwnedImpl::addImpl(const void* data, uint64_t size) { } } +void OwnedImpl::addDrainTracker(std::function drain_tracker) { + ASSERT(!old_impl_); + ASSERT(!slices_.empty()); + slices_.back()->addDrainTracker(std::move(drain_tracker)); +} + void OwnedImpl::add(const void* data, uint64_t size) { addImpl(data, size); } void OwnedImpl::addBufferFragment(BufferFragment& fragment) { @@ -305,9 +311,11 @@ void* OwnedImpl::linearize(uint32_t size) { auto dest = static_cast(reservation.mem_); do { uint64_t data_size = slices_.front()->dataSize(); - memcpy(dest, slices_.front()->data(), data_size); - bytes_copied += data_size; - dest += data_size; + if (data_size > 0) { + memcpy(dest, slices_.front()->data(), data_size); + bytes_copied += data_size; + dest += data_size; + } slices_.pop_front(); } while (bytes_copied < linearized_size); ASSERT(dest == static_cast(reservation.mem_) + linearized_size); @@ -331,6 +339,7 @@ void OwnedImpl::coalesceOrAddSlice(SlicePtr&& other_slice) { // Copy content of the `other_slice`. The `move` methods which call this method effectively // drain the source buffer. addImpl(other_slice->data(), slice_size); + other_slice->transferDrainTrackersTo(*slices_.back()); } else { // Take ownership of the slice. 
slices_.emplace_back(std::move(other_slice)); diff --git a/source/common/buffer/buffer_impl.h b/source/common/buffer/buffer_impl.h index 77d5f58f7f28a..f70ccef7f6c30 100644 --- a/source/common/buffer/buffer_impl.h +++ b/source/common/buffer/buffer_impl.h @@ -35,7 +35,11 @@ class Slice { public: using Reservation = RawSlice; - virtual ~Slice() = default; + virtual ~Slice() { + for (const auto& drain_tracker : drain_trackers_) { + drain_tracker(); + } + } /** * @return a pointer to the start of the usable content. @@ -137,6 +141,9 @@ class Slice { */ uint64_t append(const void* data, uint64_t size) { uint64_t copy_size = std::min(size, reservableSize()); + if (copy_size == 0) { + return 0; + } uint8_t* dest = base_ + reservable_; reservable_ += copy_size; // NOLINTNEXTLINE(clang-analyzer-core.NullDereference) @@ -193,6 +200,15 @@ class Slice { */ virtual bool canCoalesce() const { return true; } + void transferDrainTrackersTo(Slice& destination) { + destination.drain_trackers_.splice(destination.drain_trackers_.end(), drain_trackers_); + ASSERT(drain_trackers_.empty()); + } + + void addDrainTracker(std::function drain_tracker) { + drain_trackers_.emplace_back(std::move(drain_tracker)); + } + protected: Slice(uint64_t data, uint64_t reservable, uint64_t capacity) : data_(data), reservable_(reservable), capacity_(capacity) {} @@ -208,6 +224,8 @@ class Slice { /** Total number of bytes in the slice */ uint64_t capacity_; + + std::list> drain_trackers_; }; using SlicePtr = std::unique_ptr; @@ -512,6 +530,7 @@ class OwnedImpl : public LibEventInstance { OwnedImpl(const void* data, uint64_t size); // Buffer::Instance + void addDrainTracker(std::function drain_tracker) override; void add(const void* data, uint64_t size) override; void addBufferFragment(BufferFragment& fragment) override; void add(absl::string_view data) override; @@ -567,6 +586,8 @@ class OwnedImpl : public LibEventInstance { */ static void useOldImpl(bool use_old_impl); + static bool newBuffersUseOldImpl() 
{ return use_old_impl_; } + /** * Describe the in-memory representation of the slices in the buffer. For use * in tests that want to make assertions about the specific arrangement of diff --git a/source/common/common/BUILD b/source/common/common/BUILD index c8e4b2ae5b22a..d1f00dea822d6 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -157,6 +157,15 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "basic_resource_lib", + hdrs = ["basic_resource_impl.h"], + deps = [ + "//include/envoy/common:resource_interface", + "//include/envoy/runtime:runtime_interface", + ], +) + envoy_cc_library( name = "macros", hdrs = ["macros.h"], @@ -172,6 +181,7 @@ envoy_cc_library( "//include/envoy/common:matchers_interface", "//source/common/common:regex_lib", "//source/common/config:metadata_lib", + "//source/common/http:path_utility_lib", "//source/common/protobuf", "@envoy_api//envoy/type/matcher:pkg_cc_proto", ], diff --git a/source/common/common/basic_resource_impl.h b/source/common/common/basic_resource_impl.h new file mode 100644 index 0000000000000..8fe93aaabcb9f --- /dev/null +++ b/source/common/common/basic_resource_impl.h @@ -0,0 +1,60 @@ +#pragma once + +#include + +#include "envoy/common/resource.h" +#include "envoy/runtime/runtime.h" + +#include "common/common/assert.h" + +#include "absl/types/optional.h" + +namespace Envoy { + +/** + * A handle to track some limited resource. + * + * NOTE: + * This implementation makes some assumptions which favor simplicity over correctness. Though + * atomics are used, it is possible for resources to temporarily go above the supplied maximums. + * This should not effect overall behavior. 
+ */ +class BasicResourceLimitImpl : public ResourceLimit { +public: + BasicResourceLimitImpl(uint64_t max, Runtime::Loader& runtime, const std::string& runtime_key) + : max_(max), runtime_(&runtime), runtime_key_(runtime_key) {} + BasicResourceLimitImpl(uint64_t max) : max_(max), runtime_(nullptr) {} + BasicResourceLimitImpl() : max_(std::numeric_limits::max()), runtime_(nullptr) {} + + bool canCreate() override { return current_.load() < max(); } + + void inc() override { ++current_; } + + void dec() override { decBy(1); } + + void decBy(uint64_t amount) override { + ASSERT(current_ >= amount); + current_ -= amount; + } + + uint64_t max() override { + return (runtime_ != nullptr && runtime_key_.has_value()) + ? runtime_->snapshot().getInteger(runtime_key_.value(), max_) + : max_; + } + + uint64_t count() const override { return current_.load(); } + + void setMax(uint64_t new_max) { max_ = new_max; } + void resetMax() { max_ = std::numeric_limits::max(); } + +protected: + std::atomic current_{}; + +private: + uint64_t max_; + Runtime::Loader* runtime_{nullptr}; + const absl::optional runtime_key_; +}; + +} // namespace Envoy diff --git a/source/common/common/matchers.cc b/source/common/common/matchers.cc index 4c648ebe8eb6a..34183696a7544 100644 --- a/source/common/common/matchers.cc +++ b/source/common/common/matchers.cc @@ -4,6 +4,7 @@ #include "common/common/regex.h" #include "common/config/metadata.h" +#include "common/http/path_utility.h" #include "absl/strings/match.h" @@ -60,8 +61,14 @@ bool DoubleMatcher::match(const ProtobufWkt::Value& value) const { StringMatcherImpl::StringMatcherImpl(const envoy::type::matcher::StringMatcher& matcher) : matcher_(matcher) { if (matcher.match_pattern_case() == envoy::type::matcher::StringMatcher::kRegex) { + if (matcher.ignore_case()) { + throw EnvoyException("ignore_case has no effect for regex."); + } regex_ = Regex::Utility::parseStdRegexAsCompiledMatcher(matcher_.regex()); } else if (matcher.match_pattern_case() == 
envoy::type::matcher::StringMatcher::kSafeRegex) { + if (matcher.ignore_case()) { + throw EnvoyException("ignore_case has no effect for safe_regex."); + } regex_ = Regex::Utility::parseRegex(matcher_.safe_regex()); } } @@ -77,11 +84,14 @@ bool StringMatcherImpl::match(const ProtobufWkt::Value& value) const { bool StringMatcherImpl::match(const absl::string_view value) const { switch (matcher_.match_pattern_case()) { case envoy::type::matcher::StringMatcher::kExact: - return matcher_.exact() == value; + return matcher_.ignore_case() ? absl::EqualsIgnoreCase(value, matcher_.exact()) + : value == matcher_.exact(); case envoy::type::matcher::StringMatcher::kPrefix: - return absl::StartsWith(value, matcher_.prefix()); + return matcher_.ignore_case() ? absl::StartsWithIgnoreCase(value, matcher_.prefix()) + : absl::StartsWith(value, matcher_.prefix()); case envoy::type::matcher::StringMatcher::kSuffix: - return absl::EndsWith(value, matcher_.suffix()); + return matcher_.ignore_case() ? absl::EndsWithIgnoreCase(value, matcher_.suffix()) + : absl::EndsWith(value, matcher_.suffix()); case envoy::type::matcher::StringMatcher::kRegex: case envoy::type::matcher::StringMatcher::kSafeRegex: return regex_->match(value); @@ -150,10 +160,28 @@ MetadataMatcher::MetadataMatcher(const envoy::type::matcher::MetadataMatcher& ma value_matcher_ = ValueMatcher::create(v); } +PathMatcherConstSharedPtr PathMatcher::createExact(const std::string& exact, bool ignore_case) { + envoy::type::matcher::StringMatcher matcher; + matcher.set_exact(exact); + matcher.set_ignore_case(ignore_case); + return std::make_shared(matcher); +} + +PathMatcherConstSharedPtr PathMatcher::createPrefix(const std::string& prefix, bool ignore_case) { + envoy::type::matcher::StringMatcher matcher; + matcher.set_prefix(prefix); + matcher.set_ignore_case(ignore_case); + return std::make_shared(matcher); +} + bool MetadataMatcher::match(const envoy::api::v2::core::Metadata& metadata) const { const auto& value = 
Envoy::Config::Metadata::metadataValue(metadata, matcher_.filter(), path_); return value_matcher_ && value_matcher_->match(value); } +bool PathMatcher::match(const absl::string_view path) const { + return matcher_.match(Http::PathUtil::removeQueryAndFragment(path)); +} + } // namespace Matchers } // namespace Envoy diff --git a/source/common/common/matchers.h b/source/common/common/matchers.h index bb89941f3d0e7..c9939075a576e 100644 --- a/source/common/common/matchers.h +++ b/source/common/common/matchers.h @@ -7,6 +7,7 @@ #include "envoy/common/regex.h" #include "envoy/type/matcher/metadata.pb.h" #include "envoy/type/matcher/number.pb.h" +#include "envoy/type/matcher/path.pb.h" #include "envoy/type/matcher/string.pb.h" #include "envoy/type/matcher/value.pb.h" @@ -19,6 +20,9 @@ namespace Matchers { class ValueMatcher; using ValueMatcherConstSharedPtr = std::shared_ptr; +class PathMatcher; +using PathMatcherConstSharedPtr = std::shared_ptr; + class ValueMatcher { public: virtual ~ValueMatcher() = default; @@ -132,5 +136,19 @@ class MetadataMatcher { ValueMatcherConstSharedPtr value_matcher_; }; +class PathMatcher : public StringMatcher { +public: + PathMatcher(const envoy::type::matcher::PathMatcher& path) : matcher_(path.path()) {} + PathMatcher(const envoy::type::matcher::StringMatcher& matcher) : matcher_(matcher) {} + + static PathMatcherConstSharedPtr createExact(const std::string& exact, bool ignore_case); + static PathMatcherConstSharedPtr createPrefix(const std::string& prefix, bool ignore_case); + + bool match(const absl::string_view path) const override; + +private: + const StringMatcherImpl matcher_; +}; + } // namespace Matchers } // namespace Envoy diff --git a/source/common/http/codec_client.cc b/source/common/http/codec_client.cc index c6c3c42f3db32..d850acb3c0af9 100644 --- a/source/common/http/codec_client.cc +++ b/source/common/http/codec_client.cc @@ -17,7 +17,7 @@ namespace Http { CodecClient::CodecClient(Type type, 
Network::ClientConnectionPtr&& connection, Upstream::HostDescriptionConstSharedPtr host, Event::Dispatcher& dispatcher) - : type_(type), connection_(std::move(connection)), host_(host), + : type_(type), host_(host), connection_(std::move(connection)), idle_timeout_(host_->cluster().idleTimeout()) { if (type_ != Type::HTTP3) { // Make sure upstream connections process data and then the FIN, rather than processing diff --git a/source/common/http/codec_client.h b/source/common/http/codec_client.h index ab29c15db85a4..49ae4bc17a45f 100644 --- a/source/common/http/codec_client.h +++ b/source/common/http/codec_client.h @@ -155,9 +155,11 @@ class CodecClient : Logger::Loggable, } const Type type_; - ClientConnectionPtr codec_; - Network::ClientConnectionPtr connection_; + // The order of host_, connection_, and codec_ matter as during destruction each can refer to + // the previous, at least in tests. Upstream::HostDescriptionConstSharedPtr host_; + Network::ClientConnectionPtr connection_; + ClientConnectionPtr codec_; Event::TimerPtr idle_timer_; const absl::optional idle_timeout_; diff --git a/source/common/http/conn_manager_config.h b/source/common/http/conn_manager_config.h index 5a3221f358cba..0b4ced34bfba0 100644 --- a/source/common/http/conn_manager_config.h +++ b/source/common/http/conn_manager_config.h @@ -389,6 +389,13 @@ class ConnectionManagerConfig { * one. */ virtual bool shouldMergeSlashes() const PURE; + + /** + * @return the action HttpConnectionManager should take when receiving client request + * headers containing underscore characters. 
+ */ + virtual envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headersWithUnderscoresAction() const PURE; }; } // namespace Http } // namespace Envoy diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index aa7de6f07fedd..90866fb2fb3a5 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -261,6 +261,7 @@ StreamDecoder& ConnectionManagerImpl::newStream(StreamEncoder& response_encoder, new_stream->state_.is_internally_created_ = is_internally_created; new_stream->response_encoder_ = &response_encoder; new_stream->response_encoder_->getStream().addCallbacks(*new_stream); + new_stream->response_encoder_->getStream().setFlushTimeout(new_stream->idle_timeout_ms_); new_stream->buffer_limit_ = new_stream->response_encoder_->getStream().bufferLimit(); // If the network connection is backed up, the stream should be made aware of it on creation. // Both HTTP/1.x and HTTP/2 codecs handle this in StreamCallbackHelper::addCallbacks_. @@ -866,7 +867,10 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(HeaderMapPtr&& headers, if (hasCachedRoute()) { const Router::RouteEntry* route_entry = cached_route_.value()->routeEntry(); if (route_entry != nullptr && route_entry->idleTimeout()) { + // TODO(mattklein123): Technically if the cached route changes, we should also see if the + // route idle timeout has changed and update the value. idle_timeout_ms_ = route_entry->idleTimeout().value(); + response_encoder_->getStream().setFlushTimeout(idle_timeout_ms_); if (idle_timeout_ms_.count()) { // If we have a route-level idle timeout but no global stream idle timeout, create a timer. 
if (stream_idle_timer_ == nullptr) { diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index 18a39688bec10..4797ccf67c070 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -42,15 +42,17 @@ ServerConnectionPtr ConnectionManagerUtility::autoCreateCodec( Network::Connection& connection, const Buffer::Instance& data, ServerConnectionCallbacks& callbacks, Stats::Scope& scope, const Http1Settings& http1_settings, const Http2Settings& http2_settings, uint32_t max_request_headers_kb, - uint32_t max_request_headers_count) { + uint32_t max_request_headers_count, + envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action) { if (determineNextProtocol(connection, data) == Http2::ALPN_STRING) { - return std::make_unique(connection, callbacks, scope, - http2_settings, max_request_headers_kb, - max_request_headers_count); + return std::make_unique( + connection, callbacks, scope, http2_settings, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); } else { - return std::make_unique(connection, scope, callbacks, - http1_settings, max_request_headers_kb, - max_request_headers_count); + return std::make_unique( + connection, scope, callbacks, http1_settings, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); } } diff --git a/source/common/http/conn_manager_utility.h b/source/common/http/conn_manager_utility.h index ee67111c8de8f..6990a01e7104e 100644 --- a/source/common/http/conn_manager_utility.h +++ b/source/common/http/conn_manager_utility.h @@ -37,7 +37,9 @@ class ConnectionManagerUtility { autoCreateCodec(Network::Connection& connection, const Buffer::Instance& data, ServerConnectionCallbacks& callbacks, Stats::Scope& scope, const Http1Settings& http1_settings, const Http2Settings& http2_settings, - uint32_t max_request_headers_kb, uint32_t 
max_request_headers_count); + uint32_t max_request_headers_kb, uint32_t max_request_headers_count, + envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action); /** * Mutates request headers in various ways. This functionality is broken out because of its diff --git a/source/common/http/header_utility.cc b/source/common/http/header_utility.cc index 7bb74e0ea9324..76645b77d5d6c 100644 --- a/source/common/http/header_utility.cc +++ b/source/common/http/header_utility.cc @@ -130,11 +130,15 @@ bool HeaderUtility::matchHeaders(const HeaderMap& request_headers, const HeaderD return match != header_data.invert_match_; } -bool HeaderUtility::headerIsValid(const absl::string_view header_value) { +bool HeaderUtility::headerValueIsValid(const absl::string_view header_value) { return (nghttp2_check_header_value(reinterpret_cast(header_value.data()), header_value.size()) != 0); } +bool HeaderUtility::headerNameContainsUnderscore(const absl::string_view header_name) { + return header_name.find('_') != absl::string_view::npos; +} + bool HeaderUtility::authorityIsValid(const absl::string_view header_value) { return (nghttp2_check_authority(reinterpret_cast(header_value.data()), header_value.size()) != 0); diff --git a/source/common/http/header_utility.h b/source/common/http/header_utility.h index aa2a92e3ece30..3b7c5fa0729f4 100644 --- a/source/common/http/header_utility.h +++ b/source/common/http/header_utility.h @@ -94,7 +94,16 @@ class HeaderUtility { * http://tools.ietf.org/html/rfc7230#section-3.2 * @return bool true if the header values are valid, according to the aforementioned RFC. */ - static bool headerIsValid(const absl::string_view header_value); + static bool headerValueIsValid(const absl::string_view header_value); + + /** + * Checks if header name contains underscore characters. 
+ * Underscore character is allowed in header names by the RFC-7230 and this check is implemented + * as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by + * default allows headers with underscore characters. + * @return bool true if header name contains underscore characters. + */ + static bool headerNameContainsUnderscore(const absl::string_view header_name); /** * Validates that the characters in the authority are valid. diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index ac1826a534b41..cf718cec06b46 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -404,6 +404,8 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, Stats::Scope& st void ConnectionImpl::completeLastHeader() { ENVOY_CONN_LOG(trace, "completed header: key={} value={}", connection_, current_header_field_.getStringView(), current_header_value_.getStringView()); + + checkHeaderNameForUnderscores(); if (!current_header_field_.empty()) { toLowerTable().toLowerCase(current_header_field_.buffer(), current_header_field_.size()); current_header_map_->addViaMove(std::move(current_header_field_), @@ -421,6 +423,20 @@ void ConnectionImpl::completeLastHeader() { ASSERT(current_header_value_.empty()); } +uint32_t ConnectionImpl::getHeadersSize() { + return current_header_field_.size() + current_header_value_.size() + + (current_header_map_->byteSize() ? *current_header_map_->byteSize() : 0); +} + +void ConnectionImpl::checkMaxHeadersSize() { + const uint32_t total = getHeadersSize(); + if (total > (max_headers_kb_ * 1024)) { + error_code_ = Http::Code::RequestHeaderFieldsTooLarge; + sendProtocolError(); + throw CodecProtocolException("headers size exceeds limit"); + } +} + bool ConnectionImpl::maybeDirectDispatch(Buffer::Instance& data) { if (!handling_upgrade_) { // Only direct dispatch for Upgrade requests. 
@@ -492,6 +508,8 @@ void ConnectionImpl::onHeaderField(const char* data, size_t length) { } current_header_field_.append(data, length); + + checkMaxHeadersSize(); } void ConnectionImpl::onHeaderValue(const char* data, size_t length) { @@ -505,7 +523,7 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { const absl::string_view header_value = StringUtil::trim(absl::string_view(data, length)); if (strict_header_validation_) { - if (!Http::HeaderUtility::headerIsValid(header_value)) { + if (!Http::HeaderUtility::headerValueIsValid(header_value)) { ENVOY_CONN_LOG(debug, "invalid header value: {}", connection_, header_value); error_code_ = Http::Code::BadRequest; sendProtocolError(); @@ -522,16 +540,7 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { header_parsing_state_ = HeaderParsingState::Value; current_header_value_.append(header_value.data(), header_value.length()); - // Verify that the cached value in byte size exists. - ASSERT(current_header_map_->byteSize().has_value()); - const uint32_t total = current_header_field_.size() + current_header_value_.size() + - current_header_map_->byteSize().value(); - if (total > (max_headers_kb_ * 1024)) { - - error_code_ = Http::Code::RequestHeaderFieldsTooLarge; - sendProtocolError(); - throw CodecProtocolException("headers size exceeds limit"); - } + checkMaxHeadersSize(); } int ConnectionImpl::onHeadersCompleteBase() { @@ -610,10 +619,12 @@ void ConnectionImpl::onResetStreamBase(StreamResetReason reason) { onResetStream(reason); } -ServerConnectionImpl::ServerConnectionImpl(Network::Connection& connection, Stats::Scope& stats, - ServerConnectionCallbacks& callbacks, - Http1Settings settings, uint32_t max_request_headers_kb, - const uint32_t max_request_headers_count) +ServerConnectionImpl::ServerConnectionImpl( + Network::Connection& connection, Stats::Scope& stats, ServerConnectionCallbacks& callbacks, + Http1Settings settings, uint32_t max_request_headers_kb, + const uint32_t 
max_request_headers_count, + envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action) : ConnectionImpl(connection, stats, HTTP_REQUEST, max_request_headers_kb, max_request_headers_count, formatter(settings)), callbacks_(callbacks), codec_settings_(settings), @@ -627,7 +638,14 @@ ServerConnectionImpl::ServerConnectionImpl(Network::Connection& connection, Stat max_outbound_responses_( Runtime::getInteger("envoy.do_not_use_going_away_max_http2_outbound_responses", 2)), flood_protection_( - Runtime::runtimeFeatureEnabled("envoy.reloadable_features.http1_flood_protection")) {} + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.http1_flood_protection")), + headers_with_underscores_action_(headers_with_underscores_action) {} + +uint32_t ServerConnectionImpl::getHeadersSize() { + // Add in the the size of the request URL if processing request headers. + const uint32_t url_size = active_request_ ? active_request_->request_url_.size() : 0; + return url_size + ConnectionImpl::getHeadersSize(); +} void ServerConnectionImpl::onEncodeComplete() { ASSERT(active_request_); @@ -736,6 +754,8 @@ void ServerConnectionImpl::onMessageBegin() { void ServerConnectionImpl::onUrl(const char* data, size_t length) { if (active_request_) { active_request_->request_url_.append(data, length); + + checkMaxHeadersSize(); } } @@ -806,6 +826,27 @@ void ServerConnectionImpl::releaseOutboundResponse( delete fragment; } +void ServerConnectionImpl::checkHeaderNameForUnderscores() { + if (headers_with_underscores_action_ != envoy::api::v2::core::HttpProtocolOptions::ALLOW && + Http::HeaderUtility::headerNameContainsUnderscore(current_header_field_.getStringView())) { + if (headers_with_underscores_action_ == + envoy::api::v2::core::HttpProtocolOptions::DROP_HEADER) { + ENVOY_CONN_LOG(debug, "Dropping header with invalid characters in its name: {}", connection_, + current_header_field_.getStringView()); + 
stats_.dropped_headers_with_underscores_.inc(); + current_header_field_.clear(); + current_header_value_.clear(); + } else { + ENVOY_CONN_LOG(debug, "Rejecting request due to header name with underscores: {}", + connection_, current_header_field_.getStringView()); + error_code_ = Http::Code::BadRequest; + sendProtocolError(); + stats_.requests_rejected_with_underscores_in_headers_.inc(); + throw CodecProtocolException("http/1.1 protocol error: header name contains underscores"); + } + } +} + ClientConnectionImpl::ClientConnectionImpl(Network::Connection& connection, Stats::Scope& stats, ConnectionCallbacks&, const Http1Settings& settings, const uint32_t max_response_headers_count) diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index 714f54ca74426..0c85f48c92941 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -8,6 +8,7 @@ #include #include +#include "envoy/api/v2/core/protocol.pb.h" #include "envoy/http/codec.h" #include "envoy/network/connection.h" #include "envoy/stats/scope.h" @@ -28,7 +29,9 @@ namespace Http1 { * All stats for the HTTP/1 codec. @see stats_macros.h */ #define ALL_HTTP1_CODEC_STATS(COUNTER) \ + COUNTER(dropped_headers_with_underscores) \ COUNTER(metadata_not_supported_error) \ + COUNTER(requests_rejected_with_underscores_in_headers) \ COUNTER(response_flood) /** @@ -62,6 +65,11 @@ class StreamEncoderImpl : public StreamEncoder, void resetStream(StreamResetReason reason) override; void readDisable(bool disable) override; uint32_t bufferLimit() override; + void setFlushTimeout(std::chrono::milliseconds) override { + // HTTP/1 has one stream per connection, thus any data encoded is immediately written to the + // connection, invoking any watermarks as necessary. There is no internal buffering that would + // require a flush timeout not already covered by other timeouts. 
+ } void isResponseToHeadRequest(bool value) { is_response_to_head_request_ = value; } @@ -206,6 +214,20 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable; ServerConnectionImpl(Network::Connection& connection, Stats::Scope& stats, ServerConnectionCallbacks& callbacks, Http1Settings settings, - uint32_t max_request_headers_kb, const uint32_t max_request_headers_count); + uint32_t max_request_headers_kb, const uint32_t max_request_headers_count, + envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action); bool supports_http_10() override { return codec_settings_.accept_http_10_; } @@ -344,6 +386,9 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { ResponseStreamEncoderImpl response_encoder_; bool remote_complete_{}; }; + // ConnectionImpl + // Add the size of the request_url to the reported header size when processing request headers. + uint32_t getHeadersSize() override; /** * Manipulate the request's first line, parsing the url and converting to a relative path if @@ -371,6 +416,7 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { void releaseOutboundResponse(const Buffer::OwnedBufferFragmentImpl* fragment); void maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer& output_buffer) override; void doFloodProtectionChecks() const; + void checkHeaderNameForUnderscores() override; ServerConnectionCallbacks& callbacks_; std::function flood_checks_{[&]() { this->doFloodProtectionChecks(); }}; @@ -383,6 +429,9 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { // we could make this configurable. uint32_t max_outbound_responses_{}; bool flood_protection_{}; + // The action to take when a request header name contains underscore characters. 
+ const envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action_; }; /** diff --git a/source/common/http/http2/BUILD b/source/common/http/http2/BUILD index ba48dc67903fb..240d4824360a6 100644 --- a/source/common/http/http2/BUILD +++ b/source/common/http/http2/BUILD @@ -38,6 +38,7 @@ envoy_cc_library( "//source/common/http:codes_lib", "//source/common/http:exception_lib", "//source/common/http:header_map_lib", + "//source/common/http:header_utility_lib", "//source/common/http:headers_lib", "//source/common/http:utility_lib", "//source/common/runtime:runtime_lib", diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index 4a756b58bcb2f..9fc191da0883b 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -18,6 +18,7 @@ #include "common/common/utility.h" #include "common/http/codes.h" #include "common/http/exception.h" +#include "common/http/header_utility.h" #include "common/http/headers.h" namespace Envoy { @@ -55,11 +56,20 @@ ConnectionImpl::StreamImpl::StreamImpl(ConnectionImpl& parent, uint32_t buffer_l waiting_for_non_informational_headers_(false), pending_receive_buffer_high_watermark_called_(false), pending_send_buffer_high_watermark_called_(false), reset_due_to_messaging_error_(false) { + parent_.stats_.streams_active_.inc(); if (buffer_limit > 0) { setWriteBufferWatermarks(buffer_limit / 2, buffer_limit); } } +ConnectionImpl::StreamImpl::~StreamImpl() { ASSERT(stream_idle_timer_ == nullptr); } + +void ConnectionImpl::StreamImpl::destroy() { + disarmStreamIdleTimer(); + parent_.stats_.streams_active_.dec(); + parent_.stats_.pending_send_bytes_.sub(pending_send_data_.length()); +} + static void insertHeader(std::vector& headers, const HeaderEntry& header) { uint8_t flags = 0; if (header.key().type() == HeaderString::Type::Reference) { @@ -129,6 +139,7 @@ void ConnectionImpl::StreamImpl::encodeTrailers(const HeaderMap& trailers) 
{ // waiting on window updates. We need to save the trailers so that we can emit them later. ASSERT(!pending_trailers_); pending_trailers_ = std::make_unique(trailers); + createPendingFlushTimer(); } else { submitTrailers(trailers); parent_.sendPendingFrames(); @@ -261,6 +272,7 @@ int ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t return NGHTTP2_ERR_FLOODED; } + parent_.stats_.pending_send_bytes_.sub(length); output.move(pending_send_data_, length); parent_.connection_.write(output, false); return 0; @@ -282,9 +294,30 @@ void ConnectionImpl::ServerStreamImpl::submitHeaders(const std::vector 0) { + stream_idle_timer_ = + parent_.connection_.dispatcher().createTimer([this] { onPendingFlushTimer(); }); + stream_idle_timer_->enableTimer(stream_idle_timeout_); + } +} + +void ConnectionImpl::StreamImpl::onPendingFlushTimer() { + ENVOY_CONN_LOG(debug, "pending stream flush timeout", parent_.connection_); + stream_idle_timer_.reset(); + parent_.stats_.tx_flush_timeout_.inc(); + ASSERT(local_end_stream_ && !local_end_stream_sent_); + // This will emit a reset frame for this stream and close the stream locally. No reset callbacks + // will be run because higher layers think the stream is already finished. 
+ resetStreamWorker(StreamResetReason::LocalReset); + parent_.sendPendingFrames(); +} + void ConnectionImpl::StreamImpl::encodeData(Buffer::Instance& data, bool end_stream) { ASSERT(!local_end_stream_); local_end_stream_ = end_stream; + parent_.stats_.pending_send_bytes_.add(data.length()); pending_send_data_.move(data); if (data_deferred_) { int rc = nghttp2_session_resume_data(parent_.session_, stream_id_); @@ -294,6 +327,9 @@ void ConnectionImpl::StreamImpl::encodeData(Buffer::Instance& data, bool end_str } parent_.sendPendingFrames(); + if (local_end_stream_ && pending_send_data_.length() > 0) { + createPendingFlushTimer(); + } } void ConnectionImpl::StreamImpl::resetStream(StreamResetReason reason) { @@ -372,22 +408,20 @@ bool checkRuntimeOverride(bool config_value, const char* override_key) { ConnectionImpl::ConnectionImpl(Network::Connection& connection, Stats::Scope& stats, const Http2Settings& http2_settings, const uint32_t max_headers_kb, const uint32_t max_headers_count) - : stats_{ALL_HTTP2_CODEC_STATS(POOL_COUNTER_PREFIX(stats, "http2."))}, connection_(connection), - max_headers_kb_(max_headers_kb), max_headers_count_(max_headers_count), + : stats_{ALL_HTTP2_CODEC_STATS(POOL_COUNTER_PREFIX(stats, "http2."), + POOL_GAUGE_PREFIX(stats, "http2."))}, + connection_(connection), max_headers_kb_(max_headers_kb), + max_headers_count_(max_headers_count), per_stream_buffer_limit_(http2_settings.initial_stream_window_size_), stream_error_on_invalid_http_messaging_(checkRuntimeOverride( http2_settings.stream_error_on_invalid_http_messaging_, InvalidHttpMessagingOverrideKey)), flood_detected_(false), max_outbound_frames_( Runtime::getInteger(MaxOutboundFramesOverrideKey, http2_settings.max_outbound_frames_)), - frame_buffer_releasor_([this](const Buffer::OwnedBufferFragmentImpl* fragment) { - releaseOutboundFrame(fragment); - }), + frame_buffer_releasor_([this]() { releaseOutboundFrame(); }), max_outbound_control_frames_(Runtime::getInteger( 
MaxOutboundControlFramesOverrideKey, http2_settings.max_outbound_control_frames_)), - control_frame_buffer_releasor_([this](const Buffer::OwnedBufferFragmentImpl* fragment) { - releaseOutboundControlFrame(fragment); - }), + control_frame_buffer_releasor_([this]() { releaseOutboundControlFrame(); }), max_consecutive_inbound_frames_with_empty_payload_( Runtime::getInteger(MaxConsecutiveInboundFramesWithEmptyPayloadOverrideKey, http2_settings.max_consecutive_inbound_frames_with_empty_payload_)), @@ -399,7 +433,12 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, Stats::Scope& st http2_settings.max_inbound_window_update_frames_per_data_frame_sent_)), dispatching_(false), raised_goaway_(false), pending_deferred_reset_(false) {} -ConnectionImpl::~ConnectionImpl() { nghttp2_session_del(session_); } +ConnectionImpl::~ConnectionImpl() { + for (const auto& stream : active_streams_) { + stream->destroy(); + } + nghttp2_session_del(session_); +} void ConnectionImpl::dispatch(Buffer::Instance& data) { ENVOY_CONN_LOG(trace, "dispatching {} bytes", connection_, data.length()); @@ -610,6 +649,15 @@ int ConnectionImpl::onFrameSend(const nghttp2_frame* frame) { case NGHTTP2_GOAWAY: { ENVOY_CONN_LOG(debug, "sent goaway code={}", connection_, frame->goaway.error_code); if (frame->goaway.error_code != NGHTTP2_NO_ERROR) { + // TODO(mattklein123): Returning this error code abandons standard nghttp2 frame accounting. + // As such, it is not reliable to call sendPendingFrames() again after this and we assume + // that the connection is going to get torn down immediately. One byproduct of this is that + // we need to cancel all pending flush stream timeouts since they can race with connection + // teardown. As part of the work to remove exceptions we should aim to clean up all of this + // error handling logic and only handle this type of case at the end of dispatch. 
+ for (auto& stream : active_streams_) { + stream->disarmStreamIdleTimer(); + } return NGHTTP2_ERR_CALLBACK_FAILURE; } break; @@ -693,27 +741,21 @@ bool ConnectionImpl::addOutboundFrameFragment(Buffer::OwnedImpl& output, const u return false; } - auto fragment = Buffer::OwnedBufferFragmentImpl::create( - absl::string_view(reinterpret_cast(data), length), - is_outbound_flood_monitored_control_frame ? control_frame_buffer_releasor_ - : frame_buffer_releasor_); - - // The Buffer::OwnedBufferFragmentImpl object will be deleted in the *frame_buffer_releasor_ - // callback. - output.addBufferFragment(*fragment.release()); + output.add(data, length); + output.addDrainTracker(is_outbound_flood_monitored_control_frame ? control_frame_buffer_releasor_ + : frame_buffer_releasor_); return true; } -void ConnectionImpl::releaseOutboundFrame(const Buffer::OwnedBufferFragmentImpl* fragment) { +void ConnectionImpl::releaseOutboundFrame() { ASSERT(outbound_frames_ >= 1); --outbound_frames_; - delete fragment; } -void ConnectionImpl::releaseOutboundControlFrame(const Buffer::OwnedBufferFragmentImpl* fragment) { +void ConnectionImpl::releaseOutboundControlFrame() { ASSERT(outbound_control_frames_ >= 1); --outbound_control_frames_; - releaseOutboundFrame(fragment); + releaseOutboundFrame(); } ssize_t ConnectionImpl::onSend(const uint8_t* data, size_t length) { @@ -759,6 +801,7 @@ int ConnectionImpl::onStreamClose(int32_t stream_id, uint32_t error_code) { stream->runResetCallbacks(reason); } + stream->destroy(); connection_.dispatcher().deferredDelete(stream->removeFromList(active_streams_)); // Any unconsumed data must be consumed before the stream is deleted. 
// nghttp2 does not appear to track this internally, and any stream deleted @@ -818,6 +861,14 @@ int ConnectionImpl::saveHeader(const nghttp2_frame* frame, HeaderString&& name, stats_.headers_cb_no_stream_.inc(); return 0; } + + auto should_return = checkHeaderNameForUnderscores(name.getStringView()); + if (should_return) { + name.clear(); + value.clear(); + return should_return.value(); + } + stream->saveHeader(std::move(name), std::move(value)); // Verify that the cached value in byte size exists. @@ -1138,14 +1189,15 @@ int ClientConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& na return saveHeader(frame, std::move(name), std::move(value)); } -ServerConnectionImpl::ServerConnectionImpl(Network::Connection& connection, - Http::ServerConnectionCallbacks& callbacks, - Stats::Scope& scope, const Http2Settings& http2_settings, - const uint32_t max_request_headers_kb, - const uint32_t max_request_headers_count) +ServerConnectionImpl::ServerConnectionImpl( + Network::Connection& connection, Http::ServerConnectionCallbacks& callbacks, + Stats::Scope& scope, const Http2Settings& http2_settings, const uint32_t max_request_headers_kb, + const uint32_t max_request_headers_count, + envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action) : ConnectionImpl(connection, scope, http2_settings, max_request_headers_kb, max_request_headers_count), - callbacks_(callbacks) { + callbacks_(callbacks), headers_with_underscores_action_(headers_with_underscores_action) { Http2Options http2_options(http2_settings); nghttp2_session_server_new2(&session_, http2_callbacks_.callbacks(), base(), http2_options.options()); @@ -1292,6 +1344,25 @@ void ServerConnectionImpl::dispatch(Buffer::Instance& data) { ConnectionImpl::dispatch(data); } +absl::optional +ServerConnectionImpl::checkHeaderNameForUnderscores(absl::string_view header_name) { + if (headers_with_underscores_action_ != envoy::api::v2::core::HttpProtocolOptions::ALLOW 
&& + Http::HeaderUtility::headerNameContainsUnderscore(header_name)) { + if (headers_with_underscores_action_ == + envoy::api::v2::core::HttpProtocolOptions::DROP_HEADER) { + ENVOY_CONN_LOG(debug, "Dropping header with invalid characters in its name: {}", connection_, + header_name); + stats_.dropped_headers_with_underscores_.inc(); + return 0; + } + ENVOY_CONN_LOG(debug, "Rejecting request due to header name with underscores: {}", connection_, + header_name); + stats_.requests_rejected_with_underscores_in_headers_.inc(); + return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; + } + return absl::nullopt; +} + } // namespace Http2 } // namespace Http } // namespace Envoy diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index 492893160a036..9fb56887537f8 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -7,6 +7,7 @@ #include #include +#include "envoy/api/v2/core/protocol.pb.h" #include "envoy/event/deferred_deletable.h" #include "envoy/http/codec.h" #include "envoy/network/connection.h" @@ -39,7 +40,8 @@ const std::string CLIENT_MAGIC_PREFIX = "PRI * HTTP/2"; /** * All stats for the HTTP/2 codec. @see stats_macros.h */ -#define ALL_HTTP2_CODEC_STATS(COUNTER) \ +#define ALL_HTTP2_CODEC_STATS(COUNTER, GAUGE) \ + COUNTER(dropped_headers_with_underscores) \ COUNTER(header_overflow) \ COUNTER(headers_cb_no_stream) \ COUNTER(inbound_empty_frames_flood) \ @@ -47,17 +49,21 @@ const std::string CLIENT_MAGIC_PREFIX = "PRI * HTTP/2"; COUNTER(inbound_window_update_frames_flood) \ COUNTER(outbound_control_flood) \ COUNTER(outbound_flood) \ + COUNTER(requests_rejected_with_underscores_in_headers) \ COUNTER(rx_messaging_error) \ COUNTER(rx_reset) \ COUNTER(too_many_header_frames) \ COUNTER(trailers) \ - COUNTER(tx_reset) + COUNTER(tx_flush_timeout) \ + COUNTER(tx_reset) \ + GAUGE(streams_active, Accumulate) \ + GAUGE(pending_send_bytes, Accumulate) /** * Wrapper struct for the HTTP/2 codec stats. 
@see stats_macros.h */ struct CodecStats { - ALL_HTTP2_CODEC_STATS(GENERATE_COUNTER_STRUCT) + ALL_HTTP2_CODEC_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) }; class Utility { @@ -147,6 +153,18 @@ class ConnectionImpl : public virtual Connection, protected Logger::LoggabledisableTimer(); + stream_idle_timer_.reset(); + } + } StreamImpl* base() { return this; } ssize_t onDataSourceRead(uint64_t length, uint32_t* data_flags); @@ -158,6 +176,8 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable; @@ -250,6 +276,10 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable checkHeaderNameForUnderscores(absl::string_view /* header_name */) { + return absl::nullopt; + } + static Http2Callbacks http2_callbacks_; std::list active_streams_; @@ -306,7 +349,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable frame_buffer_releasor_; // This counter keeps track of the number of outbound frames of types PING, SETTINGS and // RST_STREAM (these that were buffered in the underlying connection but not yet written into the // socket). If this counter exceeds the `max_outbound_control_frames_' value the connection is @@ -315,7 +358,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable control_frame_buffer_releasor_; // This counter keeps track of the number of consecutive inbound frames of types HEADERS, // CONTINUATION and DATA with an empty payload and no end stream flag. If this counter exceeds // the `max_consecutive_inbound_frames_with_empty_payload_` value the connection is terminated. 
@@ -380,9 +423,8 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable checkHeaderNameForUnderscores(absl::string_view header_name) override; // Http::Connection // The reason for overriding the dispatch method is to do flood mitigation only when @@ -455,6 +500,10 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { // This flag indicates that downstream data is being dispatched and turns on flood mitigation // in the checkMaxOutbound*Framed methods. bool dispatching_downstream_data_{false}; + + // The action to take when a request header name contains underscore characters. + envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action_; }; } // namespace Http2 diff --git a/source/common/http/path_utility.cc b/source/common/http/path_utility.cc index 49372fc50dbf8..78c32e35532ca 100644 --- a/source/common/http/path_utility.cc +++ b/source/common/http/path_utility.cc @@ -6,7 +6,6 @@ #include "absl/strings/str_join.h" #include "absl/strings/str_split.h" -#include "absl/strings/string_view.h" #include "absl/types/optional.h" namespace Envoy { @@ -69,5 +68,15 @@ void PathUtil::mergeSlashes(HeaderEntry& path_header) { prefix, absl::StrJoin(absl::StrSplit(path, '/', absl::SkipEmpty()), "/"), query, suffix)); } +absl::string_view PathUtil::removeQueryAndFragment(const absl::string_view path) { + absl::string_view ret = path; + // Trim query parameters and/or fragment if present. 
+ size_t offset = ret.find_first_of("?#"); + if (offset != absl::string_view::npos) { + ret.remove_suffix(ret.length() - offset); + } + return ret; +} + } // namespace Http } // namespace Envoy diff --git a/source/common/http/path_utility.h b/source/common/http/path_utility.h index a588f39de46eb..6bf16d3f4e0ee 100644 --- a/source/common/http/path_utility.h +++ b/source/common/http/path_utility.h @@ -2,6 +2,8 @@ #include "envoy/http/header_map.h" +#include "absl/strings/string_view.h" + namespace Envoy { namespace Http { @@ -15,6 +17,9 @@ class PathUtil { static bool canonicalPath(HeaderEntry& path_header); // Merges two or more adjacent slashes in path part of URI into one. static void mergeSlashes(HeaderEntry& path_header); + // Removes the query and/or fragment string (if present) from the input path. + // For example, this function returns "/data" for the input path "/data#fragment?param=value". + static absl::string_view removeQueryAndFragment(const absl::string_view path); }; } // namespace Http diff --git a/source/common/network/BUILD b/source/common/network/BUILD index 69f75b87d504a..7857da3beb7f5 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -178,6 +178,7 @@ envoy_cc_library( "//include/envoy/event:dispatcher_interface", "//include/envoy/event:file_event_interface", "//include/envoy/network:listener_interface", + "//include/envoy/runtime:runtime_interface", "//include/envoy/stats:stats_interface", "//include/envoy/stats:stats_macros", "//source/common/buffer:buffer_lib", diff --git a/source/common/network/listen_socket_impl.cc b/source/common/network/listen_socket_impl.cc index dc9e8991805c7..0d0c23deadad0 100644 --- a/source/common/network/listen_socket_impl.cc +++ b/source/common/network/listen_socket_impl.cc @@ -72,5 +72,7 @@ UdsListenSocket::UdsListenSocket(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& address) : ListenSocketImpl(std::move(io_handle), address) {} +std::atomic 
AcceptedSocketImpl::global_accepted_socket_count_; + } // namespace Network } // namespace Envoy diff --git a/source/common/network/listen_socket_impl.h b/source/common/network/listen_socket_impl.h index a5247e68f1923..d74d2d2e3785a 100644 --- a/source/common/network/listen_socket_impl.h +++ b/source/common/network/listen_socket_impl.h @@ -170,7 +170,21 @@ class AcceptedSocketImpl : public ConnectionSocketImpl { public: AcceptedSocketImpl(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& local_address, const Address::InstanceConstSharedPtr& remote_address) - : ConnectionSocketImpl(std::move(io_handle), local_address, remote_address) {} + : ConnectionSocketImpl(std::move(io_handle), local_address, remote_address) { + ++global_accepted_socket_count_; + } + + ~AcceptedSocketImpl() override { + ASSERT(global_accepted_socket_count_.load() > 0); + --global_accepted_socket_count_; + } + + // TODO (tonya11en): Global connection count tracking is temporarily performed via a static + // variable until the logic is moved into the overload manager. + static uint64_t acceptedSocketCount() { return global_accepted_socket_count_.load(); } + +private: + static std::atomic global_accepted_socket_count_; }; // ConnectionSocket used with client connections. diff --git a/source/common/network/listener_impl.cc b/source/common/network/listener_impl.cc index bd0679464cc37..26efba21dd189 100644 --- a/source/common/network/listener_impl.cc +++ b/source/common/network/listener_impl.cc @@ -17,6 +17,30 @@ namespace Envoy { namespace Network { +const std::string ListenerImpl::GlobalMaxCxRuntimeKey = + "overload.global_downstream_max_connections"; + +bool ListenerImpl::rejectCxOverGlobalLimit() { + // Enforce the global connection limit if necessary, immediately closing the accepted connection. 
+ Runtime::Loader* runtime = Runtime::LoaderSingleton::getExisting(); + + if (runtime == nullptr) { + // The runtime singleton won't exist in most unit tests that do not need global downstream limit + // enforcement. Therefore, there is no need to enforce limits if the singleton doesn't exist. + // TODO(tonya11en): Revisit this once runtime is made globally available. + return false; + } + + // If the connection limit is not set, don't limit the connections, but still track them. + // TODO(tonya11en): In integration tests, threadsafeSnapshot is necessary since the FakeUpstreams + // use a listener and do not run in a worker thread. In practice, this code path will always be + // run on a worker thread, but to prevent failed assertions in test environments, threadsafe + // snapshots must be used. This must be revisited. + const uint64_t global_cx_limit = runtime->threadsafeSnapshot()->getInteger( + GlobalMaxCxRuntimeKey, std::numeric_limits::max()); + return AcceptedSocketImpl::acceptedSocketCount() >= global_cx_limit; +} + void ListenerImpl::listenCallback(evconnlistener*, evutil_socket_t fd, sockaddr* remote_addr, int remote_addr_len, void* arg) { ListenerImpl* listener = static_cast(arg); @@ -24,6 +48,13 @@ void ListenerImpl::listenCallback(evconnlistener*, evutil_socket_t fd, sockaddr* // Create the IoSocketHandleImpl for the fd here. IoHandlePtr io_handle = std::make_unique(fd); + if (rejectCxOverGlobalLimit()) { + // The global connection limit has been reached. + io_handle->close(); + listener->cb_.onReject(); + return; + } + // Get the local address from the new socket if the listener is listening on IP ANY // (e.g., 0.0.0.0 for IPv4) (local_address_ is nullptr in this case). 
const Address::InstanceConstSharedPtr& local_address = diff --git a/source/common/network/listener_impl.h b/source/common/network/listener_impl.h index f9d932e9dd343..c62c8514d3b8a 100644 --- a/source/common/network/listener_impl.h +++ b/source/common/network/listener_impl.h @@ -1,5 +1,8 @@ #pragma once +#include "envoy/runtime/runtime.h" + +#include "absl/strings/string_view.h" #include "base_listener_impl.h" namespace Envoy { @@ -17,6 +20,8 @@ class ListenerImpl : public BaseListenerImpl { void disable() override; void enable() override; + static const std::string GlobalMaxCxRuntimeKey; + protected: void setupServerSocket(Event::DispatcherImpl& dispatcher, Socket& socket); @@ -27,6 +32,10 @@ class ListenerImpl : public BaseListenerImpl { int remote_addr_len, void* arg); static void errorCallback(evconnlistener* listener, void* context); + // Returns true if global connection limit has been reached and the accepted socket should be + // rejected/closed. If the accepted socket is to be admitted, false is returned. 
+ static bool rejectCxOverGlobalLimit(); + Event::Libevent::ListenerPtr listener_; }; diff --git a/source/common/router/BUILD b/source/common/router/BUILD index 2b259aa04ae7e..b15162f477986 100644 --- a/source/common/router/BUILD +++ b/source/common/router/BUILD @@ -49,6 +49,7 @@ envoy_cc_library( "//source/common/common:assert_lib", "//source/common/common:empty_string", "//source/common/common:hash_lib", + "//source/common/common:matchers_lib", "//source/common/common:utility_lib", "//source/common/config:metadata_lib", "//source/common/config:utility_lib", @@ -56,6 +57,7 @@ envoy_cc_library( "//source/common/http:hash_policy_lib", "//source/common/http:header_utility_lib", "//source/common/http:headers_lib", + "//source/common/http:path_utility_lib", "//source/common/http:utility_lib", "//source/common/protobuf:utility_lib", "//source/extensions/filters/http:well_known_names", diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index 5ca6cf2485de4..cb934b2a62509 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -25,6 +25,7 @@ #include "common/config/utility.h" #include "common/config/well_known_names.h" #include "common/http/headers.h" +#include "common/http/path_utility.h" #include "common/http/utility.h" #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" @@ -765,8 +766,8 @@ PrefixRouteEntryImpl::PrefixRouteEntryImpl( const VirtualHostImpl& vhost, const envoy::api::v2::route::Route& route, Server::Configuration::ServerFactoryContext& factory_context, ProtobufMessage::ValidationVisitor& validator) - : RouteEntryImplBase(vhost, route, factory_context, validator), - prefix_(route.match().prefix()) {} + : RouteEntryImplBase(vhost, route, factory_context, validator), prefix_(route.match().prefix()), + path_matcher_(Matchers::PathMatcher::createPrefix(prefix_, !case_sensitive_)) {} void PrefixRouteEntryImpl::rewritePathHeader(Http::HeaderMap& headers, bool 
insert_envoy_original_path) const { @@ -777,9 +778,7 @@ RouteConstSharedPtr PrefixRouteEntryImpl::matches(const Http::HeaderMap& headers const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const { if (RouteEntryImplBase::matchRoute(headers, stream_info, random_value) && - (case_sensitive_ - ? absl::StartsWith(headers.Path()->value().getStringView(), prefix_) - : absl::StartsWithIgnoreCase(headers.Path()->value().getStringView(), prefix_))) { + path_matcher_->match(headers.Path()->value().getStringView())) { return clusterEntry(headers, random_value); } return nullptr; @@ -789,7 +788,8 @@ PathRouteEntryImpl::PathRouteEntryImpl(const VirtualHostImpl& vhost, const envoy::api::v2::route::Route& route, Server::Configuration::ServerFactoryContext& factory_context, ProtobufMessage::ValidationVisitor& validator) - : RouteEntryImplBase(vhost, route, factory_context, validator), path_(route.match().path()) {} + : RouteEntryImplBase(vhost, route, factory_context, validator), path_(route.match().path()), + path_matcher_(Matchers::PathMatcher::createExact(path_, !case_sensitive_)) {} void PathRouteEntryImpl::rewritePathHeader(Http::HeaderMap& headers, bool insert_envoy_original_path) const { @@ -799,28 +799,9 @@ void PathRouteEntryImpl::rewritePathHeader(Http::HeaderMap& headers, RouteConstSharedPtr PathRouteEntryImpl::matches(const Http::HeaderMap& headers, const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const { - if (RouteEntryImplBase::matchRoute(headers, stream_info, random_value)) { - const Http::HeaderString& path = headers.Path()->value(); - absl::string_view query_string = Http::Utility::findQueryStringStart(path); - size_t compare_length = path.size(); - if (query_string.length() > 0) { - compare_length = compare_length - query_string.length(); - } - - if (compare_length != path_.size()) { - return nullptr; - } - - const absl::string_view path_section = path.getStringView().substr(0, compare_length); - if (case_sensitive_) { - if 
(absl::string_view(path_) == path_section) { - return clusterEntry(headers, random_value); - } - } else { - if (absl::EqualsIgnoreCase(path_, path_section)) { - return clusterEntry(headers, random_value); - } - } + if (RouteEntryImplBase::matchRoute(headers, stream_info, random_value) && + path_matcher_->match(headers.Path()->value().getStringView())) { + return clusterEntry(headers, random_value); } return nullptr; @@ -841,26 +822,23 @@ RegexRouteEntryImpl::RegexRouteEntryImpl( } } -absl::string_view RegexRouteEntryImpl::pathOnly(const Http::HeaderMap& headers) const { - const Http::HeaderString& path = headers.Path()->value(); - const absl::string_view query_string = Http::Utility::findQueryStringStart(path); - const size_t path_string_length = path.size() - query_string.length(); - return path.getStringView().substr(0, path_string_length); -} - void RegexRouteEntryImpl::rewritePathHeader(Http::HeaderMap& headers, bool insert_envoy_original_path) const { + const absl::string_view path = + Http::PathUtil::removeQueryAndFragment(headers.Path()->value().getStringView()); // TODO(yuval-k): This ASSERT can happen if the path was changed by a filter without clearing the // route cache. We should consider if ASSERT-ing is the desired behavior in this case. 
- ASSERT(regex_->match(pathOnly(headers))); - finalizePathHeader(headers, pathOnly(headers), insert_envoy_original_path); + ASSERT(regex_->match(path)); + finalizePathHeader(headers, path, insert_envoy_original_path); } RouteConstSharedPtr RegexRouteEntryImpl::matches(const Http::HeaderMap& headers, const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const { if (RouteEntryImplBase::matchRoute(headers, stream_info, random_value)) { - if (regex_->match(pathOnly(headers))) { + const absl::string_view path = + Http::PathUtil::removeQueryAndFragment(headers.Path()->value().getStringView()); + if (regex_->match(path)) { return clusterEntry(headers, random_value); } } diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index ceb6cb3e84c05..abce730bbd14c 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -17,6 +17,7 @@ #include "envoy/server/filter_config.h" #include "envoy/upstream/cluster_manager.h" +#include "common/common/matchers.h" #include "common/config/metadata.h" #include "common/http/hash_policy.h" #include "common/http/header_utility.h" @@ -717,6 +718,7 @@ class PrefixRouteEntryImpl : public RouteEntryImplBase { private: const std::string prefix_; + const Matchers::PathMatcherConstSharedPtr path_matcher_; }; /** @@ -742,6 +744,7 @@ class PathRouteEntryImpl : public RouteEntryImplBase { private: const std::string path_; + const Matchers::PathMatcherConstSharedPtr path_matcher_; }; /** @@ -766,8 +769,6 @@ class RegexRouteEntryImpl : public RouteEntryImplBase { void rewritePathHeader(Http::HeaderMap& headers, bool insert_envoy_original_path) const override; private: - absl::string_view pathOnly(const Http::HeaderMap& headers) const; - Regex::CompiledMatcherPtr regex_; std::string regex_str_; }; diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index de28edd2ad295..9f396a8b694e8 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD 
@@ -280,6 +280,7 @@ envoy_cc_library( "//include/envoy/upstream:resource_manager_interface", "//include/envoy/upstream:upstream_interface", "//source/common/common:assert_lib", + "//source/common/common:basic_resource_lib", ], ) diff --git a/source/common/upstream/conn_pool_map_impl.h b/source/common/upstream/conn_pool_map_impl.h index 19fa80ce4baa0..a19565564c8f7 100644 --- a/source/common/upstream/conn_pool_map_impl.h +++ b/source/common/upstream/conn_pool_map_impl.h @@ -30,7 +30,7 @@ ConnPoolMap::getPool(KEY_TYPE key, const PoolFactory& facto if (pool_iter != active_pools_.end()) { return std::ref(*(pool_iter->second)); } - Resource& connPoolResource = host_->cluster().resourceManager(priority_).connectionPools(); + ResourceLimit& connPoolResource = host_->cluster().resourceManager(priority_).connectionPools(); // We need a new pool. Check if we have room. if (!connPoolResource.canCreate()) { // We're full. Try to free up a pool. If we can't, bail out. diff --git a/source/common/upstream/resource_manager_impl.h b/source/common/upstream/resource_manager_impl.h index 8ba81886f56f7..b48ba8a42c262 100644 --- a/source/common/upstream/resource_manager_impl.h +++ b/source/common/upstream/resource_manager_impl.h @@ -5,11 +5,13 @@ #include #include +#include "envoy/common/resource.h" #include "envoy/runtime/runtime.h" #include "envoy/upstream/resource_manager.h" #include "envoy/upstream/upstream.h" #include "common/common/assert.h" +#include "common/common/basic_resource_impl.h" namespace Envoy { namespace Upstream { @@ -41,37 +43,34 @@ class ResourceManagerImpl : public ResourceManager { cb_stats.cx_pool_open_, cb_stats.remaining_cx_pools_) {} // Upstream::ResourceManager - Resource& connections() override { return connections_; } - Resource& pendingRequests() override { return pending_requests_; } - Resource& requests() override { return requests_; } - Resource& retries() override { return retries_; } - Resource& connectionPools() override { return connection_pools_; } 
+ ResourceLimit& connections() override { return connections_; } + ResourceLimit& pendingRequests() override { return pending_requests_; } + ResourceLimit& requests() override { return requests_; } + ResourceLimit& retries() override { return retries_; } + ResourceLimit& connectionPools() override { return connection_pools_; } private: - struct ResourceImpl : public Resource { - ResourceImpl(uint64_t max, Runtime::Loader& runtime, const std::string& runtime_key, - Stats::Gauge& open_gauge, Stats::Gauge& remaining) - : max_(max), runtime_(runtime), runtime_key_(runtime_key), open_gauge_(open_gauge), + struct ManagedResourceImpl : public BasicResourceLimitImpl { + ManagedResourceImpl(uint64_t max, Runtime::Loader& runtime, const std::string& runtime_key, + Stats::Gauge& open_gauge, Stats::Gauge& remaining) + : BasicResourceLimitImpl(max, runtime, runtime_key), open_gauge_(open_gauge), remaining_(remaining) { remaining_.set(max); } - ~ResourceImpl() override { ASSERT(current_ == 0); } - // Upstream::Resource - bool canCreate() override { return current_ < max(); } + ~ManagedResourceImpl() override { ASSERT(count() == 0); } + void inc() override { - current_++; + BasicResourceLimitImpl::inc(); updateRemaining(); - open_gauge_.set(canCreate() ? 0 : 1); + open_gauge_.set(BasicResourceLimitImpl::canCreate() ? 0 : 1); } - void dec() override { decBy(1); } + void decBy(uint64_t amount) override { - ASSERT(current_ >= amount); - current_ -= amount; + BasicResourceLimitImpl::decBy(amount); updateRemaining(); - open_gauge_.set(canCreate() ? 0 : 1); + open_gauge_.set(BasicResourceLimitImpl::canCreate() ? 0 : 1); } - uint64_t max() override { return runtime_.snapshot().getInteger(runtime_key_, max_); } /** * We set the gauge instead of incrementing and decrementing because, @@ -87,11 +86,6 @@ class ResourceManagerImpl : public ResourceManager { remaining_.set(max() > current_copy ? 
max() - current_copy : 0); } - const uint64_t max_; - std::atomic current_{}; - Runtime::Loader& runtime_; - const std::string runtime_key_; - /** * A gauge to notify the live circuit breaker state. The gauge is set to 0 * to notify that the circuit breaker is closed, or to 1 to notify that it @@ -105,11 +99,11 @@ class ResourceManagerImpl : public ResourceManager { Stats::Gauge& remaining_; }; - ResourceImpl connections_; - ResourceImpl pending_requests_; - ResourceImpl requests_; - ResourceImpl retries_; - ResourceImpl connection_pools_; + ManagedResourceImpl connections_; + ManagedResourceImpl pending_requests_; + ManagedResourceImpl requests_; + ManagedResourceImpl retries_; + ManagedResourceImpl connection_pools_; }; using ResourceManagerImplPtr = std::unique_ptr; diff --git a/source/extensions/filters/common/rbac/matchers.cc b/source/extensions/filters/common/rbac/matchers.cc index 271d54c29523e..2e509def55270 100644 --- a/source/extensions/filters/common/rbac/matchers.cc +++ b/source/extensions/filters/common/rbac/matchers.cc @@ -28,6 +28,8 @@ MatcherConstSharedPtr Matcher::create(const envoy::config::rbac::v2::Permission& return std::make_shared(permission.not_rule()); case envoy::config::rbac::v2::Permission::RuleCase::kRequestedServerName: return std::make_shared(permission.requested_server_name()); + case envoy::config::rbac::v2::Permission::RuleCase::kUrlPath: + return std::make_shared(permission.url_path()); default: NOT_REACHED_GCOVR_EXCL_LINE; } @@ -51,6 +53,8 @@ MatcherConstSharedPtr Matcher::create(const envoy::config::rbac::v2::Principal& return std::make_shared(principal.metadata()); case envoy::config::rbac::v2::Principal::IdentifierCase::kNotId: return std::make_shared(principal.not_id()); + case envoy::config::rbac::v2::Principal::IdentifierCase::kUrlPath: + return std::make_shared(principal.url_path()); default: NOT_REACHED_GCOVR_EXCL_LINE; } @@ -177,6 +181,14 @@ bool RequestedServerNameMatcher::matches(const Network::Connection& connection, 
return match(connection.requestedServerName()); } +bool PathMatcher::matches(const Network::Connection&, const Envoy::Http::HeaderMap& headers, + const StreamInfo::StreamInfo&) const { + if (headers.Path() == nullptr) { + return false; + } + return path_matcher_.match(headers.Path()->value().getStringView()); +} + } // namespace RBAC } // namespace Common } // namespace Filters diff --git a/source/extensions/filters/common/rbac/matchers.h b/source/extensions/filters/common/rbac/matchers.h index 8b2ef2bf10766..00a467c6cd0a7 100644 --- a/source/extensions/filters/common/rbac/matchers.h +++ b/source/extensions/filters/common/rbac/matchers.h @@ -6,6 +6,8 @@ #include "envoy/config/rbac/v2/rbac.pb.h" #include "envoy/http/header_map.h" #include "envoy/network/connection.h" +#include "envoy/type/matcher/path.pb.h" +#include "envoy/type/matcher/string.pb.h" #include "common/common/matchers.h" #include "common/http/header_utility.h" @@ -226,6 +228,22 @@ class RequestedServerNameMatcher : public Matcher, Envoy::Matchers::StringMatche const StreamInfo::StreamInfo&) const override; }; +/** + * Perform a match against the path header on the HTTP request. The query and fragment string are + * removed from the path header before matching. 
+ */ +class PathMatcher : public Matcher { +public: + PathMatcher(const envoy::type::matcher::PathMatcher& path_matcher) + : path_matcher_(path_matcher) {} + + bool matches(const Network::Connection& connection, const Envoy::Http::HeaderMap& headers, + const StreamInfo::StreamInfo&) const override; + +private: + const Matchers::PathMatcher path_matcher_; +}; + } // namespace RBAC } // namespace Common } // namespace Filters diff --git a/source/extensions/filters/http/jwt_authn/BUILD b/source/extensions/filters/http/jwt_authn/BUILD index 7ce4b28bfa748..e7bbde282f664 100644 --- a/source/extensions/filters/http/jwt_authn/BUILD +++ b/source/extensions/filters/http/jwt_authn/BUILD @@ -81,6 +81,7 @@ envoy_cc_library( hdrs = ["matcher.h"], deps = [ ":verifier_lib", + "//source/common/common:matchers_lib", "//source/common/http:header_utility_lib", "//source/common/router:config_lib", ], diff --git a/source/extensions/filters/http/jwt_authn/matcher.cc b/source/extensions/filters/http/jwt_authn/matcher.cc index 75753cdb42e17..9d2d25fe93c7e 100644 --- a/source/extensions/filters/http/jwt_authn/matcher.cc +++ b/source/extensions/filters/http/jwt_authn/matcher.cc @@ -1,6 +1,7 @@ #include "extensions/filters/http/jwt_authn/matcher.h" #include "common/common/logger.h" +#include "common/common/matchers.h" #include "common/common/regex.h" #include "common/router/config_impl.h" @@ -58,13 +59,12 @@ class BaseMatcherImpl : public Matcher, public Logger::Loggable class PrefixMatcherImpl : public BaseMatcherImpl { public: PrefixMatcherImpl(const RequirementRule& rule) - : BaseMatcherImpl(rule), prefix_(rule.match().prefix()) {} + : BaseMatcherImpl(rule), prefix_(rule.match().prefix()), + path_matcher_(Matchers::PathMatcher::createPrefix(prefix_, !case_sensitive_)) {} bool matches(const Http::HeaderMap& headers) const override { if (BaseMatcherImpl::matchRoute(headers) && - (case_sensitive_ - ? 
absl::StartsWith(headers.Path()->value().getStringView(), prefix_) - : absl::StartsWithIgnoreCase(headers.Path()->value().getStringView(), prefix_))) { + path_matcher_->match(headers.Path()->value().getStringView())) { ENVOY_LOG(debug, "Prefix requirement '{}' matched.", prefix_); return true; } @@ -74,6 +74,7 @@ class PrefixMatcherImpl : public BaseMatcherImpl { private: // prefix string const std::string prefix_; + const Matchers::PathMatcherConstSharedPtr path_matcher_; }; /** @@ -82,19 +83,14 @@ class PrefixMatcherImpl : public BaseMatcherImpl { class PathMatcherImpl : public BaseMatcherImpl { public: PathMatcherImpl(const RequirementRule& rule) - : BaseMatcherImpl(rule), path_(rule.match().path()) {} + : BaseMatcherImpl(rule), path_(rule.match().path()), + path_matcher_(Matchers::PathMatcher::createExact(path_, !case_sensitive_)) {} bool matches(const Http::HeaderMap& headers) const override { - if (BaseMatcherImpl::matchRoute(headers)) { - const Http::HeaderString& path = headers.Path()->value(); - const size_t compare_length = - path.getStringView().length() - Http::Utility::findQueryStringStart(path).length(); - auto real_path = path.getStringView().substr(0, compare_length); - bool match = case_sensitive_ ? real_path == path_ : StringUtil::caseCompare(real_path, path_); - if (match) { - ENVOY_LOG(debug, "Path requirement '{}' matched.", path_); - return true; - } + if (BaseMatcherImpl::matchRoute(headers) && + path_matcher_->match(headers.Path()->value().getStringView())) { + ENVOY_LOG(debug, "Path requirement '{}' matched.", path_); + return true; } return false; } @@ -102,6 +98,7 @@ class PathMatcherImpl : public BaseMatcherImpl { private: // path string. 
const std::string path_; + const Matchers::PathMatcherConstSharedPtr path_matcher_; }; /** diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index daf2fdf046979..d8679ae6f8c20 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -182,7 +182,9 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( 0 #endif ))), - merge_slashes_(config.merge_slashes()) { + merge_slashes_(config.merge_slashes()), + headers_with_underscores_action_( + config.common_http_protocol_options().headers_with_underscores_action()) { // If idle_timeout_ was not configured in common_http_protocol_options, use value in deprecated // idle_timeout field. // TODO(asraa): Remove when idle_timeout is removed. @@ -422,18 +424,18 @@ HttpConnectionManagerConfig::createCodec(Network::Connection& connection, case CodecType::HTTP1: return std::make_unique( connection, context_.scope(), callbacks, http1_settings_, maxRequestHeadersKb(), - maxRequestHeadersCount()); + maxRequestHeadersCount(), headersWithUnderscoresAction()); case CodecType::HTTP2: return std::make_unique( connection, callbacks, context_.scope(), http2_settings_, maxRequestHeadersKb(), - maxRequestHeadersCount()); + maxRequestHeadersCount(), headersWithUnderscoresAction()); case CodecType::HTTP3: // TODO(danzh) create QUIC specific codec. 
NOT_IMPLEMENTED_GCOVR_EXCL_LINE; case CodecType::AUTO: return Http::ConnectionManagerUtility::autoCreateCodec( connection, data, callbacks, context_.scope(), http1_settings_, http2_settings_, - maxRequestHeadersKb(), maxRequestHeadersCount()); + maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); } NOT_REACHED_GCOVR_EXCL_LINE; diff --git a/source/extensions/filters/network/http_connection_manager/config.h b/source/extensions/filters/network/http_connection_manager/config.h index 15c30ee1eee18..67844b94475d8 100644 --- a/source/extensions/filters/network/http_connection_manager/config.h +++ b/source/extensions/filters/network/http_connection_manager/config.h @@ -147,6 +147,10 @@ class HttpConnectionManagerConfig : Logger::Loggable, const Http::Http1Settings& http1Settings() const override { return http1_settings_; } bool shouldNormalizePath() const override { return normalize_path_; } bool shouldMergeSlashes() const override { return merge_slashes_; } + envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headersWithUnderscoresAction() const override { + return headers_with_underscores_action_; + } std::chrono::milliseconds delayedCloseTimeout() const override { return delayed_close_timeout_; } private: @@ -196,6 +200,8 @@ class HttpConnectionManagerConfig : Logger::Loggable, std::chrono::milliseconds delayed_close_timeout_; const bool normalize_path_; const bool merge_slashes_; + const envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action_; // Default idle timeout is 5 minutes if nothing is specified in the HCM config. 
static const uint64_t StreamIdleTimeoutMs = 5 * 60 * 1000; diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h b/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h index 047970f4fdbed..16183f0807ade 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h @@ -34,6 +34,9 @@ class EnvoyQuicServerStream : public quic::QuicSpdyServerStreamBase, public Envo // Http::Stream void resetStream(Http::StreamResetReason reason) override; void readDisable(bool disable) override; + void setFlushTimeout(std::chrono::milliseconds) override { + // TODO(mattklein123): Actually implement this for HTTP/3 similar to HTTP/2. + } // quic::QuicSpdyStream void OnBodyAvailable() override; void OnStreamReset(const quic::QuicRstStreamFrame& frame) override; diff --git a/source/server/BUILD b/source/server/BUILD index a863b2c0ce071..1247089212cc5 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -310,6 +310,7 @@ envoy_cc_library( "//include/envoy/server:listener_manager_interface", "//include/envoy/server:transport_socket_config_interface", "//include/envoy/server:worker_interface", + "//source/common/common:basic_resource_lib", "//source/common/config:utility_lib", "//source/common/init:manager_lib", "//source/common/network:listen_socket_lib", diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index c196947664033..288e8853ffeb7 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -264,6 +264,8 @@ void ConnectionHandlerImpl::ActiveTcpSocket::newConnection() { socket_->setDetectedTransportProtocol( Extensions::TransportSockets::TransportProtocolNames::get().RawBuffer); } + // TODO(lambdai): add integration test + // TODO: Address issues in wider scope. 
See https://github.com/envoyproxy/envoy/issues/8925 // Erase accept filter states because accept filters may not get the opportunity to clean up. // Particularly the assigned events need to reset before assigning new events in the follow up. accept_filters_.clear(); @@ -273,6 +275,14 @@ void ConnectionHandlerImpl::ActiveTcpSocket::newConnection() { } void ConnectionHandlerImpl::ActiveTcpListener::onAccept(Network::ConnectionSocketPtr&& socket) { + if (listenerConnectionLimitReached()) { + ENVOY_LOG(trace, "closing connection: listener connection limit reached for {}", + config_.name()); + socket->close(); + stats_.downstream_cx_overflow_.inc(); + return; + } + onAcceptWorker(std::move(socket), config_.handOffRestoredDestinationConnections(), false); } diff --git a/source/server/connection_handler_impl.h b/source/server/connection_handler_impl.h index cc809fcf4ea3e..8bdb523602141 100644 --- a/source/server/connection_handler_impl.h +++ b/source/server/connection_handler_impl.h @@ -27,7 +27,9 @@ namespace Server { #define ALL_LISTENER_STATS(COUNTER, GAUGE, HISTOGRAM) \ COUNTER(downstream_cx_destroy) \ + COUNTER(downstream_cx_overflow) \ COUNTER(downstream_cx_total) \ + COUNTER(downstream_global_cx_overflow) \ COUNTER(downstream_pre_cx_timeout) \ COUNTER(no_filter_chain_match) \ GAUGE(downstream_cx_active, Accumulate) \ @@ -106,15 +108,22 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, ActiveTcpListener(ConnectionHandlerImpl& parent, Network::ListenerPtr&& listener, Network::ListenerConfig& config); ~ActiveTcpListener() override; + bool listenerConnectionLimitReached() const { + // TODO(tonya11en): Delegate enforcement of per-listener connection limits to overload + // manager. 
+ return !config_.openConnections().canCreate(); + } void onAcceptWorker(Network::ConnectionSocketPtr&& socket, bool hand_off_restored_destination_connections, bool rebalanced); void decNumConnections() { ASSERT(num_listener_connections_ > 0); --num_listener_connections_; + config_.openConnections().dec(); } // Network::ListenerCallbacks void onAccept(Network::ConnectionSocketPtr&& socket) override; + void onReject() override { stats_.downstream_global_cx_overflow_.inc(); } // ActiveListenerImplBase Network::Listener* listener() override { return listener_.get(); } @@ -122,7 +131,10 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, // Network::BalancedConnectionHandler uint64_t numConnections() const override { return num_listener_connections_; } - void incNumConnections() override { ++num_listener_connections_; } + void incNumConnections() override { + ++num_listener_connections_; + config_.openConnections().inc(); + } void post(Network::ConnectionSocketPtr&& socket) override; /** diff --git a/source/server/http/BUILD b/source/server/http/BUILD index 0600a4174a0e8..9011b91b26924 100644 --- a/source/server/http/BUILD +++ b/source/server/http/BUILD @@ -32,6 +32,7 @@ envoy_cc_library( "//source/common/access_log:access_log_lib", "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", + "//source/common/common:basic_resource_lib", "//source/common/common:empty_string", "//source/common/common:enum_to_int", "//source/common/common:macros", diff --git a/source/server/http/admin.cc b/source/server/http/admin.cc index 946b5bc77c05a..36e3e5bb460c9 100644 --- a/source/server/http/admin.cc +++ b/source/server/http/admin.cc @@ -1313,7 +1313,7 @@ Http::ServerConnectionPtr AdminImpl::createCodec(Network::Connection& connection Http::ServerConnectionCallbacks& callbacks) { return Http::ConnectionManagerUtility::autoCreateCodec( connection, data, callbacks, server_.stats(), Http::Http1Settings(), Http::Http2Settings(), - maxRequestHeadersKb(), 
maxRequestHeadersCount()); + maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); } bool AdminImpl::createNetworkFilterChain(Network::Connection& connection, diff --git a/source/server/http/admin.h b/source/server/http/admin.h index 3ace506ef6efd..967702596a4fa 100644 --- a/source/server/http/admin.h +++ b/source/server/http/admin.h @@ -20,6 +20,7 @@ #include "envoy/upstream/outlier_detection.h" #include "envoy/upstream/resource_manager.h" +#include "common/common/basic_resource_impl.h" #include "common/common/empty_string.h" #include "common/common/logger.h" #include "common/common/macros.h" @@ -153,6 +154,10 @@ class AdminImpl : public Admin, const Http::Http1Settings& http1Settings() const override { return http1_settings_; } bool shouldNormalizePath() const override { return true; } bool shouldMergeSlashes() const override { return true; } + envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headersWithUnderscoresAction() const override { + return envoy::api::v2::core::HttpProtocolOptions::ALLOW; + } Http::Code request(absl::string_view path_and_query, absl::string_view method, Http::HeaderMap& response_headers, std::string& body) override; void closeSocket(); @@ -352,12 +357,14 @@ class AdminImpl : public Admin, return envoy::api::v2::core::TrafficDirection::UNSPECIFIED; } Network::ConnectionBalancer& connectionBalancer() override { return connection_balancer_; } + ResourceLimit& openConnections() override { return open_connections_; } AdminImpl& parent_; const std::string name_; Stats::ScopePtr scope_; Http::ConnectionManagerListenerStats stats_; Network::NopConnectionBalancerImpl connection_balancer_; + BasicResourceLimitImpl open_connections_; }; using AdminListenerPtr = std::unique_ptr; diff --git a/source/server/listener_impl.cc b/source/server/listener_impl.cc index 32661629bd52e..36973965401a2 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -50,7 +50,11 @@ 
ListenerImpl::ListenerImpl(const envoy::api::v2::Listener& config, const std::st config_(config), version_info_(version_info), listener_filters_timeout_( PROTOBUF_GET_MS_OR_DEFAULT(config, listener_filters_timeout, 15000)), - continue_on_listener_filters_timeout_(config.continue_on_listener_filters_timeout()) { + continue_on_listener_filters_timeout_(config.continue_on_listener_filters_timeout()), + cx_limit_runtime_key_("envoy.resource_limits.listener." + config_.name() + + ".connection_limit"), + open_connections_(std::make_shared( + std::numeric_limits::max(), parent.server_.runtime(), cx_limit_runtime_key_)) { if (config.has_transparent()) { addListenSocketOptions(Network::SocketOptionFactory::buildIpTransparentOptions()); } @@ -77,6 +81,15 @@ ListenerImpl::ListenerImpl(const envoy::api::v2::Listener& config, const std::st udp_listener_factory_ = config_factory.createActiveUdpListenerFactory(*message); } + const absl::optional runtime_val = + parent_.server_.runtime().snapshot().get(cx_limit_runtime_key_); + if (runtime_val && runtime_val->empty()) { + ENVOY_LOG(warn, + "Listener connection limit runtime key {} is empty. 
There are currently no " + "limitations on the number of accepted connections for listener {}.", + cx_limit_runtime_key_, config_.name()); + } + if (!config.listener_filters().empty()) { switch (socket_type_) { case Network::Address::SocketType::Datagram: @@ -316,4 +329,4 @@ void ListenerImpl::setSocket(const Network::SocketSharedPtr& socket) { } } // namespace Server -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/server/listener_impl.h b/source/server/listener_impl.h index 21e06155aa424..f146482415949 100644 --- a/source/server/listener_impl.h +++ b/source/server/listener_impl.h @@ -8,6 +8,7 @@ #include "envoy/server/filter_config.h" #include "envoy/stats/scope.h" +#include "common/common/basic_resource_impl.h" #include "common/common/logger.h" #include "common/init/manager_impl.h" @@ -101,6 +102,14 @@ class ListenerImpl : public Network::ListenerConfig, return udp_listener_factory_.get(); } Network::ConnectionBalancer& connectionBalancer() override { return *connection_balancer_; } + ResourceLimit& openConnections() override { return *open_connections_; } + + void ensureSocketOptions() { + if (!listen_socket_options_) { + listen_socket_options_ = + std::make_shared>(); + } + } // Server::Configuration::ListenerFactoryContext AccessLog::AccessLogManager& accessLogManager() override; @@ -130,12 +139,6 @@ class ListenerImpl : public Network::ListenerConfig, OptProcessContextRef processContext() override; Configuration::ServerFactoryContext& getServerFactoryContext() const override; - void ensureSocketOptions() { - if (!listen_socket_options_) { - listen_socket_options_ = - std::make_shared>(); - } - } // Network::DrainDecision bool drainClose() const override; @@ -153,6 +156,7 @@ class ListenerImpl : public Network::ListenerConfig, ensureSocketOptions(); listen_socket_options_->emplace_back(std::move(option)); } + void addListenSocketOptions(const Network::Socket::OptionsSharedPtr& options) { ensureSocketOptions(); 
Network::Socket::appendOptions(listen_socket_options_, options); @@ -195,9 +199,15 @@ class ListenerImpl : public Network::ListenerConfig, Network::ActiveUdpListenerFactoryPtr udp_listener_factory_; Network::ConnectionBalancerPtr connection_balancer_; + // Per-listener connection limits are only specified via runtime. + // + // TODO (tonya11en): Move this functionality into the overload manager. + const std::string cx_limit_runtime_key_; + std::shared_ptr open_connections_; + // to access ListenerManagerImpl::factory_. friend class ListenerFilterChainFactoryBuilder; }; } // namespace Server -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/server/server.cc b/source/server/server.cc index 5a136fbf5e53c..0299a71575139 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -31,6 +31,7 @@ #include "common/local_info/local_info_impl.h" #include "common/memory/stats.h" #include "common/network/address_impl.h" +#include "common/network/listener_impl.h" #include "common/protobuf/utility.h" #include "common/router/rds_impl.h" #include "common/runtime/runtime_impl.h" @@ -277,6 +278,8 @@ void InstanceImpl::initialize(const Options& options, // Enable the selected buffer implementation (old libevent evbuffer version or new native // version) early in the initialization, before any buffers can be created. + RELEASE_ASSERT(!options.libeventBufferEnabled(), + "old_impl not supported for buffers"); // This option is no longer supported. Buffer::OwnedImpl::useOldImpl(options.libeventBufferEnabled()); ENVOY_LOG(info, "buffer implementation: {}", Buffer::OwnedImpl().usesOldImpl() ? "old (libevent)" : "new"); @@ -466,6 +469,15 @@ void InstanceImpl::initialize(const Options& options, // GuardDog (deadlock detection) object and thread setup before workers are // started and before our own run() loop runs. 
guard_dog_ = std::make_unique(stats_store_, config_, *api_); + + // If there is no global limit to the number of active connections, warn on startup. + // TODO (tonya11en): Move this functionality into the overload manager. + if (runtime().snapshot().get(Network::ListenerImpl::GlobalMaxCxRuntimeKey) == EMPTY_STRING) { + ENVOY_LOG(warn, + "there is no configured limit to the number of allowed active connections. Set a " + "limit via the runtime key {}", + Network::ListenerImpl::GlobalMaxCxRuntimeKey); + } } void InstanceImpl::startWorkers() { diff --git a/test/common/buffer/buffer_fuzz.cc b/test/common/buffer/buffer_fuzz.cc index fe2453905ce65..787830f9e5039 100644 --- a/test/common/buffer/buffer_fuzz.cc +++ b/test/common/buffer/buffer_fuzz.cc @@ -67,6 +67,12 @@ void releaseFragmentAllocation(const void* p, size_t, const Buffer::BufferFragme // walk off the edge; the caller should be guaranteeing this. class StringBuffer : public Buffer::Instance { public: + void addDrainTracker(std::function drain_tracker) override { + // Not implemented well. + ASSERT(false); + drain_tracker(); + } + void add(const void* data, uint64_t size) override { FUZZ_ASSERT(start_ + size_ + size <= data_.size()); ::memcpy(mutableEnd(), data, size); @@ -478,7 +484,8 @@ void executeActions(const test::common::buffer::BufferFuzzTestCase& input, Buffe void BufferFuzz::bufferFuzz(const test::common::buffer::BufferFuzzTestCase& input, bool old_impl) { ENVOY_LOG_MISC(trace, "Using {} buffer implementation", old_impl ? "old" : "new"); - Buffer::OwnedImpl::useOldImpl(old_impl); + // Buffer::OwnedImpl::useOldImpl(old_impl); no longer supported + Buffer::OwnedImpl::useOldImpl(false); Context ctxt; // Fuzzed buffers. 
BufferList buffers; diff --git a/test/common/buffer/owned_impl_test.cc b/test/common/buffer/owned_impl_test.cc index 6e744a9e418cd..62e5578da56f9 100644 --- a/test/common/buffer/owned_impl_test.cc +++ b/test/common/buffer/owned_impl_test.cc @@ -41,12 +41,21 @@ class OwnedImplTest : public BufferImplementationParamTest { return; } const auto& buffer_slices = buffer.describeSlicesForTest(); + ASSERT_EQ(buffer_list.size(), buffer_slices.size()); for (uint64_t i = 0; i < buffer_slices.size(); i++) { EXPECT_EQ(buffer_slices[i].data, buffer_list[i][0]); EXPECT_EQ(buffer_slices[i].reservable, buffer_list[i][1]); EXPECT_EQ(buffer_slices[i].capacity, buffer_list[i][2]); } } + + static void expectFirstSlice(std::vector slice_description, OwnedImpl& buffer) { + const auto& buffer_slices = buffer.describeSlicesForTest(); + ASSERT_LE(1, buffer_slices.size()); + EXPECT_EQ(buffer_slices[0].data, slice_description[0]); + EXPECT_EQ(buffer_slices[0].reservable, slice_description[1]); + EXPECT_EQ(buffer_slices[0].capacity, slice_description[2]); + } }; INSTANTIATE_TEST_SUITE_P(OwnedImplTest, OwnedImplTest, @@ -89,6 +98,7 @@ TEST_P(OwnedImplTest, AddEmptyFragment) { BufferFragmentImpl frag2("", 0, [this](const void*, size_t, const BufferFragmentImpl*) { release_callback_called_ = true; }); + BufferFragmentImpl frag3(input, 11, [](const void*, size_t, const BufferFragmentImpl*) {}); Buffer::OwnedImpl buffer; buffer.addBufferFragment(frag1); EXPECT_EQ(11, buffer.length()); @@ -96,7 +106,18 @@ TEST_P(OwnedImplTest, AddEmptyFragment) { buffer.addBufferFragment(frag2); EXPECT_EQ(11, buffer.length()); - buffer.drain(11); + buffer.addBufferFragment(frag3); + EXPECT_EQ(22, buffer.length()); + + // Cover case of copying a buffer with an empty fragment. 
+ Buffer::OwnedImpl buffer2; + buffer2.add(buffer); + + // Cover copyOut + std::unique_ptr outbuf(new char[buffer.length()]); + buffer.copyOut(0, buffer.length(), outbuf.get()); + + buffer.drain(22); EXPECT_EQ(0, buffer.length()); EXPECT_TRUE(release_callback_called_); } @@ -344,6 +365,300 @@ TEST_P(OwnedImplTest, Read) { EXPECT_THAT(buffer.describeSlicesForTest(), testing::IsEmpty()); } +TEST_P(OwnedImplTest, DrainTracking) { + if (GetParam() == BufferImplementation::Old) { + return; + } + testing::InSequence s; + + Buffer::OwnedImpl buffer; + buffer.add("a"); + + testing::MockFunction tracker1; + testing::MockFunction tracker2; + buffer.addDrainTracker(tracker1.AsStdFunction()); + buffer.addDrainTracker(tracker2.AsStdFunction()); + + testing::MockFunction done; + EXPECT_CALL(tracker1, Call()); + EXPECT_CALL(tracker2, Call()); + EXPECT_CALL(done, Call()); + buffer.drain(buffer.length()); + done.Call(); +} + +TEST_P(OwnedImplTest, MoveDrainTrackersWhenTransferingSlices) { + if (GetParam() == BufferImplementation::Old) { + return; + } + testing::InSequence s; + + Buffer::OwnedImpl buffer1; + buffer1.add("a"); + + testing::MockFunction tracker1; + buffer1.addDrainTracker(tracker1.AsStdFunction()); + + Buffer::OwnedImpl buffer2; + buffer2.add("b"); + + testing::MockFunction tracker2; + buffer2.addDrainTracker(tracker2.AsStdFunction()); + + buffer2.add(std::string(10000, 'c')); + testing::MockFunction tracker3; + buffer2.addDrainTracker(tracker3.AsStdFunction()); + EXPECT_EQ(2, buffer2.getRawSlices(nullptr, 0)); + + buffer1.move(buffer2); + EXPECT_EQ(10002, buffer1.length()); + EXPECT_EQ(0, buffer2.length()); + EXPECT_EQ(3, buffer1.getRawSlices(nullptr, 0)); + EXPECT_EQ(0, buffer2.getRawSlices(nullptr, 0)); + + testing::MockFunction done; + EXPECT_CALL(tracker1, Call()); + EXPECT_CALL(tracker2, Call()); + EXPECT_CALL(tracker3, Call()); + EXPECT_CALL(done, Call()); + buffer1.drain(buffer1.length()); + done.Call(); +} + +TEST_P(OwnedImplTest, 
MoveDrainTrackersWhenCopying) { + if (GetParam() == BufferImplementation::Old) { + return; + } + testing::InSequence s; + + Buffer::OwnedImpl buffer1; + buffer1.add("a"); + + testing::MockFunction tracker1; + buffer1.addDrainTracker(tracker1.AsStdFunction()); + + Buffer::OwnedImpl buffer2; + buffer2.add("b"); + + testing::MockFunction tracker2; + buffer2.addDrainTracker(tracker2.AsStdFunction()); + + buffer1.move(buffer2); + EXPECT_EQ(2, buffer1.length()); + EXPECT_EQ(0, buffer2.length()); + EXPECT_EQ(1, buffer1.getRawSlices(nullptr, 0)); + EXPECT_EQ(0, buffer2.getRawSlices(nullptr, 0)); + + buffer1.drain(1); + testing::MockFunction done; + EXPECT_CALL(tracker1, Call()); + EXPECT_CALL(tracker2, Call()); + EXPECT_CALL(done, Call()); + buffer1.drain(1); + done.Call(); +} + +TEST_P(OwnedImplTest, PartialMoveDrainTrackers) { + if (GetParam() == BufferImplementation::Old) { + return; + } + testing::InSequence s; + + Buffer::OwnedImpl buffer1; + buffer1.add("a"); + + testing::MockFunction tracker1; + buffer1.addDrainTracker(tracker1.AsStdFunction()); + + Buffer::OwnedImpl buffer2; + buffer2.add("b"); + + testing::MockFunction tracker2; + buffer2.addDrainTracker(tracker2.AsStdFunction()); + + buffer2.add(std::string(10000, 'c')); + testing::MockFunction tracker3; + buffer2.addDrainTracker(tracker3.AsStdFunction()); + EXPECT_EQ(2, buffer2.getRawSlices(nullptr, 0)); + + // Move the first slice and associated trackers and part of the second slice to buffer1. + buffer1.move(buffer2, 4999); + EXPECT_EQ(5000, buffer1.length()); + EXPECT_EQ(5002, buffer2.length()); + EXPECT_EQ(3, buffer1.getRawSlices(nullptr, 0)); + EXPECT_EQ(1, buffer2.getRawSlices(nullptr, 0)); + + testing::MockFunction done; + EXPECT_CALL(tracker1, Call()); + buffer1.drain(1); + + EXPECT_CALL(tracker2, Call()); + EXPECT_CALL(done, Call()); + buffer1.drain(buffer1.length()); + done.Call(); + + // tracker3 remained in buffer2. 
+ EXPECT_CALL(tracker3, Call()); + buffer2.drain(buffer2.length()); +} + +TEST_P(OwnedImplTest, DrainTrackingOnDestruction) { + if (GetParam() == BufferImplementation::Old) { + return; + } + testing::InSequence s; + + auto buffer = std::make_unique(); + buffer->add("a"); + + testing::MockFunction tracker; + buffer->addDrainTracker(tracker.AsStdFunction()); + + testing::MockFunction done; + EXPECT_CALL(tracker, Call()); + EXPECT_CALL(done, Call()); + buffer.reset(); + done.Call(); +} + +TEST_P(OwnedImplTest, Linearize) { + Buffer::OwnedImpl buffer; + + // Unowned slice to track when linearize kicks in. + std::string input(1000, 'a'); + BufferFragmentImpl frag( + input.c_str(), input.size(), + [this](const void*, size_t, const BufferFragmentImpl*) { release_callback_called_ = true; }); + buffer.addBufferFragment(frag); + + // Second slice with more data. + buffer.add(std::string(1000, 'b')); + + // Linearize does not change the pointer associated with the first slice if requested size is less + // than or equal to size of the first slice. + EXPECT_EQ(input.c_str(), buffer.linearize(input.size())); + EXPECT_FALSE(release_callback_called_); + + constexpr uint64_t LinearizeSize = 2000; + void* out_ptr = buffer.linearize(LinearizeSize); + EXPECT_TRUE(release_callback_called_); + EXPECT_EQ(input + std::string(1000, 'b'), + absl::string_view(reinterpret_cast(out_ptr), LinearizeSize)); +} + +TEST_P(OwnedImplTest, LinearizeEmptyBuffer) { + Buffer::OwnedImpl buffer; + EXPECT_EQ(nullptr, buffer.linearize(0)); +} + +TEST_P(OwnedImplTest, LinearizeSingleSlice) { + auto buffer = std::make_unique(); + + // Unowned slice to track when linearize kicks in. 
+ std::string input(1000, 'a'); + BufferFragmentImpl frag( + input.c_str(), input.size(), + [this](const void*, size_t, const BufferFragmentImpl*) { release_callback_called_ = true; }); + buffer->addBufferFragment(frag); + + EXPECT_EQ(input.c_str(), buffer->linearize(buffer->length())); + EXPECT_FALSE(release_callback_called_); + + buffer.reset(); + EXPECT_TRUE(release_callback_called_); +} + +TEST_P(OwnedImplTest, LinearizeDrainTracking) { + if (GetParam() == BufferImplementation::Old) { + return; + } + constexpr uint32_t SmallChunk = 200; + constexpr uint32_t LargeChunk = 16384 - SmallChunk; + constexpr uint32_t LinearizeSize = SmallChunk + LargeChunk; + + // Create a buffer with a eclectic combination of buffer OwnedSlice and UnownedSlices that will + // help us explore the properties of linearize. + Buffer::OwnedImpl buffer; + + // Large add below the target linearize size. + testing::MockFunction tracker1; + buffer.add(std::string(LargeChunk, 'a')); + buffer.addDrainTracker(tracker1.AsStdFunction()); + + // Unowned slice which causes some fragmentation. + testing::MockFunction tracker2; + testing::MockFunction + release_callback_tracker; + std::string frag_input(2 * SmallChunk, 'b'); + BufferFragmentImpl frag(frag_input.c_str(), frag_input.size(), + release_callback_tracker.AsStdFunction()); + buffer.addBufferFragment(frag); + buffer.addDrainTracker(tracker2.AsStdFunction()); + + // And an unowned slice with 0 size, because. + testing::MockFunction tracker3; + testing::MockFunction + release_callback_tracker2; + BufferFragmentImpl frag2(nullptr, 0, release_callback_tracker2.AsStdFunction()); + buffer.addBufferFragment(frag2); + buffer.addDrainTracker(tracker3.AsStdFunction()); + + // Add a very large chunk + testing::MockFunction tracker4; + buffer.add(std::string(LargeChunk + LinearizeSize, 'c')); + buffer.addDrainTracker(tracker4.AsStdFunction()); + + // Small adds that create no gaps. 
+ testing::MockFunction tracker5; + for (int i = 0; i < 105; ++i) { + buffer.add(std::string(SmallChunk, 'd')); + } + buffer.addDrainTracker(tracker5.AsStdFunction()); + + expectSlices({{16184, 136, 16320}, + {400, 0, 400}, + {0, 0, 0}, + {32704, 0, 32704}, + {4032, 0, 4032}, + {4032, 0, 4032}, + {4032, 0, 4032}, + {4032, 0, 4032}, + {4032, 0, 4032}, + {704, 3328, 4032}}, + buffer); + + testing::InSequence s; + testing::MockFunction drain_tracker; + testing::MockFunction done_tracker; + EXPECT_CALL(tracker1, Call()); + EXPECT_CALL(release_callback_tracker, Call(_, _, _)); + EXPECT_CALL(tracker2, Call()); + EXPECT_CALL(drain_tracker, Call(3 * LargeChunk + 108 * SmallChunk, 16384)); + EXPECT_CALL(release_callback_tracker2, Call(_, _, _)); + EXPECT_CALL(tracker3, Call()); + EXPECT_CALL(tracker4, Call()); + EXPECT_CALL(drain_tracker, Call(2 * LargeChunk + 107 * SmallChunk, 16384)); + EXPECT_CALL(drain_tracker, Call(LargeChunk + 106 * SmallChunk, 16384)); + EXPECT_CALL(drain_tracker, Call(105 * SmallChunk, 16384)); + EXPECT_CALL(tracker5, Call()); + EXPECT_CALL(drain_tracker, Call(4616, 4616)); + EXPECT_CALL(done_tracker, Call()); + for (auto& expected_first_slice : std::vector>{{16584, 3832, 20416}, + {32904, 3896, 36800}, + {16520, 3896, 36800}, + {20296, 120, 20416}, + {4616, 3512, 8128}}) { + const uint32_t write_size = std::min(LinearizeSize, buffer.length()); + buffer.linearize(write_size); + expectFirstSlice(expected_first_slice, buffer); + drain_tracker.Call(buffer.length(), write_size); + buffer.drain(write_size); + } + done_tracker.Call(); + + expectSlices({}, buffer); +} + TEST_P(OwnedImplTest, ReserveCommit) { // This fragment will later be added to the buffer. It is declared in an enclosing scope to // ensure it is not destructed until after the buffer is. @@ -403,12 +718,12 @@ TEST_P(OwnedImplTest, ReserveCommit) { // Request a reservation that too big to fit in the existing slices. This should result // in the creation of a third slice. 
- expectSlices({{1, 4055, 4056}}, buffer); + expectSlices({{1, 4031, 4032}}, buffer); buffer.reserve(4096 - sizeof(OwnedSlice), iovecs, NumIovecs); - expectSlices({{1, 4055, 4056}, {0, 4056, 4056}}, buffer); + expectSlices({{1, 4031, 4032}, {0, 4032, 4032}}, buffer); const void* slice2 = iovecs[1].mem_; num_reserved = buffer.reserve(8192, iovecs, NumIovecs); - expectSlices({{1, 4055, 4056}, {0, 4056, 4056}, {0, 4056, 4056}}, buffer); + expectSlices({{1, 4031, 4032}, {0, 4032, 4032}, {0, 4032, 4032}}, buffer); EXPECT_EQ(3, num_reserved); EXPECT_EQ(slice1, iovecs[0].mem_); EXPECT_EQ(slice2, iovecs[1].mem_); @@ -417,11 +732,11 @@ TEST_P(OwnedImplTest, ReserveCommit) { // Append a fragment to the buffer, and then request a small reservation. The buffer // should make a new slice to satisfy the reservation; it cannot safely use any of // the previously seen slices, because they are no longer at the end of the buffer. - expectSlices({{1, 4055, 4056}}, buffer); + expectSlices({{1, 4031, 4032}}, buffer); buffer.addBufferFragment(fragment); EXPECT_EQ(13, buffer.length()); num_reserved = buffer.reserve(1, iovecs, NumIovecs); - expectSlices({{1, 4055, 4056}, {12, 0, 12}, {0, 4056, 4056}}, buffer); + expectSlices({{1, 4031, 4032}, {12, 0, 12}, {0, 4032, 4032}}, buffer); EXPECT_EQ(1, num_reserved); EXPECT_NE(slice1, iovecs[0].mem_); commitReservation(iovecs, num_reserved, buffer); @@ -454,16 +769,16 @@ TEST_P(OwnedImplTest, ReserveCommitReuse) { EXPECT_EQ(2, num_reserved); const void* first_slice = iovecs[0].mem_; iovecs[0].len_ = 1; - expectSlices({{8000, 4248, 12248}, {0, 12248, 12248}}, buffer); + expectSlices({{8000, 4224, 12224}, {0, 12224, 12224}}, buffer); buffer.commit(iovecs, 1); EXPECT_EQ(8001, buffer.length()); EXPECT_EQ(first_slice, iovecs[0].mem_); // The second slice is now released because there's nothing in the second slice. - expectSlices({{8001, 4247, 12248}}, buffer); + expectSlices({{8001, 4223, 12224}}, buffer); // Reserve 16KB again. 
num_reserved = buffer.reserve(16384, iovecs, NumIovecs); - expectSlices({{8001, 4247, 12248}, {0, 12248, 12248}}, buffer); + expectSlices({{8001, 4223, 12224}, {0, 12224, 12224}}, buffer); EXPECT_EQ(2, num_reserved); EXPECT_EQ(static_cast(first_slice) + 1, static_cast(iovecs[0].mem_)); @@ -492,7 +807,7 @@ TEST_P(OwnedImplTest, ReserveReuse) { EXPECT_EQ(2, num_reserved); EXPECT_EQ(first_slice, iovecs[0].mem_); EXPECT_EQ(second_slice, iovecs[1].mem_); - expectSlices({{0, 12248, 12248}, {0, 8152, 8152}}, buffer); + expectSlices({{0, 12224, 12224}, {0, 8128, 8128}}, buffer); // The remaining tests validate internal manipulations of the new slice // implementation, so they're not valid for the old evbuffer implementation. @@ -506,51 +821,51 @@ TEST_P(OwnedImplTest, ReserveReuse) { const void* third_slice = iovecs[1].mem_; EXPECT_EQ(2, num_reserved); EXPECT_EQ(first_slice, iovecs[0].mem_); - EXPECT_EQ(12248, iovecs[0].len_); + EXPECT_EQ(12224, iovecs[0].len_); EXPECT_NE(second_slice, iovecs[1].mem_); EXPECT_EQ(30000 - iovecs[0].len_, iovecs[1].len_); - expectSlices({{0, 12248, 12248}, {0, 8152, 8152}, {0, 20440, 20440}}, buffer); + expectSlices({{0, 12224, 12224}, {0, 8128, 8128}, {0, 20416, 20416}}, buffer); // Repeating a the reservation request for a smaller block returns the previous entry. num_reserved = buffer.reserve(16384, iovecs, NumIovecs); EXPECT_EQ(2, num_reserved); EXPECT_EQ(first_slice, iovecs[0].mem_); EXPECT_EQ(second_slice, iovecs[1].mem_); - expectSlices({{0, 12248, 12248}, {0, 8152, 8152}, {0, 20440, 20440}}, buffer); + expectSlices({{0, 12224, 12224}, {0, 8128, 8128}, {0, 20416, 20416}}, buffer); // Repeat the larger reservation notice that it doesn't match the prior reservation for 30000 // bytes. 
num_reserved = buffer.reserve(30000, iovecs, NumIovecs); EXPECT_EQ(2, num_reserved); EXPECT_EQ(first_slice, iovecs[0].mem_); - EXPECT_EQ(12248, iovecs[0].len_); + EXPECT_EQ(12224, iovecs[0].len_); EXPECT_NE(second_slice, iovecs[1].mem_); EXPECT_NE(third_slice, iovecs[1].mem_); EXPECT_EQ(30000 - iovecs[0].len_, iovecs[1].len_); - expectSlices({{0, 12248, 12248}, {0, 8152, 8152}, {0, 20440, 20440}, {0, 20440, 20440}}, buffer); + expectSlices({{0, 12224, 12224}, {0, 8128, 8128}, {0, 20416, 20416}, {0, 20416, 20416}}, buffer); // Commit the most recent reservation and verify the representation. buffer.commit(iovecs, num_reserved); - expectSlices({{12248, 0, 12248}, {0, 8152, 8152}, {0, 20440, 20440}, {17752, 2688, 20440}}, + expectSlices({{12224, 0, 12224}, {0, 8128, 8128}, {0, 20416, 20416}, {17776, 2640, 20416}}, buffer); // Do another reservation. num_reserved = buffer.reserve(16384, iovecs, NumIovecs); EXPECT_EQ(2, num_reserved); - expectSlices({{12248, 0, 12248}, - {0, 8152, 8152}, - {0, 20440, 20440}, - {17752, 2688, 20440}, - {0, 16344, 16344}}, + expectSlices({{12224, 0, 12224}, + {0, 8128, 8128}, + {0, 20416, 20416}, + {17776, 2640, 20416}, + {0, 16320, 16320}}, buffer); // And commit. 
buffer.commit(iovecs, num_reserved); - expectSlices({{12248, 0, 12248}, - {0, 8152, 8152}, - {0, 20440, 20440}, - {20440, 0, 20440}, - {13696, 2648, 16344}}, + expectSlices({{12224, 0, 12224}, + {0, 8128, 8128}, + {0, 20416, 20416}, + {20416, 0, 20416}, + {13744, 2576, 16320}}, buffer); } @@ -692,7 +1007,7 @@ TEST_P(OwnedImplTest, ReserveZeroCommit) { ASSERT_EQ(::close(pipe_fds[1]), 0); ASSERT_EQ(previous_length, buf.search(data.data(), rc, previous_length)); EXPECT_EQ("bbbbb", buf.toString().substr(0, 5)); - expectSlices({{5, 0, 4056}, {1953, 2103, 4056}}, buf); + expectSlices({{5, 0, 4032}, {1953, 2079, 4032}}, buf); } TEST_P(OwnedImplTest, ReadReserveAndCommit) { @@ -714,7 +1029,7 @@ TEST_P(OwnedImplTest, ReadReserveAndCommit) { ASSERT_EQ(result.rc_, static_cast(rc)); ASSERT_EQ(::close(pipe_fds[1]), 0); EXPECT_EQ("bbbbbe", buf.toString()); - expectSlices({{6, 4050, 4056}}, buf); + expectSlices({{6, 4026, 4032}}, buf); } TEST(OverflowDetectingUInt64, Arithmetic) { diff --git a/test/common/buffer/utility.h b/test/common/buffer/utility.h index daf67d85c1d29..3dfee93e6acf4 100644 --- a/test/common/buffer/utility.h +++ b/test/common/buffer/utility.h @@ -22,10 +22,11 @@ enum class BufferImplementation { class BufferImplementationParamTest : public testing::TestWithParam { protected: BufferImplementationParamTest() { + saved_buffer_old_impl_ = OwnedImpl::newBuffersUseOldImpl(); OwnedImpl::useOldImpl(GetParam() == BufferImplementation::Old); } - ~BufferImplementationParamTest() override = default; + ~BufferImplementationParamTest() { OwnedImpl::useOldImpl(saved_buffer_old_impl_); } /** Verify that a buffer has been constructed using the expected implementation. 
*/ void verifyImplementation(const OwnedImpl& buffer) { @@ -38,6 +39,9 @@ class BufferImplementationParamTest : public testing::TestWithParam + +#include "common/common/basic_resource_impl.h" + +#include "test/mocks/runtime/mocks.h" + +#include "gtest/gtest.h" + +using testing::NiceMock; +using testing::Return; + +namespace Envoy { + +class BasicResourceLimitImplTest : public testing::Test { +protected: + NiceMock runtime_; +}; + +TEST_F(BasicResourceLimitImplTest, NoArgsConstructorVerifyMax) { + BasicResourceLimitImpl br; + + EXPECT_EQ(br.max(), std::numeric_limits::max()); +} + +TEST_F(BasicResourceLimitImplTest, VerifySetClearMax) { + BasicResourceLimitImpl br(123); + + EXPECT_EQ(br.max(), 123); + br.setMax(321); + EXPECT_EQ(br.max(), 321); + br.resetMax(); + EXPECT_EQ(br.max(), std::numeric_limits::max()); +} + +TEST_F(BasicResourceLimitImplTest, IncDecCount) { + BasicResourceLimitImpl br; + + EXPECT_EQ(br.count(), 0); + br.inc(); + EXPECT_EQ(br.count(), 1); + br.inc(); + br.inc(); + EXPECT_EQ(br.count(), 3); + br.dec(); + EXPECT_EQ(br.count(), 2); + br.decBy(2); + EXPECT_EQ(br.count(), 0); +} + +TEST_F(BasicResourceLimitImplTest, CanCreate) { + BasicResourceLimitImpl br(2); + + EXPECT_TRUE(br.canCreate()); + br.inc(); + EXPECT_TRUE(br.canCreate()); + br.inc(); + EXPECT_FALSE(br.canCreate()); + br.dec(); + EXPECT_TRUE(br.canCreate()); + br.dec(); +} + +TEST_F(BasicResourceLimitImplTest, RuntimeMods) { + BasicResourceLimitImpl br(1337, runtime_, "trololo"); + + EXPECT_CALL(runtime_.snapshot_, getInteger("trololo", 1337)).WillOnce(Return(555)); + EXPECT_EQ(br.max(), 555); + + EXPECT_CALL(runtime_.snapshot_, getInteger("trololo", 1337)).WillOnce(Return(1337)); + EXPECT_EQ(br.max(), 1337); +} + +} // namespace Envoy diff --git a/test/common/common/matchers_test.cc b/test/common/common/matchers_test.cc index 3c0a64f4ca053..8d72416a45307 100644 --- a/test/common/common/matchers_test.cc +++ b/test/common/common/matchers_test.cc @@ -7,6 +7,8 @@ #include 
"common/config/metadata.h" #include "common/protobuf/protobuf.h" +#include "test/test_common/utility.h" + #include "gtest/gtest.h" namespace Envoy { @@ -257,6 +259,51 @@ TEST(MetadataTest, MatchDoubleListValue) { metadataValue.Clear(); } +TEST(StringMatcher, ExactMatchIgnoreCase) { + envoy::type::matcher::StringMatcher matcher; + matcher.set_exact("exact"); + EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match("exact")); + EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match("EXACT")); + EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match("exacz")); + EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match("other")); + + matcher.set_ignore_case(true); + EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match("exact")); + EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match("EXACT")); + EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match("exacz")); + EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match("other")); +} + +TEST(StringMatcher, PrefixMatchIgnoreCase) { + envoy::type::matcher::StringMatcher matcher; + matcher.set_prefix("prefix"); + EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match("prefix-abc")); + EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match("PREFIX-ABC")); + EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match("prefiz-abc")); + EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match("other")); + + matcher.set_ignore_case(true); + EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match("prefix-abc")); + EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match("PREFIX-ABC")); + EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match("prefiz-abc")); + EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match("other")); +} + +TEST(StringMatcher, SuffixMatchIgnoreCase) { + envoy::type::matcher::StringMatcher matcher; + matcher.set_suffix("suffix"); + EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match("abc-suffix")); + EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match("ABC-SUFFIX")); + 
EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match("abc-suffiz")); + EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match("other")); + + matcher.set_ignore_case(true); + EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match("abc-suffix")); + EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match("ABC-SUFFIX")); + EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match("abc-suffiz")); + EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match("other")); +} + TEST(StringMatcher, SafeRegexValue) { envoy::type::matcher::StringMatcher matcher; matcher.mutable_safe_regex()->mutable_google_re2(); @@ -266,6 +313,23 @@ TEST(StringMatcher, SafeRegexValue) { EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match("bar")); } +TEST(StringMatcher, RegexValueIgnoreCase) { + envoy::type::matcher::StringMatcher matcher; + matcher.set_ignore_case(true); + matcher.set_regex("foo"); + EXPECT_THROW_WITH_MESSAGE(Matchers::StringMatcherImpl(matcher).match("foo"), EnvoyException, + "ignore_case has no effect for regex."); +} + +TEST(StringMatcher, SafeRegexValueIgnoreCase) { + envoy::type::matcher::StringMatcher matcher; + matcher.set_ignore_case(true); + matcher.mutable_safe_regex()->mutable_google_re2(); + matcher.mutable_safe_regex()->set_regex("foo"); + EXPECT_THROW_WITH_MESSAGE(Matchers::StringMatcherImpl(matcher).match("foo"), EnvoyException, + "ignore_case has no effect for safe_regex."); +} + TEST(LowerCaseStringMatcher, MatchExactValue) { envoy::type::matcher::StringMatcher matcher; matcher.set_exact("Foo"); @@ -298,6 +362,92 @@ TEST(LowerCaseStringMatcher, MatchRegexValue) { EXPECT_FALSE(Envoy::Matchers::LowerCaseStringMatcher(matcher).match("Foo.Bar")); } +TEST(PathMatcher, MatchExactPath) { + const auto matcher = Envoy::Matchers::PathMatcher::createExact("/exact", false); + + EXPECT_TRUE(matcher->match("/exact")); + EXPECT_TRUE(matcher->match("/exact?param=val")); + EXPECT_TRUE(matcher->match("/exact#fragment")); + 
EXPECT_TRUE(matcher->match("/exact#fragment?param=val")); + EXPECT_FALSE(matcher->match("/EXACT")); + EXPECT_FALSE(matcher->match("/exacz")); + EXPECT_FALSE(matcher->match("/exact-abc")); + EXPECT_FALSE(matcher->match("/exacz?/exact")); + EXPECT_FALSE(matcher->match("/exacz#/exact")); +} + +TEST(PathMatcher, MatchExactPathIgnoreCase) { + const auto matcher = Envoy::Matchers::PathMatcher::createExact("/exact", true); + + EXPECT_TRUE(matcher->match("/exact")); + EXPECT_TRUE(matcher->match("/EXACT")); + EXPECT_TRUE(matcher->match("/exact?param=val")); + EXPECT_TRUE(matcher->match("/Exact#fragment")); + EXPECT_TRUE(matcher->match("/EXACT#fragment?param=val")); + EXPECT_FALSE(matcher->match("/exacz")); + EXPECT_FALSE(matcher->match("/exact-abc")); + EXPECT_FALSE(matcher->match("/exacz?/exact")); + EXPECT_FALSE(matcher->match("/exacz#/exact")); +} + +TEST(PathMatcher, MatchPrefixPath) { + const auto matcher = Envoy::Matchers::PathMatcher::createPrefix("/prefix", false); + + EXPECT_TRUE(matcher->match("/prefix")); + EXPECT_TRUE(matcher->match("/prefix-abc")); + EXPECT_TRUE(matcher->match("/prefix?param=val")); + EXPECT_TRUE(matcher->match("/prefix#fragment")); + EXPECT_TRUE(matcher->match("/prefix#fragment?param=val")); + EXPECT_FALSE(matcher->match("/PREFIX")); + EXPECT_FALSE(matcher->match("/prefiz")); + EXPECT_FALSE(matcher->match("/prefiz?/prefix")); + EXPECT_FALSE(matcher->match("/prefiz#/prefix")); +} + +TEST(PathMatcher, MatchPrefixPathIgnoreCase) { + const auto matcher = Envoy::Matchers::PathMatcher::createPrefix("/prefix", true); + + EXPECT_TRUE(matcher->match("/prefix")); + EXPECT_TRUE(matcher->match("/prefix-abc")); + EXPECT_TRUE(matcher->match("/Prefix?param=val")); + EXPECT_TRUE(matcher->match("/Prefix#fragment")); + EXPECT_TRUE(matcher->match("/PREFIX#fragment?param=val")); + EXPECT_TRUE(matcher->match("/PREFIX")); + EXPECT_FALSE(matcher->match("/prefiz")); + EXPECT_FALSE(matcher->match("/prefiz?/prefix")); + EXPECT_FALSE(matcher->match("/prefiz#/prefix")); 
+} + +TEST(PathMatcher, MatchSuffixPath) { + envoy::type::matcher::PathMatcher matcher; + matcher.mutable_path()->set_suffix("suffix"); + + EXPECT_TRUE(Matchers::PathMatcher(matcher).match("/suffix")); + EXPECT_TRUE(Matchers::PathMatcher(matcher).match("/abc-suffix")); + EXPECT_TRUE(Matchers::PathMatcher(matcher).match("/suffix?param=val")); + EXPECT_TRUE(Matchers::PathMatcher(matcher).match("/suffix#fragment")); + EXPECT_TRUE(Matchers::PathMatcher(matcher).match("/suffix#fragment?param=val")); + EXPECT_FALSE(Matchers::PathMatcher(matcher).match("/suffiz")); + EXPECT_FALSE(Matchers::PathMatcher(matcher).match("/suffiz?param=suffix")); + EXPECT_FALSE(Matchers::PathMatcher(matcher).match("/suffiz#suffix")); +} + +TEST(PathMatcher, MatchRegexPath) { + envoy::type::matcher::StringMatcher matcher; + matcher.mutable_safe_regex()->mutable_google_re2(); + matcher.mutable_safe_regex()->set_regex(".*regex.*"); + + EXPECT_TRUE(Matchers::PathMatcher(matcher).match("/regex")); + EXPECT_TRUE(Matchers::PathMatcher(matcher).match("/regex/xyz")); + EXPECT_TRUE(Matchers::PathMatcher(matcher).match("/xyz/regex")); + EXPECT_TRUE(Matchers::PathMatcher(matcher).match("/regex?param=val")); + EXPECT_TRUE(Matchers::PathMatcher(matcher).match("/regex#fragment")); + EXPECT_TRUE(Matchers::PathMatcher(matcher).match("/regex#fragment?param=val")); + EXPECT_FALSE(Matchers::PathMatcher(matcher).match("/regez")); + EXPECT_FALSE(Matchers::PathMatcher(matcher).match("/regez?param=regex")); + EXPECT_FALSE(Matchers::PathMatcher(matcher).match("/regez#regex")); +} + } // namespace } // namespace Matcher } // namespace Envoy diff --git a/test/common/http/codec_impl_fuzz_test.cc b/test/common/http/codec_impl_fuzz_test.cc index ed592b66dc790..1f8f804e8a217 100644 --- a/test/common/http/codec_impl_fuzz_test.cc +++ b/test/common/http/codec_impl_fuzz_test.cc @@ -353,6 +353,8 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi uint32_t max_request_headers_kb = 
Http::DEFAULT_MAX_REQUEST_HEADERS_KB; uint32_t max_request_headers_count = Http::DEFAULT_MAX_HEADERS_COUNT; uint32_t max_response_headers_count = Http::DEFAULT_MAX_HEADERS_COUNT; + const envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action = envoy::api::v2::core::HttpProtocolOptions::ALLOW; ClientConnectionPtr client; ServerConnectionPtr server; const bool http2 = http_version == HttpVersion::Http2; @@ -373,12 +375,12 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi const Http2Settings server_http2settings{fromHttp2Settings(input.h2_settings().server())}; server = std::make_unique( server_connection, server_callbacks, stats_store, server_http2settings, - max_request_headers_kb, max_request_headers_count); + max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); } else { const Http1Settings server_http1settings{fromHttp1Settings(input.h1_settings().server())}; server = std::make_unique( server_connection, stats_store, server_callbacks, server_http1settings, - max_request_headers_kb, max_request_headers_count); + max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); } ReorderBuffer client_write_buf{*server}; diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index d64e24b3f1b78..820a0cb7fe967 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -130,6 +130,10 @@ class FuzzConfig : public ConnectionManagerConfig { const Http::Http1Settings& http1Settings() const override { return http1_settings_; } bool shouldNormalizePath() const override { return false; } bool shouldMergeSlashes() const override { return false; } + envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headersWithUnderscoresAction() const override { + return envoy::api::v2::core::HttpProtocolOptions::ALLOW; + } 
const envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager config_; std::list access_logs_; diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index f137879b20c46..03b9a90aab590 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -167,6 +167,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan EXPECT_CALL(stream_, addCallbacks(_)) .WillOnce(Invoke( [&](Http::StreamCallbacks& callbacks) -> void { stream_callbacks_ = &callbacks; })); + EXPECT_CALL(stream_, setFlushTimeout(_)); EXPECT_CALL(stream_, bufferLimit()).WillOnce(Return(initial_buffer_limit_)); } @@ -315,6 +316,10 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan const Http::Http1Settings& http1Settings() const override { return http1_settings_; } bool shouldNormalizePath() const override { return normalize_path_; } bool shouldMergeSlashes() const override { return merge_slashes_; } + envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headersWithUnderscoresAction() const override { + return headers_with_underscores_action_; + } DangerousDeprecatedTestTime test_time_; NiceMock route_config_provider_; @@ -369,6 +374,8 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan Http::Http1Settings http1_settings_; bool normalize_path_ = false; bool merge_slashes_ = false; + envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action_ = envoy::api::v2::core::HttpProtocolOptions::ALLOW; NiceMock upstream_conn_; // for websocket tests NiceMock conn_pool_; // for websocket tests diff --git a/test/common/http/conn_manager_utility_test.cc b/test/common/http/conn_manager_utility_test.cc index 315e35938a068..fba9aa542e0fe 100644 --- a/test/common/http/conn_manager_utility_test.cc +++ 
b/test/common/http/conn_manager_utility_test.cc @@ -91,6 +91,8 @@ class MockConnectionManagerConfig : public ConnectionManagerConfig { MOCK_CONST_METHOD0(http1Settings, const Http::Http1Settings&()); MOCK_CONST_METHOD0(shouldNormalizePath, bool()); MOCK_CONST_METHOD0(shouldMergeSlashes, bool()); + MOCK_CONST_METHOD0(headersWithUnderscoresAction, + envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction()); std::unique_ptr internal_address_config_ = std::make_unique(); diff --git a/test/common/http/header_utility_test.cc b/test/common/http/header_utility_test.cc index 067e0a45f776d..cdbc796a0315c 100644 --- a/test/common/http/header_utility_test.cc +++ b/test/common/http/header_utility_test.cc @@ -455,13 +455,13 @@ TEST(HeaderIsValidTest, InvalidHeaderValuesAreRejected) { continue; } - EXPECT_FALSE(HeaderUtility::headerIsValid(std::string(1, i))); + EXPECT_FALSE(HeaderUtility::headerValueIsValid(std::string(1, i))); } } TEST(HeaderIsValidTest, ValidHeaderValuesAreAccepted) { - EXPECT_TRUE(HeaderUtility::headerIsValid("some-value")); - EXPECT_TRUE(HeaderUtility::headerIsValid("Some Other Value")); + EXPECT_TRUE(HeaderUtility::headerValueIsValid("some-value")); + EXPECT_TRUE(HeaderUtility::headerValueIsValid("Some Other Value")); } TEST(HeaderIsValidTest, AuthIsValid) { @@ -487,5 +487,13 @@ TEST(HeaderAddTest, HeaderAdd) { &headers); } +TEST(HeaderIsValidTest, HeaderNameContainsUnderscore) { + EXPECT_FALSE(HeaderUtility::headerNameContainsUnderscore("cookie")); + EXPECT_FALSE(HeaderUtility::headerNameContainsUnderscore("x-something")); + EXPECT_TRUE(HeaderUtility::headerNameContainsUnderscore("_cookie")); + EXPECT_TRUE(HeaderUtility::headerNameContainsUnderscore("cookie_")); + EXPECT_TRUE(HeaderUtility::headerNameContainsUnderscore("x_something")); +} + } // namespace Http } // namespace Envoy diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 78e7b0acf6c92..ee3256290f822 100644 --- 
a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -46,9 +46,9 @@ std::string createHeaderFragment(int num_headers) { class Http1ServerConnectionImplTest : public testing::Test { public: void initialize() { - codec_ = - std::make_unique(connection_, store_, callbacks_, codec_settings_, - max_request_headers_kb_, max_request_headers_count_); + codec_ = std::make_unique( + connection_, store_, callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, headers_with_underscores_action_); } NiceMock connection_; @@ -84,6 +84,8 @@ class Http1ServerConnectionImplTest : public testing::Test { protected: uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB}; uint32_t max_request_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; + envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action_{envoy::api::v2::core::HttpProtocolOptions::ALLOW}; Stats::IsolatedStoreImpl store_; }; @@ -96,9 +98,9 @@ void Http1ServerConnectionImplTest::expect400(Protocol p, bool allow_absolute_ur if (allow_absolute_url) { codec_settings_.allow_absolute_url_ = allow_absolute_url; - codec_ = - std::make_unique(connection_, store_, callbacks_, codec_settings_, - max_request_headers_kb_, max_request_headers_count_); + codec_ = std::make_unique( + connection_, store_, callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::api::v2::core::HttpProtocolOptions::ALLOW); } Http::MockStreamDecoder decoder; @@ -117,9 +119,9 @@ void Http1ServerConnectionImplTest::expectHeadersTest(Protocol p, bool allow_abs // Make a new 'codec' with the right settings if (allow_absolute_url) { codec_settings_.allow_absolute_url_ = allow_absolute_url; - codec_ = - std::make_unique(connection_, store_, callbacks_, codec_settings_, - max_request_headers_kb_, max_request_headers_count_); + codec_ = std::make_unique( + connection_, store_, callbacks_, codec_settings_, 
max_request_headers_kb_, + max_request_headers_count_, envoy::api::v2::core::HttpProtocolOptions::ALLOW); } Http::MockStreamDecoder decoder; @@ -569,6 +571,71 @@ TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { "http/1.1 protocol error: header value contains invalid chars"); } +// Ensures that request headers with names containing the underscore character are allowed +// when the option is set to allow. +TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAllowed) { + headers_with_underscores_action_ = envoy::api::v2::core::HttpProtocolOptions::ALLOW; + initialize(); + + Http::MockStreamDecoder decoder; + EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); + + TestHeaderMapImpl expected_headers{ + {":authority", "h.com"}, + {":path", "/"}, + {":method", "GET"}, + {"foo_bar", "bar"}, + }; + EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)).Times(1); + + Buffer::OwnedImpl buffer(absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo_bar: bar\r\n\r\n")); + codec_->dispatch(buffer); + EXPECT_EQ(0U, buffer.length()); + EXPECT_EQ(0, store_.counter("http1.dropped_headers_with_underscores").value()); +} + +// Ensures that request headers with names containing the underscore character are dropped +// when the option is set to drop headers. 
+TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAreDropped) { + headers_with_underscores_action_ = envoy::api::v2::core::HttpProtocolOptions::DROP_HEADER; + initialize(); + + Http::MockStreamDecoder decoder; + EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); + + TestHeaderMapImpl expected_headers{ + {":authority", "h.com"}, + {":path", "/"}, + {":method", "GET"}, + }; + EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)).Times(1); + + Buffer::OwnedImpl buffer(absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo_bar: bar\r\n\r\n")); + codec_->dispatch(buffer); + EXPECT_EQ(0U, buffer.length()); + EXPECT_EQ(1, store_.counter("http1.dropped_headers_with_underscores").value()); +} + +// Ensures that request with header names containing the underscore character are rejected +// when the option is set to reject request. +TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreCauseRequestRejected) { + headers_with_underscores_action_ = envoy::api::v2::core::HttpProtocolOptions::REJECT_REQUEST; + initialize(); + + Http::MockStreamDecoder decoder; + Http::StreamEncoder* response_encoder = nullptr; + EXPECT_CALL(callbacks_, newStream(_, _)) + .WillOnce(Invoke([&](Http::StreamEncoder& encoder, bool) -> Http::StreamDecoder& { + response_encoder = &encoder; + return decoder; + })); + + Buffer::OwnedImpl buffer(absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo_bar: bar\r\n\r\n")); + EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, + "http/1.1 protocol error: header name contains underscores"); + EXPECT_EQ(1, store_.counter("http1.requests_rejected_with_underscores_in_headers").value()); +} + // Regression test for http-parser allowing embedded NULs in header values, // verify we reject them. 
TEST_F(Http1ServerConnectionImplTest, HeaderEmbeddedNulRejection) { @@ -1486,6 +1553,26 @@ TEST_F(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) { static_cast(codec_.get()) ->onUnderlyingConnectionBelowWriteBufferLowWatermark(); } + +TEST_F(Http1ServerConnectionImplTest, LargeRequestUrlRejected) { + initialize(); + + std::string exception_reason; + NiceMock decoder; + Http::StreamEncoder* response_encoder = nullptr; + EXPECT_CALL(callbacks_, newStream(_, _)) + .WillOnce(Invoke([&](StreamEncoder& encoder, bool) -> StreamDecoder& { + response_encoder = &encoder; + return decoder; + })); + + // Default limit of 60 KiB + std::string long_url = "/" + std::string(60 * 1024, 'q'); + Buffer::OwnedImpl buffer("GET " + long_url + " HTTP/1.1\r\n"); + + EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, "headers size exceeds limit"); +} + TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersRejected) { // Default limit of 60 KiB std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n"; @@ -1568,8 +1655,24 @@ TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersAccepted) { testRequestHeadersAccepted(createHeaderFragment(150)); } -// Tests that response headers of 80 kB fails. -TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersRejected) { +// Tests that incomplete response headers of 80 kB header value fails. 
+TEST_F(Http1ClientConnectionImplTest, ResponseHeadersWithLargeValueRejected) { + initialize(); + + NiceMock response_decoder; + Http::StreamEncoder& request_encoder = codec_->newStream(response_decoder); + TestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + Buffer::OwnedImpl buffer("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n"); + codec_->dispatch(buffer); + std::string long_header = "big: " + std::string(80 * 1024, 'q'); + buffer = Buffer::OwnedImpl(long_header); + EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, "headers size exceeds limit"); +} + +// Tests that incomplete response headers with a 80 kB header field fails. +TEST_F(Http1ClientConnectionImplTest, ResponseHeadersWithLargeFieldRejected) { initialize(); NiceMock response_decoder; @@ -1579,7 +1682,7 @@ TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersRejected) { Buffer::OwnedImpl buffer("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n"); codec_->dispatch(buffer); - std::string long_header = "big: " + std::string(80 * 1024, 'q') + "\r\n"; + std::string long_header = "bigfield" + std::string(80 * 1024, 'q'); buffer = Buffer::OwnedImpl(long_header); EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, "headers size exceeds limit"); } diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index 87f20fe6509aa..d4f124b3469e1 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -59,18 +59,37 @@ class Http2CodecImplTestFixture { }; Http2CodecImplTestFixture(Http2SettingsTuple client_settings, Http2SettingsTuple server_settings) - : client_settings_(client_settings), server_settings_(server_settings) {} - virtual ~Http2CodecImplTestFixture() = default; + : client_settings_(client_settings), server_settings_(server_settings) { + // Make sure we explicitly test for stream flush timer creation. 
+ EXPECT_CALL(client_connection_.dispatcher_, createTimer_(_)).Times(0); + EXPECT_CALL(server_connection_.dispatcher_, createTimer_(_)).Times(0); + } + virtual ~Http2CodecImplTestFixture() { + client_connection_.dispatcher_.clearDeferredDeleteList(); + if (client_ != nullptr) { + client_.reset(); + EXPECT_EQ(0, TestUtility::findGauge(client_stats_store_, "http2.streams_active")->value()); + EXPECT_EQ(0, + TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); + } + server_connection_.dispatcher_.clearDeferredDeleteList(); + if (server_ != nullptr) { + server_.reset(); + EXPECT_EQ(0, TestUtility::findGauge(server_stats_store_, "http2.streams_active")->value()); + EXPECT_EQ(0, + TestUtility::findGauge(server_stats_store_, "http2.pending_send_bytes")->value()); + } + } virtual void initialize() { Http2SettingsFromTuple(client_http2settings_, client_settings_); Http2SettingsFromTuple(server_http2settings_, server_settings_); client_ = std::make_unique( - client_connection_, client_callbacks_, stats_store_, client_http2settings_, + client_connection_, client_callbacks_, client_stats_store_, client_http2settings_, max_request_headers_kb_, max_response_headers_count_); server_ = std::make_unique( - server_connection_, server_callbacks_, stats_store_, server_http2settings_, - max_request_headers_kb_, max_request_headers_count_); + server_connection_, server_callbacks_, server_stats_store_, server_http2settings_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); request_encoder_ = &client_->newStream(response_decoder_); setupDefaultConnectionMocks(); @@ -79,6 +98,7 @@ class Http2CodecImplTestFixture { .WillRepeatedly(Invoke([&](StreamEncoder& encoder, bool) -> StreamDecoder& { response_encoder_ = &encoder; encoder.getStream().addCallbacks(server_stream_callbacks_); + encoder.getStream().setFlushTimeout(std::chrono::milliseconds(30000)); return request_decoder_; })); } @@ -136,12 +156,13 @@ class 
Http2CodecImplTestFixture { const Http2SettingsTuple server_settings_; bool allow_metadata_ = false; bool stream_error_on_invalid_http_messaging_ = false; - Stats::IsolatedStoreImpl stats_store_; + Stats::IsolatedStoreImpl client_stats_store_; Http2Settings client_http2settings_; NiceMock client_connection_; MockConnectionCallbacks client_callbacks_; std::unique_ptr client_; ConnectionWrapper client_wrapper_; + Stats::IsolatedStoreImpl server_stats_store_; Http2Settings server_http2settings_; NiceMock server_connection_; MockServerConnectionCallbacks server_callbacks_; @@ -166,6 +187,8 @@ class Http2CodecImplTestFixture { Http2Settings::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM; uint32_t max_inbound_window_update_frames_per_data_frame_sent_ = Http2Settings::DEFAULT_MAX_INBOUND_WINDOW_UPDATE_FRAMES_PER_DATA_FRAME_SENT; + envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action_{envoy::api::v2::core::HttpProtocolOptions::ALLOW}; }; class Http2CodecImplTest : public ::testing::TestWithParam, @@ -284,7 +307,7 @@ TEST_P(Http2CodecImplTest, InvalidContinueWithFin) { TestHeaderMapImpl continue_headers{{":status", "100"}}; EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), CodecProtocolException); - EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); } TEST_P(Http2CodecImplTest, InvalidContinueWithFinAllowed) { @@ -312,7 +335,7 @@ TEST_P(Http2CodecImplTest, InvalidContinueWithFinAllowed) { setupDefaultConnectionMocks(); client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); - EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); } TEST_P(Http2CodecImplTest, InvalidRepeatContinue) { @@ -328,7 +351,7 @@ TEST_P(Http2CodecImplTest, InvalidRepeatContinue) { 
response_encoder_->encode100ContinueHeaders(continue_headers); EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), CodecProtocolException); - EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); }; TEST_P(Http2CodecImplTest, InvalidRepeatContinueAllowed) { @@ -359,7 +382,7 @@ TEST_P(Http2CodecImplTest, InvalidRepeatContinueAllowed) { setupDefaultConnectionMocks(); client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); - EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); }; TEST_P(Http2CodecImplTest, Invalid103) { @@ -380,7 +403,7 @@ TEST_P(Http2CodecImplTest, Invalid103) { EXPECT_THROW_WITH_MESSAGE(response_encoder_->encodeHeaders(early_hint_headers, false), CodecProtocolException, "Unexpected 'trailers' with no end stream."); - EXPECT_EQ(1, stats_store_.counter("http2.too_many_header_frames").value()); + EXPECT_EQ(1, client_stats_store_.counter("http2.too_many_header_frames").value()); } TEST_P(Http2CodecImplTest, Invalid204WithContentLength) { @@ -401,7 +424,7 @@ TEST_P(Http2CodecImplTest, Invalid204WithContentLength) { } EXPECT_THROW(response_encoder_->encodeHeaders(response_headers, false), CodecProtocolException); - EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); }; TEST_P(Http2CodecImplTest, Invalid204WithContentLengthAllowed) { @@ -438,7 +461,7 @@ TEST_P(Http2CodecImplTest, Invalid204WithContentLengthAllowed) { setupDefaultConnectionMocks(); client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); - EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); }; TEST_P(Http2CodecImplTest, RefusedStreamReset) { @@ -461,7 +484,7 @@ 
TEST_P(Http2CodecImplTest, InvalidHeadersFrame) { initialize(); EXPECT_THROW(request_encoder_->encodeHeaders(TestHeaderMapImpl{}, true), CodecProtocolException); - EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_EQ(1, server_stats_store_.counter("http2.rx_messaging_error").value()); } TEST_P(Http2CodecImplTest, InvalidHeadersFrameAllowed) { @@ -523,7 +546,7 @@ TEST_P(Http2CodecImplTest, TrailingHeaders) { response_encoder_->encodeTrailers(TestHeaderMapImpl{{"trailing", "header"}}); } -TEST_P(Http2CodecImplTest, TrailingHeadersLargeBody) { +TEST_P(Http2CodecImplTest, TrailingHeadersLargeClientBody) { initialize(); // Buffer server data so we can make sure we don't get any window updates. @@ -538,11 +561,11 @@ TEST_P(Http2CodecImplTest, TrailingHeadersLargeBody) { EXPECT_CALL(request_decoder_, decodeData(_, false)).Times(AtLeast(1)); Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); request_encoder_->encodeData(body, false); - EXPECT_CALL(request_decoder_, decodeTrailers_(_)); request_encoder_->encodeTrailers(TestHeaderMapImpl{{"trailing", "header"}}); // Flush pending data. 
setupDefaultConnectionMocks(); + EXPECT_CALL(request_decoder_, decodeTrailers_(_)); server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); TestHeaderMapImpl response_headers{{":status", "200"}}; @@ -695,8 +718,11 @@ TEST_P(Http2CodecImplDeferredResetTest, DeferredResetServer) { response_encoder_->encodeHeaders(response_headers, false); Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()).Times(AnyNumber()); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); response_encoder_->encodeData(body, true); EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::LocalReset, _)); + EXPECT_CALL(*flush_timer, disableTimer()); response_encoder_->getStream().resetStream(StreamResetReason::LocalReset); MockStreamCallbacks client_stream_callbacks; @@ -730,6 +756,8 @@ TEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) { // Force the server stream to be read disabled. This will cause it to stop sending window // updates to the client. 
server_->getStream(1)->readDisable(true); + EXPECT_EQ(1, TestUtility::findGauge(client_stats_store_, "http2.streams_active")->value()); + EXPECT_EQ(1, TestUtility::findGauge(server_stats_store_, "http2.streams_active")->value()); uint32_t initial_stream_window = nghttp2_session_get_stream_effective_local_window_size(client_->session(), 1); @@ -755,6 +783,8 @@ TEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) { Buffer::OwnedImpl more_long_data(std::string(initial_stream_window, 'a')); request_encoder_->encodeData(more_long_data, false); EXPECT_EQ(initial_stream_window, client_->getStream(1)->pending_send_data_.length()); + EXPECT_EQ(initial_stream_window, + TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); EXPECT_EQ(initial_stream_window, server_->getStream(1)->unconsumed_bytes_); // If we go over the limit, the stream callbacks should fire. @@ -762,6 +792,8 @@ TEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) { Buffer::OwnedImpl last_byte("!"); request_encoder_->encodeData(last_byte, false); EXPECT_EQ(initial_stream_window + 1, client_->getStream(1)->pending_send_data_.length()); + EXPECT_EQ(initial_stream_window + 1, + TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); // Now create a second stream on the connection. MockStreamDecoder response_decoder2; @@ -805,6 +837,7 @@ TEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) { EXPECT_CALL(callbacks3, onBelowWriteBufferLowWatermark()); server_->getStream(1)->readDisable(false); EXPECT_EQ(0, client_->getStream(1)->pending_send_data_.length()); + EXPECT_EQ(0, TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); // The extra 1 byte sent won't trigger another window update, so the final window should be the // initial window minus the last 1 byte flush from the client to server. 
EXPECT_EQ(initial_stream_window - 1, @@ -897,6 +930,142 @@ TEST_P(Http2CodecImplFlowControlTest, FlowControlPendingRecvData) { request_encoder_->encodeData(data, false); } +// Verify that we create and disable the stream flush timer when trailers follow a stream that +// does not have enough window. +TEST_P(Http2CodecImplFlowControlTest, TrailingHeadersLargeServerBody) { + initialize(); + + InSequence s; + TestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + ON_CALL(client_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); + TestHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + response_encoder_->encodeData(body, false); + response_encoder_->encodeTrailers(TestHeaderMapImpl{{"trailing", "header"}}); + + // Send window updates from the client. + setupDefaultConnectionMocks(); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + EXPECT_CALL(response_decoder_, decodeTrailers_(_)); + EXPECT_CALL(*flush_timer, disableTimer()); + server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); + EXPECT_EQ(0, server_stats_store_.counter("http2.tx_flush_timeout").value()); +} + +// Verify that we create and handle the stream flush timeout when trailers follow a stream that +// does not have enough window. 
+TEST_P(Http2CodecImplFlowControlTest, TrailingHeadersLargeServerBodyFlushTimeout) { + initialize(); + + InSequence s; + MockStreamCallbacks client_stream_callbacks; + request_encoder_->getStream().addCallbacks(client_stream_callbacks); + TestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + ON_CALL(client_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); + TestHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + response_encoder_->encodeData(body, false); + response_encoder_->encodeTrailers(TestHeaderMapImpl{{"trailing", "header"}}); + + // Invoke a stream flush timeout. Make sure we don't get a reset locally for higher layers but + // we do get a reset on the client. + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); + flush_timer->invokeCallback(); + EXPECT_EQ(1, server_stats_store_.counter("http2.tx_flush_timeout").value()); +} + +// Verify that we create and handle the stream flush timeout when there is a large body that +// does not have enough window. 
+TEST_P(Http2CodecImplFlowControlTest, LargeServerBodyFlushTimeout) { + initialize(); + + InSequence s; + MockStreamCallbacks client_stream_callbacks; + request_encoder_->getStream().addCallbacks(client_stream_callbacks); + TestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + ON_CALL(client_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); + TestHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + response_encoder_->encodeData(body, true); + + // Invoke a stream flush timeout. Make sure we don't get a reset locally for higher layers but + // we do get a reset on the client. + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); + flush_timer->invokeCallback(); + EXPECT_EQ(1, server_stats_store_.counter("http2.tx_flush_timeout").value()); +} + +// Verify that when an incoming protocol error races with a stream flush timeout we correctly +// disable the flush timeout and do not attempt to reset the stream. 
+TEST_P(Http2CodecImplFlowControlTest, LargeServerBodyFlushTimeoutAfterGoaway) { + initialize(); + + InSequence s; + MockStreamCallbacks client_stream_callbacks; + request_encoder_->getStream().addCallbacks(client_stream_callbacks); + TestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + ON_CALL(client_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); + TestHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + response_encoder_->encodeData(body, true); + + // Force a protocol error. 
+ Buffer::OwnedImpl garbage_data("this should cause a protocol error"); + EXPECT_CALL(client_callbacks_, onGoAway()); + EXPECT_CALL(*flush_timer, disableTimer()); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + EXPECT_THROW(server_wrapper_.dispatch(garbage_data, *server_), CodecProtocolException); + EXPECT_EQ(0, server_stats_store_.counter("http2.tx_flush_timeout").value()); +} + TEST_P(Http2CodecImplTest, WatermarkUnderEndStream) { initialize(); MockStreamCallbacks callbacks; @@ -951,11 +1120,11 @@ TEST_P(Http2CodecImplStreamLimitTest, MaxClientStreams) { Http2SettingsFromTuple(client_http2settings_, ::testing::get<0>(GetParam())); Http2SettingsFromTuple(server_http2settings_, ::testing::get<1>(GetParam())); client_ = std::make_unique( - client_connection_, client_callbacks_, stats_store_, client_http2settings_, + client_connection_, client_callbacks_, client_stats_store_, client_http2settings_, max_request_headers_kb_, max_response_headers_count_); server_ = std::make_unique( - server_connection_, server_callbacks_, stats_store_, server_http2settings_, - max_request_headers_kb_, max_request_headers_count_); + server_connection_, server_callbacks_, server_stats_store_, server_http2settings_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); for (int i = 0; i < 101; ++i) { request_encoder_ = &client_->newStream(response_decoder_); @@ -1090,6 +1259,53 @@ TEST_P(Http2CodecImplTest, LargeRequestHeadersAccepted) { request_encoder_->encodeHeaders(request_headers, false); } +// Tests request headers with name containing underscore are dropped when the option is set to drop +// header. 
+TEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAreDropped) { + headers_with_underscores_action_ = envoy::api::v2::core::HttpProtocolOptions::DROP_HEADER; + initialize(); + + TestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + TestHeaderMapImpl expected_headers(request_headers); + request_headers.addCopy("bad_header", "something"); + EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), _)); + request_encoder_->encodeHeaders(request_headers, false); + EXPECT_EQ(1, server_stats_store_.counter("http2.dropped_headers_with_underscores").value()); +} + +// Tests that request with header names containing underscore are rejected when the option is set to +// reject request. +TEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAreRejectedByDefault) { + headers_with_underscores_action_ = envoy::api::v2::core::HttpProtocolOptions::REJECT_REQUEST; + initialize(); + + TestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + request_headers.addCopy("bad_header", "something"); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(1); + request_encoder_->encodeHeaders(request_headers, false); + EXPECT_EQ( + 1, + server_stats_store_.counter("http2.requests_rejected_with_underscores_in_headers").value()); +} + +// Tests request headers with name containing underscore are allowed when the option is set to +// allow. 
+TEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAllowed) { + headers_with_underscores_action_ = envoy::api::v2::core::HttpProtocolOptions::ALLOW; + initialize(); + + TestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + request_headers.addCopy("bad_header", "something"); + TestHeaderMapImpl expected_headers(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), _)); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + request_encoder_->encodeHeaders(request_headers, false); + EXPECT_EQ(0, server_stats_store_.counter("http2.dropped_headers_with_underscores").value()); +} + // This is the HTTP/2 variant of the HTTP/1 regression test for CVE-2019-18801. // Large method headers should not trigger ASSERTs or ASAN. The underlying issue // in CVE-2019-18801 only affected the HTTP/1 encoder, but we include a test @@ -1302,7 +1518,7 @@ TEST_P(Http2CodecImplTest, PingFlood) { EXPECT_THROW(client_->sendPendingFrames(), FrameFloodException); EXPECT_EQ(ack_count, Http2Settings::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES); - EXPECT_EQ(1, stats_store_.counter("http2.outbound_control_flood").value()); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_control_flood").value()); } // Verify that codec allows PING flood when mitigation is disabled @@ -1329,7 +1545,10 @@ TEST_P(Http2CodecImplTest, PingFloodMitigationDisabled) { // Verify that outbound control frame counter decreases when send buffer is drained TEST_P(Http2CodecImplTest, PingFloodCounterReset) { - static const int kMaxOutboundControlFrames = 100; + // Ping frames are 17 bytes each so 237 full frames and a partial frame fit in the current min + // size for buffer slices. Setting the limit to 2x+1 the number that fits in a single slice allows + // the logic below that verifies drain and overflow thresholds. 
+ static const int kMaxOutboundControlFrames = 475; max_outbound_control_frames_ = kMaxOutboundControlFrames; initialize(); @@ -1354,15 +1573,16 @@ TEST_P(Http2CodecImplTest, PingFloodCounterReset) { EXPECT_NO_THROW(client_->sendPendingFrames()); EXPECT_EQ(ack_count, kMaxOutboundControlFrames); - // Drain kMaxOutboundFrames / 2 slices from the send buffer + // Drain floor(kMaxOutboundFrames / 2) slices from the send buffer buffer.drain(buffer.length() / 2); - // Send kMaxOutboundFrames / 2 more pings. + // Send floor(kMaxOutboundFrames / 2) more pings. for (int i = 0; i < kMaxOutboundControlFrames / 2; ++i) { EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); } // The number of outbound frames should be half of max so the connection should not be terminated. EXPECT_NO_THROW(client_->sendPendingFrames()); + EXPECT_EQ(ack_count, kMaxOutboundControlFrames + kMaxOutboundControlFrames / 2); // 1 more ping frame should overflow the outbound frame limit. EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); @@ -1396,7 +1616,7 @@ TEST_P(Http2CodecImplTest, ResponseHeadersFlood) { EXPECT_THROW(client_->sendPendingFrames(), FrameFloodException); EXPECT_EQ(frame_count, Http2Settings::DEFAULT_MAX_OUTBOUND_FRAMES + 1); - EXPECT_EQ(1, stats_store_.counter("http2.outbound_flood").value()); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); } // Verify that codec detects flood of outbound DATA frames @@ -1429,7 +1649,7 @@ TEST_P(Http2CodecImplTest, ResponseDataFlood) { EXPECT_THROW(client_->sendPendingFrames(), FrameFloodException); EXPECT_EQ(frame_count, Http2Settings::DEFAULT_MAX_OUTBOUND_FRAMES + 1); - EXPECT_EQ(1, stats_store_.counter("http2.outbound_flood").value()); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); } // Verify that codec allows outbound DATA flood when mitigation is disabled @@ -1533,7 +1753,7 @@ TEST_P(Http2CodecImplTest, 
PingStacksWithDataFlood) { EXPECT_THROW(client_->sendPendingFrames(), FrameFloodException); EXPECT_EQ(frame_count, Http2Settings::DEFAULT_MAX_OUTBOUND_FRAMES); - EXPECT_EQ(1, stats_store_.counter("http2.outbound_flood").value()); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); } TEST_P(Http2CodecImplTest, PriorityFlood) { diff --git a/test/common/http/http2/codec_impl_test_util.h b/test/common/http/http2/codec_impl_test_util.h index 67521db0fec86..2370e541ec3c9 100644 --- a/test/common/http/http2/codec_impl_test_util.h +++ b/test/common/http/http2/codec_impl_test_util.h @@ -10,9 +10,11 @@ class TestServerConnectionImpl : public ServerConnectionImpl { public: TestServerConnectionImpl(Network::Connection& connection, ServerConnectionCallbacks& callbacks, Stats::Scope& scope, const Http2Settings& http2_settings, - uint32_t max_request_headers_kb, uint32_t max_request_headers_count) + uint32_t max_request_headers_kb, uint32_t max_request_headers_count, + envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action) : ServerConnectionImpl(connection, callbacks, scope, http2_settings, max_request_headers_kb, - max_request_headers_count) {} + max_request_headers_count, headers_with_underscores_action) {} nghttp2_session* session() { return session_; } using ServerConnectionImpl::getStream; }; diff --git a/test/common/http/http2/frame_replay_test.cc b/test/common/http/http2/frame_replay_test.cc index af022412b8a08..d211b3ae431f3 100644 --- a/test/common/http/http2/frame_replay_test.cc +++ b/test/common/http/http2/frame_replay_test.cc @@ -58,7 +58,8 @@ TEST_F(RequestFrameCommentTest, SimpleExampleHuffman) { ServerCodecFrameInjector codec; TestServerConnectionImpl connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.settings_, - Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT); + Http::DEFAULT_MAX_REQUEST_HEADERS_KB, 
Http::DEFAULT_MAX_HEADERS_COUNT, + envoy::api::v2::core::HttpProtocolOptions::ALLOW); codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); codec.write(WellKnownFrames::defaultSettingsFrame(), connection); codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); @@ -134,7 +135,8 @@ TEST_F(RequestFrameCommentTest, SimpleExamplePlain) { ServerCodecFrameInjector codec; TestServerConnectionImpl connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.settings_, - Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT); + Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, + envoy::api::v2::core::HttpProtocolOptions::ALLOW); codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); codec.write(WellKnownFrames::defaultSettingsFrame(), connection); codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); @@ -197,7 +199,8 @@ TEST_F(RequestFrameCommentTest, SingleByteNulCrLfInHeaderFrame) { ServerCodecFrameInjector codec; TestServerConnectionImpl connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.settings_, - Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT); + Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, + envoy::api::v2::core::HttpProtocolOptions::ALLOW); codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); codec.write(WellKnownFrames::defaultSettingsFrame(), connection); codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); @@ -265,7 +268,8 @@ TEST_F(RequestFrameCommentTest, SingleByteNulCrLfInHeaderField) { ServerCodecFrameInjector codec; TestServerConnectionImpl connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.settings_, - Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT); + Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, + 
envoy::api::v2::core::HttpProtocolOptions::ALLOW); codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); codec.write(WellKnownFrames::defaultSettingsFrame(), connection); codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); diff --git a/test/common/http/http2/request_header_fuzz_test.cc b/test/common/http/http2/request_header_fuzz_test.cc index 440014afb648b..b61699c524e85 100644 --- a/test/common/http/http2/request_header_fuzz_test.cc +++ b/test/common/http/http2/request_header_fuzz_test.cc @@ -18,7 +18,8 @@ void Replay(const Frame& frame, ServerCodecFrameInjector& codec) { // Create the server connection containing the nghttp2 session. TestServerConnectionImpl connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.settings_, - Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT); + Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, + envoy::api::v2::core::HttpProtocolOptions::ALLOW); codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); codec.write(WellKnownFrames::defaultSettingsFrame(), connection); codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); diff --git a/test/common/http/path_utility_test.cc b/test/common/http/path_utility_test.cc index 3ee558a2633e7..140be67631eb8 100644 --- a/test/common/http/path_utility_test.cc +++ b/test/common/http/path_utility_test.cc @@ -107,5 +107,25 @@ TEST_F(PathUtilityTest, MergeSlashes) { EXPECT_EQ("/a/b?", mergeSlashes("/a//b?")); // empty query } +TEST_F(PathUtilityTest, RemoveQueryAndFragment) { + EXPECT_EQ("", PathUtil::removeQueryAndFragment("")); + EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc")); + EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc?")); + EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc?param=value")); + EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc?param=value1¶m=value2")); + EXPECT_EQ("/abc", 
PathUtil::removeQueryAndFragment("/abc??")); + EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc??param=value")); + EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc#")); + EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc#fragment")); + EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc#fragment?param=value")); + EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc##")); + EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc#?")); + EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc#?param=value")); + EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc?#")); + EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc?#fragment")); + EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc?param=value#")); + EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc?param=value#fragment")); +} + } // namespace Http } // namespace Envoy diff --git a/test/common/network/BUILD b/test/common/network/BUILD index ad449c8601a7a..17bda98208529 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -25,6 +25,7 @@ envoy_cc_test_library( "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", ], ) @@ -125,11 +126,11 @@ envoy_cc_test( "//test/mocks/buffer:buffer_mocks", "//test/mocks/network:network_mocks", "//test/mocks/ratelimit:ratelimit_mocks", - "//test/mocks/runtime:runtime_mocks", "//test/mocks/server:server_mocks", "//test/mocks/tracing:tracing_mocks", "//test/mocks/upstream:host_mocks", "//test/mocks/upstream:upstream_mocks", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", ], ) @@ -171,6 +172,7 @@ envoy_cc_test( "//source/common/stats:stats_lib", "//test/common/network:listener_impl_test_base_lib", "//test/mocks/network:network_mocks", + "//test/mocks/runtime:runtime_mocks", "//test/mocks/server:server_mocks", 
"//test/test_common:environment_lib", "//test/test_common:network_utility_lib", diff --git a/test/common/network/dns_impl_test.cc b/test/common/network/dns_impl_test.cc index c9bcd1468773e..fa1de4e07862e 100644 --- a/test/common/network/dns_impl_test.cc +++ b/test/common/network/dns_impl_test.cc @@ -266,6 +266,8 @@ class TestDnsServer : public ListenerCallbacks { queries_.emplace_back(query); } + void onReject() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + void addHosts(const std::string& hostname, const IpList& ip, const RecordType& type) { if (type == RecordType::A) { hosts_a_[hostname] = ip; diff --git a/test/common/network/listener_impl_test.cc b/test/common/network/listener_impl_test.cc index 8c5fe2738efe8..ac8b2152a9c63 100644 --- a/test/common/network/listener_impl_test.cc +++ b/test/common/network/listener_impl_test.cc @@ -7,6 +7,7 @@ #include "test/mocks/server/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -134,6 +135,72 @@ TEST_P(ListenerImplTest, UseActualDst) { dispatcher_->run(Event::Dispatcher::RunType::Block); } +TEST_P(ListenerImplTest, GlobalConnectionLimitEnforcement) { + // Required to manipulate runtime values when there is no test server. 
+ TestScopedRuntime scoped_runtime; + + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"overload.global_downstream_max_connections", "2"}}); + Network::TcpListenSocket socket(Network::Test::getCanonicalLoopbackAddress(version_), nullptr, + true); + Network::MockListenerCallbacks listener_callbacks; + Network::MockConnectionHandler connection_handler; + Network::ListenerPtr listener = dispatcher_->createListener(socket, listener_callbacks, true); + + std::vector client_connections; + std::vector server_connections; + StreamInfo::StreamInfoImpl stream_info(dispatcher_->timeSource()); + EXPECT_CALL(listener_callbacks, onAccept_(_)) + .WillRepeatedly(Invoke([&](Network::ConnectionSocketPtr& accepted_socket) -> void { + server_connections.emplace_back(dispatcher_->createServerConnection( + std::move(accepted_socket), Network::Test::createRawBufferSocket())); + dispatcher_->exit(); + })); + + auto initiate_connections = [&](const int count) { + for (int i = 0; i < count; ++i) { + client_connections.emplace_back(dispatcher_->createClientConnection( + socket.localAddress(), Network::Address::InstanceConstSharedPtr(), + Network::Test::createRawBufferSocket(), nullptr)); + client_connections.back()->connect(); + } + }; + + initiate_connections(5); + EXPECT_CALL(listener_callbacks, onReject()).Times(3); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + // We expect any server-side connections that get created to populate 'server_connections'. + EXPECT_EQ(2, server_connections.size()); + + // Let's increase the allowed connections and try sending more connections. + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"overload.global_downstream_max_connections", "3"}}); + initiate_connections(5); + EXPECT_CALL(listener_callbacks, onReject()).Times(4); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_EQ(3, server_connections.size()); + + // Clear the limit and verify there's no longer a limit. 
+ Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"overload.global_downstream_max_connections", ""}}); + initiate_connections(10); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_EQ(13, server_connections.size()); + + for (const auto& conn : client_connections) { + conn->close(ConnectionCloseType::NoFlush); + } + for (const auto& conn : server_connections) { + conn->close(ConnectionCloseType::NoFlush); + } + + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"overload.global_downstream_max_connections", ""}}); +} + TEST_P(ListenerImplTest, WildcardListenerUseActualDst) { Network::TcpListenSocket socket(Network::Test::getAnyAddress(version_), nullptr, true); Network::MockListenerCallbacks listener_callbacks; diff --git a/test/config/utility.cc b/test/config/utility.cc index 29717682558e6..58e1d400c58a9 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -330,6 +330,22 @@ void ConfigHelper::applyConfigModifiers() { config_modifiers_.clear(); } +void ConfigHelper::addRuntimeOverride(const std::string& key, const std::string& value) { + if (bootstrap_.mutable_layered_runtime()->layers_size() == 0) { + auto* static_layer = bootstrap_.mutable_layered_runtime()->add_layers(); + static_layer->set_name("static_layer"); + static_layer->mutable_static_layer(); + auto* admin_layer = bootstrap_.mutable_layered_runtime()->add_layers(); + admin_layer->set_name("admin"); + admin_layer->mutable_admin_layer(); + } + auto* static_layer = + bootstrap_.mutable_layered_runtime()->mutable_layers(0)->mutable_static_layer(); + ProtobufWkt::Value string_value; + string_value.set_string_value(value); + (*static_layer->mutable_fields())[std::string(key)] = std::move(string_value); +} + void ConfigHelper::finalize(const std::vector& ports) { RELEASE_ASSERT(!finalized_, ""); diff --git a/test/config/utility.h b/test/config/utility.h index 8359fa0edeb1b..ee7ae27c65118 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -169,6 
+169,9 @@ class ConfigHelper { // configuration generated in ConfigHelper::finalize. void skipPortUsageValidation() { skip_port_usage_validation_ = true; } + // Add this key value pair to the static runtime. + void addRuntimeOverride(const std::string& key, const std::string& value); + private: // Load the first HCM struct from the first listener into a parsed proto. bool loadHttpConnectionManager( diff --git a/test/coverage/gen_build.sh b/test/coverage/gen_build.sh index fb79e074745ad..a058b1686b1d4 100755 --- a/test/coverage/gen_build.sh +++ b/test/coverage/gen_build.sh @@ -35,11 +35,6 @@ for target in ${COVERAGE_TARGETS}; do TARGETS="$TARGETS $("${BAZEL_BIN}" query ${BAZEL_QUERY_OPTIONS} "attr('tags', 'coverage_test_lib', ${REPOSITORY}${target})" | grep "^//")" done -# Run the QUICHE platform api tests for coverage. -if [[ "${COVERAGE_TARGETS}" == "//test/..." ]]; then - TARGETS="$TARGETS $("${BAZEL_BIN}" query ${BAZEL_QUERY_OPTIONS} "attr('tags', 'coverage_test_lib', '@com_googlesource_quiche//:all')" | grep "^@com_googlesource_quiche")" -fi - if [ -n "${EXTRA_QUERY_PATHS}" ]; then TARGETS="$TARGETS $("${BAZEL_BIN}" query ${BAZEL_QUERY_OPTIONS} "attr('tags', 'coverage_test_lib', ${EXTRA_QUERY_PATHS})" | grep "^//")" fi diff --git a/test/extensions/filters/common/rbac/matchers_test.cc b/test/extensions/filters/common/rbac/matchers_test.cc index 699b53d8b4e76..67ccf0d3f0e1d 100644 --- a/test/extensions/filters/common/rbac/matchers_test.cc +++ b/test/extensions/filters/common/rbac/matchers_test.cc @@ -353,6 +353,40 @@ TEST(RequestedServerNameMatcher, EmptyRequestedServerName) { conn); } +TEST(PathMatcher, NoPathInHeader) { + Envoy::Http::HeaderMapImpl headers; + envoy::type::matcher::PathMatcher matcher; + matcher.mutable_path()->mutable_safe_regex()->mutable_google_re2(); + matcher.mutable_path()->mutable_safe_regex()->set_regex(".*"); + + Envoy::Http::LowerCaseString path(":path"); + std::string value = "/path"; + headers.setReference(path, value); + 
checkMatcher(PathMatcher(matcher), true, Envoy::Network::MockConnection(), headers); + headers.removePath(); + checkMatcher(PathMatcher(matcher), false, Envoy::Network::MockConnection(), headers); +} + +TEST(PathMatcher, ValidPathInHeader) { + Envoy::Http::HeaderMapImpl headers; + envoy::type::matcher::PathMatcher matcher; + matcher.mutable_path()->set_exact("/exact"); + + Envoy::Http::LowerCaseString path(":path"); + std::string value = "/exact"; + headers.setReference(path, value); + checkMatcher(PathMatcher(matcher), true, Envoy::Network::MockConnection(), headers); + value = "/exact?param=val"; + headers.setReference(path, value); + checkMatcher(PathMatcher(matcher), true, Envoy::Network::MockConnection(), headers); + value = "/exact#fragment"; + headers.setReference(path, value); + checkMatcher(PathMatcher(matcher), true, Envoy::Network::MockConnection(), headers); + value = "/exacz"; + headers.setReference(path, value); + checkMatcher(PathMatcher(matcher), false, Envoy::Network::MockConnection(), headers); +} + } // namespace } // namespace RBAC } // namespace Common diff --git a/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc b/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc index 2bdbe6f83af43..1d44ceecf98e0 100644 --- a/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc +++ b/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc @@ -35,6 +35,34 @@ name: envoy.filters.http.rbac - any: true )EOF"; +const std::string RBAC_CONFIG_WITH_PATH_EXACT_MATCH = R"EOF( +name: envoy.filters.http.rbac +typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.rbac.v2.RBAC + rules: + policies: + foo: + permissions: + - url_path: + path: { exact: "/allow" } + principals: + - any: true +)EOF"; + +const std::string RBAC_CONFIG_WITH_PATH_IGNORE_CASE_MATCH = R"EOF( +name: envoy.filters.http.rbac +typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.rbac.v2.RBAC + rules: + policies: + 
foo: + permissions: + - url_path: + path: { exact: "/ignore_case", ignore_case: true } + principals: + - any: true +)EOF"; + using RBACIntegrationTest = HttpProtocolIntegrationTest; INSTANTIATE_TEST_SUITE_P(Protocols, RBACIntegrationTest, @@ -195,5 +223,59 @@ TEST_P(RBACIntegrationTest, RouteOverride) { EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } +TEST_P(RBACIntegrationTest, PathWithQueryAndFragment) { + config_helper_.addFilter(RBAC_CONFIG_WITH_PATH_EXACT_MATCH); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + const std::vector paths{"/allow", "/allow?p1=v1&p2=v2", "/allow?p1=v1#seg"}; + + for (const auto& path : paths) { + auto response = codec_client_->makeRequestWithBody( + Http::TestHeaderMapImpl{ + {":method", "POST"}, + {":path", path}, + {":scheme", "http"}, + {":authority", "host"}, + {"x-forwarded-for", "10.0.0.1"}, + }, + 1024); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(Http::TestHeaderMapImpl{{":status", "200"}}, true); + + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + } +} + +TEST_P(RBACIntegrationTest, PathIgnoreCase) { + config_helper_.addFilter(RBAC_CONFIG_WITH_PATH_IGNORE_CASE_MATCH); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + const std::vector paths{"/ignore_case", "/IGNORE_CASE", "/ignore_CASE"}; + + for (const auto& path : paths) { + auto response = codec_client_->makeRequestWithBody( + Http::TestHeaderMapImpl{ + {":method", "POST"}, + {":path", path}, + {":scheme", "http"}, + {":authority", "host"}, + {"x-forwarded-for", "10.0.0.1"}, + }, + 1024); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(Http::TestHeaderMapImpl{{":status", "200"}}, true); + + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + } +} + } // 
namespace } // namespace Envoy diff --git a/test/extensions/filters/http/rbac/rbac_filter_test.cc b/test/extensions/filters/http/rbac/rbac_filter_test.cc index ecc0b1dcafacf..52cd7ac3ad789 100644 --- a/test/extensions/filters/http/rbac/rbac_filter_test.cc +++ b/test/extensions/filters/http/rbac/rbac_filter_test.cc @@ -30,6 +30,7 @@ class RoleBasedAccessControlFilterTest : public testing::Test { auto policy_rules = policy.add_permissions()->mutable_or_rules(); policy_rules->add_rules()->mutable_requested_server_name()->set_regex(".*cncf.io"); policy_rules->add_rules()->set_destination_port(123); + policy_rules->add_rules()->mutable_url_path()->mutable_path()->set_suffix("suffix"); policy.add_principals()->set_any(true); config.mutable_rules()->set_action(envoy::config::rbac::v2::RBAC::ALLOW); (*config.mutable_rules()->mutable_policies())["foo"] = policy; @@ -113,6 +114,18 @@ TEST_F(RoleBasedAccessControlFilterTest, RequestedServerName) { EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(headers_)); } +TEST_F(RoleBasedAccessControlFilterTest, Path) { + setDestinationPort(999); + + auto headers = Http::TestHeaderMapImpl{ + {":method", "GET"}, + {":path", "/suffix#seg?param=value"}, + {":scheme", "http"}, + {":authority", "host"}, + }; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers, false)); +} + TEST_F(RoleBasedAccessControlFilterTest, Denied) { setDestinationPort(456); setMetadata(); diff --git a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc index 9184220073016..529cc6f6a2b40 100644 --- a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc +++ b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc @@ -76,6 +76,7 @@ class ProxyProtocolTest : public testing::TestWithParam connection_callbacks_; Network::Connection* server_connection_; Network::MockConnectionCallbacks 
server_callbacks_; + BasicResourceLimitImpl open_connections_; std::shared_ptr read_filter_; std::string name_; const Network::FilterChainSharedPtr filter_chain_; @@ -920,9 +922,8 @@ class WildcardProxyProtocolTest : public testing::TestWithParamconnected()); } @@ -252,7 +252,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, AltsIntegrationTestClientWrongHandshaker, // and connection closes. TEST_P(AltsIntegrationTestClientWrongHandshaker, ConnectToWrongHandshakerAddress) { initialize(); - codec_client_ = makeRawHttpConnection(makeAltsConnection()); + codec_client_ = makeRawHttpConnection(makeAltsConnection(), absl::nullopt); EXPECT_FALSE(codec_client_->connected()); } diff --git a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc index c3d0960dc06c9..a353b1aa508f7 100644 --- a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc +++ b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc @@ -276,7 +276,8 @@ TEST_P(SslCertficateIntegrationTest, ServerEcdsaClientRsaOnly) { server_rsa_cert_ = false; server_ecdsa_cert_ = true; initialize(); - auto codec_client = makeRawHttpConnection(makeSslClientConnection(rsaOnlyClientOptions())); + auto codec_client = + makeRawHttpConnection(makeSslClientConnection(rsaOnlyClientOptions()), absl::nullopt); EXPECT_FALSE(codec_client->connected()); const std::string counter_name = listenerStatPrefix("ssl.connection_error"); Stats::CounterSharedPtr counter = test_server_->counter(counter_name); @@ -303,7 +304,8 @@ TEST_P(SslCertficateIntegrationTest, ServerRsaClientEcdsaOnly) { client_ecdsa_cert_ = true; initialize(); EXPECT_FALSE( - makeRawHttpConnection(makeSslClientConnection(ecdsaOnlyClientOptions()))->connected()); + makeRawHttpConnection(makeSslClientConnection(ecdsaOnlyClientOptions()), absl::nullopt) + ->connected()); const std::string counter_name = listenerStatPrefix("ssl.connection_error"); 
Stats::CounterSharedPtr counter = test_server_->counter(counter_name); test_server_->waitForCounterGe(counter_name, 1); diff --git a/test/integration/BUILD b/test/integration/BUILD index a2788937b5c35..6e03419e5a1f4 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -10,10 +10,6 @@ load( "envoy_select_hot_restart", "envoy_sh_test", ) -load( - "//source/extensions:all_extensions.bzl", - "envoy_all_extensions", -) envoy_package() @@ -449,6 +445,7 @@ envoy_cc_test_library( "//source/common/buffer:buffer_lib", "//source/common/buffer:zero_copy_input_stream_lib", "//source/common/common:assert_lib", + "//source/common/common:basic_resource_lib", "//source/common/common:minimal_logger_lib", "//source/common/event:dispatcher_lib", "//source/common/grpc:codec_lib", @@ -549,8 +546,9 @@ envoy_cc_test( srcs = [ "echo_integration_test.cc", ], - # This test must be run in exclusive mode: see comments in AddRemoveListener - tags = ["exclusive"], + # Uncomment this line to run this test repeatedly in exclusive mode if not using docker-sandbox, + # or RBE, see comments in AddRemoveListener. 
+ # tags = ["exclusive"], deps = [ ":integration_lib", "//source/extensions/filters/network/echo:config", @@ -913,3 +911,17 @@ envoy_cc_test( "//test/test_common:utility_lib", ], ) + +envoy_cc_test( + name = "cx_limit_integration_test", + srcs = ["cx_limit_integration_test.cc"], + deps = [ + ":http_integration_lib", + "//include/envoy/network:filter_interface", + "//include/envoy/registry", + "//source/extensions/filters/network/tcp_proxy:config", + "//test/config:utility_lib", + "//test/test_common:logging_lib", + "//test/test_common:simulated_time_system_lib", + ], +) diff --git a/test/integration/autonomous_upstream.cc b/test/integration/autonomous_upstream.cc index bd00fd176b1ef..d4c8c94da3aab 100644 --- a/test/integration/autonomous_upstream.cc +++ b/test/integration/autonomous_upstream.cc @@ -63,7 +63,8 @@ AutonomousHttpConnection::AutonomousHttpConnection(SharedConnectionWrapper& shar Stats::Store& store, Type type, AutonomousUpstream& upstream) : FakeHttpConnection(shared_connection, store, type, upstream.timeSystem(), - Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT), + Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, + envoy::api::v2::core::HttpProtocolOptions::ALLOW), upstream_(upstream) {} Http::StreamDecoder& AutonomousHttpConnection::newStream(Http::StreamEncoder& response_encoder, diff --git a/test/integration/cx_limit_integration_test.cc b/test/integration/cx_limit_integration_test.cc new file mode 100644 index 0000000000000..ce96cb47e1b40 --- /dev/null +++ b/test/integration/cx_limit_integration_test.cc @@ -0,0 +1,147 @@ +#include "envoy/network/filter.h" +#include "envoy/registry/registry.h" + +#include "common/network/utility.h" + +#include "test/config/utility.h" +#include "test/integration/integration.h" +#include "test/test_common/logging.h" +#include "test/test_common/simulated_time_system.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace { + +class ConnectionLimitIntegrationTest : 
public testing::TestWithParam, + public Event::TestUsingSimulatedTime, + public BaseIntegrationTest { +public: + ConnectionLimitIntegrationTest() + : BaseIntegrationTest(GetParam(), ConfigHelper::TCP_PROXY_CONFIG) {} + + void setEmptyListenerLimit() { + config_helper_.addRuntimeOverride("envoy.resource_limits.listener.listener_0.connection_limit", + ""); + } + + void setListenerLimit(const uint32_t num_conns) { + config_helper_.addRuntimeOverride("envoy.resource_limits.listener.listener_0.connection_limit", + std::to_string(num_conns)); + } + + void setGlobalLimit(std::string&& num_conns) { + config_helper_.addRuntimeOverride("overload.global_downstream_max_connections", num_conns); + } + + void initialize() override { BaseIntegrationTest::initialize(); } + + // Assumes a limit of 2 connections. + void doTest(std::function init_func, std::string&& check_stat) { + init_func(); + + std::vector tcp_clients; + std::vector raw_conns; + + tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); + raw_conns.emplace_back(); + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); + ASSERT_TRUE(tcp_clients.back()->connected()); + + tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); + raw_conns.emplace_back(); + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); + ASSERT_TRUE(tcp_clients.back()->connected()); + + tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); + raw_conns.emplace_back(); + ASSERT_FALSE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); + tcp_clients.back()->waitForDisconnect(); + + // Get rid of the client that failed to connect. + tcp_clients.back()->close(); + tcp_clients.pop_back(); + + // Close the first connection that was successful so that we can open a new successful + // connection. 
+ tcp_clients.front()->close(); + ASSERT_TRUE(raw_conns.front()->close()); + ASSERT_TRUE(raw_conns.front()->waitForDisconnect()); + + tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); + raw_conns.emplace_back(); + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); + ASSERT_TRUE(tcp_clients.back()->connected()); + + const bool isV4 = (version_ == Network::Address::IpVersion::v4); + auto local_address = isV4 ? Network::Utility::getCanonicalIpv4LoopbackAddress() + : Network::Utility::getIpv6LoopbackAddress(); + + const std::string counter_prefix = (isV4 ? "listener.127.0.0.1_0." : "listener.[__1]_0."); + + test_server_->waitForCounterEq(counter_prefix + check_stat, 1); + + for (auto& tcp_client : tcp_clients) { + tcp_client->close(); + } + + tcp_clients.clear(); + raw_conns.clear(); + } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, ConnectionLimitIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(ConnectionLimitIntegrationTest, TestListenerLimit) { + std::function init_func = [this]() { + setListenerLimit(2); + initialize(); + }; + + doTest(init_func, "downstream_cx_overflow"); +} + +TEST_P(ConnectionLimitIntegrationTest, TestEmptyGlobalCxRuntimeLimit) { + const std::string log_line = "no configured limit to the number of allowed active connections."; + EXPECT_LOG_CONTAINS("warn", log_line, { initialize(); }); +} + +TEST_P(ConnectionLimitIntegrationTest, TestEmptyListenerRuntimeLimit) { + const std::string log_line = + "Listener connection limit runtime key " + "envoy.resource_limits.listener.listener_0.connection_limit is empty. 
There are currently " + "no limitations on the number of accepted connections for listener listener_0."; + EXPECT_LOG_CONTAINS("warn", log_line, { + setEmptyListenerLimit(); + initialize(); + }); +} + +TEST_P(ConnectionLimitIntegrationTest, TestGlobalLimit) { + std::function init_func = [this]() { + // Includes twice the number of connections expected because the tracking is performed via a + // static variable and the fake upstream has a listener. This causes upstream connections to the + // fake upstream to also be tracked as part of the global downstream connection tracking. + setGlobalLimit("4"); + initialize(); + }; + + doTest(init_func, "downstream_global_cx_overflow"); +} + +TEST_P(ConnectionLimitIntegrationTest, TestBothLimits) { + std::function init_func = [this]() { + // Setting the listener limit to a much higher value and making sure the right stat gets + // incremented when both limits are set. + setGlobalLimit("4"); + setListenerLimit(100); + initialize(); + }; + + doTest(init_func, "downstream_global_cx_overflow"); +} + +} // namespace +} // namespace Envoy diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index f3884981a1f2a..093f38402021e 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -214,23 +214,24 @@ void FakeStream::finishGrpcStream(Grpc::Status::GrpcStatus status) { Http::TestHeaderMapImpl{{"grpc-status", std::to_string(static_cast(status))}}); } -FakeHttpConnection::FakeHttpConnection(SharedConnectionWrapper& shared_connection, - Stats::Store& store, Type type, - Event::TestTimeSystem& time_system, - uint32_t max_request_headers_kb, - uint32_t max_request_headers_count) +FakeHttpConnection::FakeHttpConnection( + SharedConnectionWrapper& shared_connection, Stats::Store& store, Type type, + Event::TestTimeSystem& time_system, uint32_t max_request_headers_kb, + uint32_t max_request_headers_count, + envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + 
headers_with_underscores_action) : FakeConnectionBase(shared_connection, time_system) { if (type == Type::HTTP1) { codec_ = std::make_unique( shared_connection_.connection(), store, *this, Http::Http1Settings(), - max_request_headers_kb, max_request_headers_count); + max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); } else { auto settings = Http::Http2Settings(); settings.allow_connect_ = true; settings.allow_metadata_ = true; codec_ = std::make_unique( shared_connection_.connection(), *this, store, settings, max_request_headers_kb, - max_request_headers_count); + max_request_headers_count, headers_with_underscores_action); ASSERT(type == Type::HTTP2); } @@ -449,11 +450,11 @@ void FakeUpstream::threadRoutine() { } } -AssertionResult FakeUpstream::waitForHttpConnection(Event::Dispatcher& client_dispatcher, - FakeHttpConnectionPtr& connection, - milliseconds timeout, - uint32_t max_request_headers_kb, - uint32_t max_request_headers_count) { +AssertionResult FakeUpstream::waitForHttpConnection( + Event::Dispatcher& client_dispatcher, FakeHttpConnectionPtr& connection, milliseconds timeout, + uint32_t max_request_headers_kb, uint32_t max_request_headers_count, + envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action) { Event::TestTimeSystem& time_system = timeSystem(); auto end_time = time_system.monotonicTime() + timeout; { @@ -472,9 +473,9 @@ AssertionResult FakeUpstream::waitForHttpConnection(Event::Dispatcher& client_di if (new_connections_.empty()) { return AssertionFailure() << "Got a new connection event, but didn't create a connection."; } - connection = std::make_unique(consumeConnection(), stats_store_, http_type_, - time_system, max_request_headers_kb, - max_request_headers_count); + connection = std::make_unique( + consumeConnection(), stats_store_, http_type_, time_system, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); } 
VERIFY_ASSERTION(connection->initialize()); VERIFY_ASSERTION(connection->readDisable(false)); @@ -505,7 +506,7 @@ FakeUpstream::waitForHttpConnection(Event::Dispatcher& client_dispatcher, connection = std::make_unique( upstream.consumeConnection(), upstream.stats_store_, upstream.http_type_, upstream.timeSystem(), Http::DEFAULT_MAX_REQUEST_HEADERS_KB, - Http::DEFAULT_MAX_HEADERS_COUNT); + Http::DEFAULT_MAX_HEADERS_COUNT, envoy::api::v2::core::HttpProtocolOptions::ALLOW); lock.release(); VERIFY_ASSERTION(connection->initialize()); VERIFY_ASSERTION(connection->readDisable(false)); diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index 49106fc3e8024..aac2111e22a54 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -18,6 +18,7 @@ #include "common/buffer/buffer_impl.h" #include "common/buffer/zero_copy_input_stream_impl.h" +#include "common/common/basic_resource_impl.h" #include "common/common/callback_impl.h" #include "common/common/linked_object.h" #include "common/common/lock_guard.h" @@ -414,7 +415,9 @@ class FakeHttpConnection : public Http::ServerConnectionCallbacks, public FakeCo FakeHttpConnection(SharedConnectionWrapper& shared_connection, Stats::Store& store, Type type, Event::TestTimeSystem& time_system, uint32_t max_request_headers_kb, - uint32_t max_request_headers_count); + uint32_t max_request_headers_count, + envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action); // By default waitForNewStream assumes the next event is a new stream and // returns AssertionFailure if an unexpected event occurs. If a caller truly @@ -555,11 +558,13 @@ class FakeUpstream : Logger::Loggable, // Returns the new connection via the connection argument. 
ABSL_MUST_USE_RESULT - testing::AssertionResult - waitForHttpConnection(Event::Dispatcher& client_dispatcher, FakeHttpConnectionPtr& connection, - std::chrono::milliseconds timeout = TestUtility::DefaultTimeout, - uint32_t max_request_headers_kb = Http::DEFAULT_MAX_REQUEST_HEADERS_KB, - uint32_t max_request_headers_count = Http::DEFAULT_MAX_HEADERS_COUNT); + testing::AssertionResult waitForHttpConnection( + Event::Dispatcher& client_dispatcher, FakeHttpConnectionPtr& connection, + std::chrono::milliseconds timeout = TestUtility::DefaultTimeout, + uint32_t max_request_headers_kb = Http::DEFAULT_MAX_REQUEST_HEADERS_KB, + uint32_t max_request_headers_count = Http::DEFAULT_MAX_HEADERS_COUNT, + envoy::api::v2::core::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action = envoy::api::v2::core::HttpProtocolOptions::ALLOW); ABSL_MUST_USE_RESULT testing::AssertionResult @@ -629,10 +634,17 @@ class FakeUpstream : Logger::Loggable, envoy::api::v2::core::TrafficDirection direction() const override { return envoy::api::v2::core::TrafficDirection::UNSPECIFIED; } + ResourceLimit& openConnections() override { return connection_resource_; } + + void setMaxConnections(const uint32_t num_connections) { + connection_resource_.setMax(num_connections); + } + void clearMaxConnections() { connection_resource_.resetMax(); } FakeUpstream& parent_; const std::string name_; Network::NopConnectionBalancerImpl connection_balancer_; + BasicResourceLimitImpl connection_resource_; }; void threadRoutine(); diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index 80b03bd88ad2e..830f043374e07 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -66,6 +66,28 @@ TEST_P(Http2IntegrationTest, RetryAttemptCount) { testRetryAttemptCountHeader(); TEST_P(Http2IntegrationTest, LargeRequestTrailersRejected) { testLargeRequestTrailers(66, 60); } +// Verify downstream codec 
stream flush timeout. +TEST_P(Http2IntegrationTest, CodecStreamIdleTimeout) { + config_helper_.setBufferLimits(1024, 1024); + config_helper_.addConfigModifier( + [&](envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager& hcm) + -> void { + hcm.mutable_stream_idle_timeout()->set_seconds(0); + constexpr uint64_t IdleTimeoutMs = 400; + hcm.mutable_stream_idle_timeout()->set_nanos(IdleTimeoutMs * 1000 * 1000); + }); + initialize(); + Http::Http2Settings http2_options; + http2_options.initial_stream_window_size_ = 65535; + codec_client_ = makeRawHttpConnection(makeClientConnection(lookupPort("http")), http2_options); + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(default_response_headers_, false); + upstream_request_->encodeData(70000, true); + test_server_->waitForCounterEq("http2.tx_flush_timeout", 1); + response->waitForReset(); +} + static std::string response_metadata_filter = R"EOF( name: response-metadata-filter typed_config: diff --git a/test/integration/http2_upstream_integration_test.cc b/test/integration/http2_upstream_integration_test.cc index 8b32a232f97d7..15f00afddcc19 100644 --- a/test/integration/http2_upstream_integration_test.cc +++ b/test/integration/http2_upstream_integration_test.cc @@ -237,6 +237,9 @@ void Http2UpstreamIntegrationTest::manySimultaneousRequests(uint32_t request_byt EXPECT_EQ("503", responses[i]->headers().Status()->value().getStringView()); } } + + EXPECT_EQ(0, test_server_->gauge("http2.streams_active")->value()); + EXPECT_EQ(0, test_server_->gauge("http2.pending_send_bytes")->value()); } TEST_P(Http2UpstreamIntegrationTest, ManySimultaneousRequest) { diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index 310542194c8ab..681a363e9a732 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -189,11 +189,16 @@ 
IntegrationCodecClientPtr HttpIntegrationTest::makeHttpConnection(uint32_t port) } IntegrationCodecClientPtr -HttpIntegrationTest::makeRawHttpConnection(Network::ClientConnectionPtr&& conn) { +HttpIntegrationTest::makeRawHttpConnection(Network::ClientConnectionPtr&& conn, + absl::optional http2_options) { std::shared_ptr cluster{new NiceMock()}; cluster->max_response_headers_count_ = 200; - cluster->http2_settings_.allow_connect_ = true; - cluster->http2_settings_.allow_metadata_ = true; + if (http2_options.has_value()) { + cluster->http2_settings_ = http2_options.value(); + } else { + cluster->http2_settings_.allow_connect_ = true; + cluster->http2_settings_.allow_metadata_ = true; + } Upstream::HostDescriptionConstSharedPtr host_description{Upstream::makeTestHostDescription( cluster, fmt::format("tcp://{}:80", Network::Test::getLoopbackAddressUrlString(version_)))}; return std::make_unique(*dispatcher_, std::move(conn), host_description, @@ -202,7 +207,7 @@ HttpIntegrationTest::makeRawHttpConnection(Network::ClientConnectionPtr&& conn) IntegrationCodecClientPtr HttpIntegrationTest::makeHttpConnection(Network::ClientConnectionPtr&& conn) { - auto codec = makeRawHttpConnection(std::move(conn)); + auto codec = makeRawHttpConnection(std::move(conn), absl::nullopt); EXPECT_TRUE(codec->connected()); return codec; } @@ -884,6 +889,44 @@ void HttpIntegrationTest::testTwoRequests(bool network_backup) { EXPECT_EQ(1024U, response->body().size()); } +void HttpIntegrationTest::testLargeRequestUrl(uint32_t url_size, uint32_t max_headers_size) { + // `size` parameter dictates the size of each header that will be added to the request and `count` + // parameter is the number of headers to be added. The actual request byte size will exceed `size` + // due to the keys and other headers. The actual request header count will exceed `count` by four + // due to default headers. 
+ + config_helper_.addConfigModifier( + [&](envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager& hcm) + -> void { hcm.mutable_max_request_headers_kb()->set_value(max_headers_size); }); + max_request_headers_kb_ = max_headers_size; + + Http::TestHeaderMapImpl big_headers{{":method", "GET"}, + {":path", "/" + std::string(url_size * 1024, 'a')}, + {":scheme", "http"}, + {":authority", "host"}}; + + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + if (url_size >= max_headers_size) { + // header size includes keys too, so expect rejection when equal + auto encoder_decoder = codec_client_->startRequest(big_headers); + auto response = std::move(encoder_decoder.second); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + codec_client_->waitForDisconnect(); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("431", response->headers().Status()->value().getStringView()); + } else { + response->waitForReset(); + codec_client_->close(); + } + } else { + auto response = sendRequestAndWaitForResponse(big_headers, 0, default_response_headers_, 0); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + } +} + void HttpIntegrationTest::testLargeRequestHeaders(uint32_t size, uint32_t count, uint32_t max_size, uint32_t max_count) { // `size` parameter dictates the size of each header that will be added to the request and `count` diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h index 8bbae101304b0..9839fd55ae9b3 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -108,7 +108,9 @@ class HttpIntegrationTest : public BaseIntegrationTest { IntegrationCodecClientPtr makeHttpConnection(uint32_t port); // Makes a http connection object without checking its connected state. 
- IntegrationCodecClientPtr makeRawHttpConnection(Network::ClientConnectionPtr&& conn); + virtual IntegrationCodecClientPtr + makeRawHttpConnection(Network::ClientConnectionPtr&& conn, + absl::optional http2_options); // Makes a http connection object with asserting a connected state. IntegrationCodecClientPtr makeHttpConnection(Network::ClientConnectionPtr&& conn); @@ -190,6 +192,7 @@ class HttpIntegrationTest : public BaseIntegrationTest { void testTwoRequests(bool force_network_backup = false); void testLargeHeaders(Http::TestHeaderMapImpl request_headers, Http::TestHeaderMapImpl request_trailers, uint32_t size, uint32_t max_size); + void testLargeRequestUrl(uint32_t url_size, uint32_t max_headers_size); void testLargeRequestHeaders(uint32_t size, uint32_t count, uint32_t max_size = 60, uint32_t max_count = 100); void testLargeRequestTrailers(uint32_t size, uint32_t max_size = 60); diff --git a/test/integration/integration_admin_test.cc b/test/integration/integration_admin_test.cc index bb590a860d142..548181de30e2f 100644 --- a/test/integration/integration_admin_test.cc +++ b/test/integration/integration_admin_test.cc @@ -260,14 +260,17 @@ TEST_P(IntegrationAdminTest, Admin) { switch (GetParam().downstream_protocol) { case Http::CodecClient::Type::HTTP1: EXPECT_EQ(" Count Lookup\n" + " 1 http1.dropped_headers_with_underscores\n" " 1 http1.metadata_not_supported_error\n" + " 1 http1.requests_rejected_with_underscores_in_headers\n" " 1 http1.response_flood\n" "\n" - "total: 2\n", + "total: 4\n", response->body()); break; case Http::CodecClient::Type::HTTP2: EXPECT_EQ(" Count Lookup\n" + " 1 http2.dropped_headers_with_underscores\n" " 1 http2.header_overflow\n" " 1 http2.headers_cb_no_stream\n" " 1 http2.inbound_empty_frames_flood\n" @@ -275,13 +278,17 @@ TEST_P(IntegrationAdminTest, Admin) { " 1 http2.inbound_window_update_frames_flood\n" " 1 http2.outbound_control_flood\n" " 1 http2.outbound_flood\n" + " 1 http2.pending_send_bytes\n" + " 1 
http2.requests_rejected_with_underscores_in_headers\n" " 1 http2.rx_messaging_error\n" " 1 http2.rx_reset\n" + " 1 http2.streams_active\n" " 1 http2.too_many_header_frames\n" " 1 http2.trailers\n" + " 1 http2.tx_flush_timeout\n" " 1 http2.tx_reset\n" "\n" - "total: 12\n", + "total: 17\n", response->body()); break; case Http::CodecClient::Type::HTTP3: diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index c306bdbb4827b..d94fe99d50576 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -34,6 +34,7 @@ #include "gtest/gtest.h" using testing::HasSubstr; +using testing::Not; namespace Envoy { @@ -593,6 +594,85 @@ TEST_P(ProtocolIntegrationTest, TwoRequests) { testTwoRequests(); } TEST_P(ProtocolIntegrationTest, TwoRequestsWithForcedBackup) { testTwoRequests(true); } +// Verify that headers with underscores in their names are dropped from client requests +// but remain in upstream responses. 
+TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresDropped) { + config_helper_.addConfigModifier( + [&](envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager& hcm) + -> void { + hcm.mutable_common_http_protocol_options()->set_headers_with_underscores_action( + envoy::api::v2::core::HttpProtocolOptions::DROP_HEADER); + }); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = + codec_client_->makeHeaderOnlyRequest(Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"foo_bar", "baz"}}); + waitForNextUpstreamRequest(); + + EXPECT_THAT(upstream_request_->headers(), Not(HeaderHasValueRef("foo_bar", "baz"))); + upstream_request_->encodeHeaders(Http::TestHeaderMapImpl{{":status", "200"}, {"bar_baz", "fooz"}}, + true); + response->waitForEndStream(); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_THAT(response->headers(), HeaderHasValueRef("bar_baz", "fooz")); +} + +// Verify that by default headers with underscores in their names remain in both requests and +// responses when allowed in configuration. 
+TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresRemainByDefault) { + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = + codec_client_->makeHeaderOnlyRequest(Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"foo_bar", "baz"}}); + waitForNextUpstreamRequest(); + + EXPECT_THAT(upstream_request_->headers(), HeaderHasValueRef("foo_bar", "baz")); + upstream_request_->encodeHeaders(Http::TestHeaderMapImpl{{":status", "200"}, {"bar_baz", "fooz"}}, + true); + response->waitForEndStream(); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_THAT(response->headers(), HeaderHasValueRef("bar_baz", "fooz")); +} + +// Verify that request with headers containing underscores is rejected when configured. +TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresCauseRequestRejectedByDefault) { + config_helper_.addConfigModifier( + [&](envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager& hcm) + -> void { + hcm.mutable_common_http_protocol_options()->set_headers_with_underscores_action( + envoy::api::v2::core::HttpProtocolOptions::REJECT_REQUEST); + }); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = + codec_client_->makeHeaderOnlyRequest(Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"foo_bar", "baz"}}); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + codec_client_->waitForDisconnect(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + } else { + response->waitForReset(); + codec_client_->close(); + ASSERT_TRUE(response->reset()); + EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->reset_reason()); + } +} + TEST_P(DownstreamProtocolIntegrationTest, 
ValidZeroLengthContent) { initialize(); @@ -926,6 +1006,16 @@ name: decode-headers-only EXPECT_EQ(0, upstream_request_->body().length()); } +TEST_P(DownstreamProtocolIntegrationTest, LargeRequestUrlRejected) { + // Send one 95 kB URL with limit 60 kB headers. + testLargeRequestUrl(95, 60); +} + +TEST_P(DownstreamProtocolIntegrationTest, LargeRequestUrlAccepted) { + // Send one 95 kB URL with limit 96 kB headers. + testLargeRequestUrl(95, 96); +} + TEST_P(DownstreamProtocolIntegrationTest, LargeRequestHeadersRejected) { // Send one 95 kB header with limit 60 kB and 100 headers. testLargeRequestHeaders(95, 1, 60, 100); diff --git a/test/integration/sds_dynamic_integration_test.cc b/test/integration/sds_dynamic_integration_test.cc index 38d8391444e91..e86a28a79e616 100644 --- a/test/integration/sds_dynamic_integration_test.cc +++ b/test/integration/sds_dynamic_integration_test.cc @@ -222,7 +222,7 @@ TEST_P(SdsDynamicDownstreamIntegrationTest, WrongSecretFirst) { }; initialize(); - codec_client_ = makeRawHttpConnection(makeSslClientConnection()); + codec_client_ = makeRawHttpConnection(makeSslClientConnection(), absl::nullopt); // the connection state is not connected. EXPECT_FALSE(codec_client_->connected()); codec_client_->connection()->close(Network::ConnectionCloseType::NoFlush); diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index aeb41d47ab82a..b211eb0c78d28 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -257,6 +257,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // 2019/09/30 8354 43310 44000 Implement transport socket match. 
// 2019/10/17 8537 43308 44000 add new enum value HTTP3 // 2019/10/17 8484 43340 44000 stats: add unit support to histogram + // 2020/06/09 11182 43404 44000 v1.12.x cherry-pick: Refactor resource manager // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -270,7 +271,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. - EXPECT_MEMORY_EQ(m_per_cluster, 43340); // 104 bytes higher than a debug build. + EXPECT_MEMORY_EQ(m_per_cluster, 43404); // 104 bytes higher than a debug build. EXPECT_MEMORY_LE(m_per_cluster, 44000); } @@ -301,6 +302,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // 2019/09/30 8354 34969 35000 Implement transport socket match. // 2019/10/17 8537 34966 35000 add new enum value HTTP3 // 2019/10/17 8484 34998 35000 stats: add unit support to histogram + // 2020/06/09 11182 35062 36000 v1.12.x cherry-pick: Refactor resource manager // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -314,7 +316,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. - EXPECT_MEMORY_EQ(m_per_cluster, 34998); // 104 bytes higher than a debug build. + EXPECT_MEMORY_EQ(m_per_cluster, 35062); // 104 bytes higher than a debug build. 
EXPECT_MEMORY_LE(m_per_cluster, 36000); } diff --git a/test/mocks/http/stream.h b/test/mocks/http/stream.h index 54b81c4fd912a..16531b3c79778 100644 --- a/test/mocks/http/stream.h +++ b/test/mocks/http/stream.h @@ -19,6 +19,7 @@ class MockStream : public Stream { MOCK_METHOD1(readDisable, void(bool disable)); MOCK_METHOD2(setWriteBufferWatermarks, void(uint32_t, uint32_t)); MOCK_METHOD0(bufferLimit, uint32_t()); + MOCK_METHOD1(setFlushTimeout, void(std::chrono::milliseconds timeout)); std::list callbacks_{}; diff --git a/test/mocks/network/mocks.cc b/test/mocks/network/mocks.cc index 7e3799968be62..fec947cc549f9 100644 --- a/test/mocks/network/mocks.cc +++ b/test/mocks/network/mocks.cc @@ -98,7 +98,7 @@ MockDrainDecision::MockDrainDecision() = default; MockDrainDecision::~MockDrainDecision() = default; MockListenerFilter::MockListenerFilter() = default; -MockListenerFilter::~MockListenerFilter() = default; +MockListenerFilter::~MockListenerFilter() { destroy_(); } MockListenerFilterCallbacks::MockListenerFilterCallbacks() { ON_CALL(*this, socket()).WillByDefault(ReturnRef(socket_)); diff --git a/test/mocks/network/mocks.h b/test/mocks/network/mocks.h index ee7b392e40bee..2605fb0577845 100644 --- a/test/mocks/network/mocks.h +++ b/test/mocks/network/mocks.h @@ -128,7 +128,8 @@ class MockListenerCallbacks : public ListenerCallbacks { void onAccept(ConnectionSocketPtr&& socket) override { onAccept_(socket); } - MOCK_METHOD1(onAccept_, void(ConnectionSocketPtr& socket)); + MOCK_METHOD(void, onAccept_, (ConnectionSocketPtr & socket)); + MOCK_METHOD(void, onReject, ()); }; class MockUdpListenerCallbacks : public UdpListenerCallbacks { @@ -164,6 +165,7 @@ class MockListenerFilter : public ListenerFilter { MockListenerFilter(); ~MockListenerFilter() override; + MOCK_METHOD0(destroy_, void()); MOCK_METHOD1(onAccept, Network::FilterStatus(ListenerFilterCallbacks&)); }; @@ -313,6 +315,7 @@ class MockListenerConfig : public ListenerConfig { MOCK_CONST_METHOD0(name, const 
std::string&()); MOCK_METHOD0(udpListenerFactory, const Network::ActiveUdpListenerFactory*()); MOCK_METHOD0(connectionBalancer, ConnectionBalancer&()); + MOCK_METHOD0(openConnections, ResourceLimit&()); envoy::api::v2::core::TrafficDirection direction() const override { return envoy::api::v2::core::TrafficDirection::UNSPECIFIED; diff --git a/test/mocks/runtime/BUILD b/test/mocks/runtime/BUILD index 7aed549bc3871..f406cbc85b95d 100644 --- a/test/mocks/runtime/BUILD +++ b/test/mocks/runtime/BUILD @@ -16,6 +16,7 @@ envoy_cc_mock( deps = [ "//include/envoy/runtime:runtime_interface", "//include/envoy/upstream:cluster_manager_interface", + "//source/common/common:empty_string", "//test/mocks:common_lib", ], ) diff --git a/test/mocks/runtime/mocks.cc b/test/mocks/runtime/mocks.cc index 2556987922c1b..e34429e74d985 100644 --- a/test/mocks/runtime/mocks.cc +++ b/test/mocks/runtime/mocks.cc @@ -1,11 +1,14 @@ #include "mocks.h" +#include "common/common/empty_string.h" + #include "gmock/gmock.h" #include "gtest/gtest.h" using testing::_; using testing::Return; using testing::ReturnArg; +using testing::ReturnRef; namespace Envoy { namespace Runtime { @@ -18,6 +21,7 @@ MockSnapshot::MockSnapshot() { ON_CALL(*this, getInteger(_, _)).WillByDefault(ReturnArg<1>()); ON_CALL(*this, getDouble(_, _)).WillByDefault(ReturnArg<1>()); ON_CALL(*this, getBoolean(_, _)).WillByDefault(ReturnArg<1>()); + ON_CALL(*this, get(_)).WillByDefault(ReturnRef(EMPTY_STRING)); } MockSnapshot::~MockSnapshot() = default; diff --git a/test/server/connection_handler_test.cc b/test/server/connection_handler_test.cc index 6ff0301a26713..f7244ac04d1ba 100644 --- a/test/server/connection_handler_test.cc +++ b/test/server/connection_handler_test.cc @@ -86,6 +86,12 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable udp_listener_factory_; Network::ConnectionBalancerPtr connection_balancer_; + BasicResourceLimitImpl open_connections_; }; using TestListenerPtr = std::unique_ptr; @@ 
-176,6 +183,98 @@ TEST_F(ConnectionHandlerTest, RemoveListenerDuringRebalance) { #endif } +TEST_F(ConnectionHandlerTest, ListenerConnectionLimitEnforced) { + auto listener1 = new NiceMock(); + Network::ListenerCallbacks* listener_callbacks1; + TestListener* test_listener1 = addListener(1, false, false, "test_listener1"); + EXPECT_CALL(dispatcher_, createListener_(_, _, _)) + .WillOnce( + Invoke([&](Network::Socket&, Network::ListenerCallbacks& cb, bool) -> Network::Listener* { + listener_callbacks1 = &cb; + return listener1; + })); + Network::Address::InstanceConstSharedPtr normal_address( + new Network::Address::Ipv4Instance("127.0.0.1", 10001)); + EXPECT_CALL(test_listener1->socket_, localAddress()).WillRepeatedly(ReturnRef(normal_address)); + // Only allow a single connection on this listener. + test_listener1->setMaxConnections(1); + handler_->addListener(*test_listener1); + + auto listener2 = new NiceMock(); + Network::ListenerCallbacks* listener_callbacks2; + TestListener* test_listener2 = addListener(2, false, false, "test_listener2"); + EXPECT_CALL(dispatcher_, createListener_(_, _, _)) + .WillOnce( + Invoke([&](Network::Socket&, Network::ListenerCallbacks& cb, bool) -> Network::Listener* { + listener_callbacks2 = &cb; + return listener2; + })); + Network::Address::InstanceConstSharedPtr alt_address( + new Network::Address::Ipv4Instance("127.0.0.2", 20002)); + EXPECT_CALL(test_listener2->socket_, localAddress()).WillRepeatedly(ReturnRef(alt_address)); + // Do not allow any connections on this listener. 
+ test_listener2->setMaxConnections(0); + handler_->addListener(*test_listener2); + + EXPECT_CALL(manager_, findFilterChain(_)).WillRepeatedly(Return(filter_chain_.get())); + EXPECT_CALL(factory_, createNetworkFilterChain(_, _)).WillRepeatedly(Return(true)); + Network::MockListenerFilter* test_filter = new Network::MockListenerFilter(); + EXPECT_CALL(*test_filter, destroy_()); + EXPECT_CALL(factory_, createListenerFilterChain(_)) + .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool { + manager.addAcceptFilter(Network::ListenerFilterPtr{test_filter}); + return true; + })); + EXPECT_CALL(*test_filter, onAccept(_)) + .WillRepeatedly(Invoke([&](Network::ListenerFilterCallbacks&) -> Network::FilterStatus { + return Network::FilterStatus::Continue; + })); + + // For listener 2, verify its connection limit is independent of listener 1. + + // We expect that listener 2 accepts the connection, so there will be a call to + // createServerConnection and active cx should increase, while cx overflow remains the same. + listener_callbacks2->onAccept( + Network::ConnectionSocketPtr{new NiceMock()}); + EXPECT_EQ(0, handler_->numConnections()); + EXPECT_EQ(0, TestUtility::findCounter(stats_store_, "downstream_cx_total")->value()); + EXPECT_EQ(0, TestUtility::findGauge(stats_store_, "downstream_cx_active")->value()); + EXPECT_EQ(1, TestUtility::findCounter(stats_store_, "downstream_cx_overflow")->value()); + + // For listener 1, verify connections are limited after one goes active. + + // First connection attempt should result in an active connection being created. + auto conn1 = new NiceMock(); + EXPECT_CALL(dispatcher_, createServerConnection_()).WillOnce(Return(conn1)); + listener_callbacks1->onAccept( + Network::ConnectionSocketPtr{new NiceMock()}); + EXPECT_EQ(1, handler_->numConnections()); + // Note that these stats are not the per-worker stats, but the per-listener stats. 
+ EXPECT_EQ(1, TestUtility::findCounter(stats_store_, "downstream_cx_total")->value()); + EXPECT_EQ(1, TestUtility::findGauge(stats_store_, "downstream_cx_active")->value()); + EXPECT_EQ(1, TestUtility::findCounter(stats_store_, "downstream_cx_overflow")->value()); + + // Don't expect server connection to be created, should be instantly closed and increment + // overflow stat. + listener_callbacks1->onAccept( + Network::ConnectionSocketPtr{new NiceMock()}); + EXPECT_EQ(1, handler_->numConnections()); + EXPECT_EQ(1, TestUtility::findCounter(stats_store_, "downstream_cx_total")->value()); + EXPECT_EQ(1, TestUtility::findGauge(stats_store_, "downstream_cx_active")->value()); + EXPECT_EQ(2, TestUtility::findCounter(stats_store_, "downstream_cx_overflow")->value()); + + // Check behavior again for good measure. + listener_callbacks1->onAccept( + Network::ConnectionSocketPtr{new NiceMock()}); + EXPECT_EQ(1, handler_->numConnections()); + EXPECT_EQ(1, TestUtility::findCounter(stats_store_, "downstream_cx_total")->value()); + EXPECT_EQ(1, TestUtility::findGauge(stats_store_, "downstream_cx_active")->value()); + EXPECT_EQ(3, TestUtility::findCounter(stats_store_, "downstream_cx_overflow")->value()); + + EXPECT_CALL(*listener1, onDestroy()); + EXPECT_CALL(*listener2, onDestroy()); +} + TEST_F(ConnectionHandlerTest, RemoveListener) { InSequence s; @@ -361,7 +460,8 @@ TEST_F(ConnectionHandlerTest, NormalRedirect) { EXPECT_CALL(test_listener2->socket_, localAddress()).WillRepeatedly(ReturnRef(alt_address)); handler_->addListener(*test_listener2); - Network::MockListenerFilter* test_filter = new Network::MockListenerFilter(); + auto* test_filter = new NiceMock(); + EXPECT_CALL(*test_filter, destroy_()); Network::MockConnectionSocket* accepted_socket = new NiceMock(); bool redirected = false; EXPECT_CALL(factory_, createListenerFilterChain(_)) @@ -432,6 +532,7 @@ TEST_F(ConnectionHandlerTest, FallbackToWildcardListener) { handler_->addListener(*test_listener2); 
Network::MockListenerFilter* test_filter = new Network::MockListenerFilter(); + EXPECT_CALL(*test_filter, destroy_()); Network::MockConnectionSocket* accepted_socket = new NiceMock(); bool redirected = false; EXPECT_CALL(factory_, createListenerFilterChain(_)) @@ -498,6 +599,7 @@ TEST_F(ConnectionHandlerTest, WildcardListenerWithOriginalDst) { cb.socket().restoreLocalAddress(original_dst_address); return Network::FilterStatus::Continue; })); + EXPECT_CALL(*test_filter, destroy_()); EXPECT_CALL(*accepted_socket, restoreLocalAddress(original_dst_address)); EXPECT_CALL(*accepted_socket, localAddressRestored()).WillOnce(Return(true)); EXPECT_CALL(*accepted_socket, localAddress()).WillRepeatedly(ReturnRef(original_dst_address)); @@ -529,6 +631,7 @@ TEST_F(ConnectionHandlerTest, WildcardListenerWithNoOriginalDst) { handler_->addListener(*test_listener1); Network::MockListenerFilter* test_filter = new Network::MockListenerFilter(); + EXPECT_CALL(*test_filter, destroy_()); Network::MockConnectionSocket* accepted_socket = new NiceMock(); EXPECT_CALL(factory_, createListenerFilterChain(_)) .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool { @@ -586,6 +689,7 @@ TEST_F(ConnectionHandlerTest, TransportProtocolCustom) { handler_->addListener(*test_listener); Network::MockListenerFilter* test_filter = new Network::MockListenerFilter(); + EXPECT_CALL(*test_filter, destroy_()); EXPECT_CALL(factory_, createListenerFilterChain(_)) .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool { manager.addAcceptFilter(Network::ListenerFilterPtr{test_filter}); @@ -644,6 +748,7 @@ TEST_F(ConnectionHandlerTest, ListenerFilterTimeout) { EXPECT_CALL(*timeout, disableTimer()); timeout->invokeCallback(); + EXPECT_CALL(*test_filter, destroy_()); dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(0UL, downstream_pre_cx_active.value()); EXPECT_EQ(1UL, stats_store_.counter("downstream_pre_cx_timeout").value()); @@ -672,7 +777,7 @@ 
TEST_F(ConnectionHandlerTest, ContinueOnListenerFilterTimeout) { EXPECT_CALL(test_listener->socket_, localAddress()); handler_->addListener(*test_listener); - Network::MockListenerFilter* test_filter = new Network::MockListenerFilter(); + Network::MockListenerFilter* test_filter = new NiceMock(); EXPECT_CALL(factory_, createListenerFilterChain(_)) .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool { manager.addAcceptFilter(Network::ListenerFilterPtr{test_filter}); @@ -691,7 +796,8 @@ TEST_F(ConnectionHandlerTest, ContinueOnListenerFilterTimeout) { Stats::Gauge& downstream_pre_cx_active = stats_store_.gauge("downstream_pre_cx_active", Stats::Gauge::ImportMode::Accumulate); EXPECT_EQ(1UL, downstream_pre_cx_active.value()); - + EXPECT_CALL(*test_filter, destroy_()); + // Barrier: test_filter must be destructed before findFilterChain EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(nullptr)); EXPECT_CALL(*timeout, disableTimer()); timeout->invokeCallback(); @@ -708,7 +814,6 @@ TEST_F(ConnectionHandlerTest, ContinueOnListenerFilterTimeout) { // Timeout is disabled once the listener filters complete. 
TEST_F(ConnectionHandlerTest, ListenerFilterTimeoutResetOnSuccess) { InSequence s; - TestListener* test_listener = addListener(1, true, false, "test_listener"); Network::MockListener* listener = new Network::MockListener(); Network::ListenerCallbacks* listener_callbacks; @@ -740,7 +845,7 @@ TEST_F(ConnectionHandlerTest, ListenerFilterTimeoutResetOnSuccess) { Event::MockTimer* timeout = new Event::MockTimer(&dispatcher_); EXPECT_CALL(*timeout, enableTimer(std::chrono::milliseconds(15000), _)); listener_callbacks->onAccept(Network::ConnectionSocketPtr{accepted_socket}); - + EXPECT_CALL(*test_filter, destroy_()); EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(nullptr)); EXPECT_CALL(*timeout, disableTimer()); listener_filter_cb->continueFilterChain(true); @@ -777,6 +882,7 @@ TEST_F(ConnectionHandlerTest, ListenerFilterDisabledTimeout) { return Network::FilterStatus::StopIteration; })); EXPECT_CALL(dispatcher_, createTimer_(_)).Times(0); + EXPECT_CALL(*test_filter, destroy_()); Network::MockConnectionSocket* accepted_socket = new NiceMock(); listener_callbacks->onAccept(Network::ConnectionSocketPtr{accepted_socket}); @@ -801,7 +907,6 @@ TEST_F(ConnectionHandlerTest, ListenerFilterReportError) { Network::MockListenerFilter* first_filter = new Network::MockListenerFilter(); Network::MockListenerFilter* last_filter = new Network::MockListenerFilter(); - EXPECT_CALL(factory_, createListenerFilterChain(_)) .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool { manager.addAcceptFilter(Network::ListenerFilterPtr{first_filter}); @@ -816,6 +921,8 @@ TEST_F(ConnectionHandlerTest, ListenerFilterReportError) { })); // The last filter won't be invoked EXPECT_CALL(*last_filter, onAccept(_)).Times(0); + EXPECT_CALL(*first_filter, destroy_()); + EXPECT_CALL(*last_filter, destroy_()); Network::MockConnectionSocket* accepted_socket = new NiceMock(); listener_callbacks->onAccept(Network::ConnectionSocketPtr{accepted_socket}); diff --git 
a/test/test_common/logging.cc b/test/test_common/logging.cc index 636bb56c4badb..d2cb5fa69cfed 100644 --- a/test/test_common/logging.cc +++ b/test/test_common/logging.cc @@ -2,6 +2,8 @@ #include "common/common/assert.h" +#include "absl/synchronization/mutex.h" + namespace Envoy { LogLevelSetter::LogLevelSetter(spdlog::level::level_enum log_level) { @@ -27,6 +29,8 @@ LogRecordingSink::~LogRecordingSink() = default; void LogRecordingSink::log(absl::string_view msg) { previous_delegate()->log(msg); + + absl::MutexLock ml(&mtx_); messages_.push_back(std::string(msg)); } diff --git a/test/test_common/logging.h b/test/test_common/logging.h index 62fe9f6d7d538..49d5b8ed5a0e4 100644 --- a/test/test_common/logging.h +++ b/test/test_common/logging.h @@ -8,6 +8,7 @@ #include "absl/strings/str_join.h" #include "absl/strings/str_split.h" +#include "absl/synchronization/mutex.h" #include "spdlog/spdlog.h" namespace Envoy { @@ -58,7 +59,8 @@ class LogRecordingSink : public Logger::SinkDelegate { const std::vector& messages() const { return messages_; } private: - std::vector messages_; + absl::Mutex mtx_; + std::vector messages_ ABSL_GUARDED_BY(mtx_); }; using StringPair = std::pair;