diff --git a/.bazelrc b/.bazelrc index 12995e93c..6023b902e 100644 --- a/.bazelrc +++ b/.bazelrc @@ -1,20 +1,21 @@ -# The following .bazelrc content is forked from the main Envoy repository. This is necessary since -# this needs to be available before we can access the Envoy repository contents via Bazel. - -build:clang-asan --test_timeout=900 -build:clang-tsan --test_timeout=900 -# See https://github.com/envoyproxy/nighthawk/issues/405 -build:macos --copt -UDEBUG - +# The following .bazelrc content is forked from the main Envoy repository. # unique +# This is necessary since this needs to be available before we can access # unique +# the Envoy repository contents via Bazel. # unique + # unique +build:clang-asan --test_timeout=900 # unique +build:clang-tsan --test_timeout=900 # unique +# See https://github.com/envoyproxy/nighthawk/issues/405 # unique +build:macos --copt -UDEBUG # unique + # unique # Envoy specific Bazel build/test options. # Bazel doesn't need more than 200MB of memory for local build based on memory profiling: # https://docs.bazel.build/versions/master/skylark/performance.html#memory-profiling # The default JVM max heapsize is 1/4 of physical memory up to 32GB which could be large -# enough to consume all memory constrained by cgroup in large host, which is the case in CircleCI. +# enough to consume all memory constrained by cgroup in large host. # Limiting JVM heapsize here to let it do GC more when approaching the limit to # leave room for compiler/linker. -# The number 2G is choosed heuristically to both support in CircleCI and large enough for RBE. +# The number 2G is chosen heuristically to both support large VM and small VM with RBE. # Startup options cannot be selected via config. startup --host_jvm_args=-Xmx2g @@ -27,7 +28,8 @@ build --host_javabase=@bazel_tools//tools/jdk:remote_jdk11 build --javabase=@bazel_tools//tools/jdk:remote_jdk11 build --enable_platform_specific_config -# Enable position independent code, this option is not supported on Windows and default on on macOS. +# Enable position independent code (this is the default on macOS and Windows) +# (Workaround for https://github.com/bazelbuild/rules_foreign_cc/issues/421) build:linux --copt=-fPIC build:linux --cxxopt=-std=c++17 build:linux --conlyopt=-fexceptions @@ -43,9 +45,6 @@ build --action_env=CXX build --action_env=LLVM_CONFIG build --action_env=PATH -# Skip system ICU linking. -build --@com_googlesource_googleurl//build_config:system_icu=0 - # Common flags for sanitizers build:sanitizer --define tcmalloc=disabled build:sanitizer --linkopt -ldl @@ -62,8 +61,10 @@ build:asan --config=sanitizer # ASAN install its signal handler, disable ours so the stacktrace will be printed by ASAN build:asan --define signal_trace=disabled build:asan --define ENVOY_CONFIG_ASAN=1 -build:asan --copt -fsanitize=address,undefined -build:asan --linkopt -fsanitize=address,undefined +# The following two lines were manually edited due to #593. # unique +# Flag undefined was dropped from both the lines to allow CI/ASAN to pass. # unique +build:asan --copt -fsanitize=address # unique +build:asan --linkopt -fsanitize=address # unique # vptr and function sanitizer are enabled in clang-asan if it is set up via bazel/setup_clang.sh. build:asan --copt -fno-sanitize=vptr,function build:asan --linkopt -fno-sanitize=vptr,function @@ -72,6 +73,9 @@ build:asan --copt -D__SANITIZE_ADDRESS__ build:asan --test_env=ASAN_OPTIONS=handle_abort=1:allow_addr2line=true:check_initialization_order=true:strict_init_order=true:detect_odr_violation=1 build:asan --test_env=UBSAN_OPTIONS=halt_on_error=true:print_stacktrace=1 build:asan --test_env=ASAN_SYMBOLIZER_PATH +# ASAN needs -O1 to get reasonable performance. +build:asan --copt -O1 +build:asan --copt -fno-optimize-sibling-calls # Clang ASAN/UBSAN build:clang-asan --config=asan @@ -120,7 +124,8 @@ build:libc++ --config=clang build:libc++ --action_env=CXXFLAGS=-stdlib=libc++ build:libc++ --action_env=LDFLAGS=-stdlib=libc++ build:libc++ --action_env=BAZEL_CXXOPTS=-stdlib=libc++ -build:libc++ --action_env=BAZEL_LINKLIBS=-l%:libc++.a:-l%:libc++abi.a:-lm +build:libc++ --action_env=BAZEL_LINKLIBS=-l%:libc++.a:-l%:libc++abi.a +build:libc++ --action_env=BAZEL_LINKOPTS=-lm:-pthread build:libc++ --define force_libcpp=enabled # Optimize build for binary size reduction. @@ -149,10 +154,10 @@ build:coverage --strategy=CoverageReport=sandboxed,local build:coverage --experimental_use_llvm_covmap build:coverage --collect_code_coverage build:coverage --test_tag_filters=-nocoverage -build:coverage --instrumentation_filter="//source(?!/extensions/quic_listeners/quiche/platform)[/:],//include[/:]" -coverage:test-coverage --test_arg="-l trace" -coverage:fuzz-coverage --config=plain-fuzzer -coverage:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh +build:coverage --instrumentation_filter="//source(?!/common/chromium_url|/extensions/quic_listeners/quiche/platform)[/:],//include[/:]" +build:test-coverage --test_arg="-l trace" +build:fuzz-coverage --config=plain-fuzzer +build:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh # Remote execution: https://docs.bazel.build/versions/master/remote-execution.html build:rbe-toolchain --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1 @@ -174,6 +179,16 @@ build:rbe-toolchain-clang-libc++ --action_env=CXXFLAGS=-stdlib=libc++ build:rbe-toolchain-clang-libc++ --action_env=LDFLAGS=-stdlib=libc++ build:rbe-toolchain-clang-libc++ --define force_libcpp=enabled +# Do not inherit from "clang-asan" to avoid picking up flags from local clang.bazelrc. +build:rbe-toolchain-asan --config=asan +build:rbe-toolchain-asan --linkopt -fuse-ld=lld +build:rbe-toolchain-asan --action_env=ENVOY_UBSAN_VPTR=1 +build:rbe-toolchain-asan --copt=-fsanitize=vptr,function +build:rbe-toolchain-asan --linkopt=-fsanitize=vptr,function +build:rbe-toolchain-asan --linkopt=-L/opt/llvm/lib/clang/10.0.0/lib/linux +build:rbe-toolchain-asan --linkopt=-l:libclang_rt.ubsan_standalone-x86_64.a +build:rbe-toolchain-asan --linkopt=-l:libclang_rt.ubsan_standalone_cxx-x86_64.a + build:rbe-toolchain-msan --linkopt=-L/opt/libcxx_msan/lib build:rbe-toolchain-msan --linkopt=-Wl,-rpath,/opt/libcxx_msan/lib build:rbe-toolchain-msan --config=clang-msan @@ -224,6 +239,10 @@ build:remote-clang-libc++ --config=rbe-toolchain-clang-libc++ build:remote-gcc --config=remote build:remote-gcc --config=rbe-toolchain-gcc +build:remote-asan --config=remote +build:remote-asan --config=rbe-toolchain-clang-libc++ +build:remote-asan --config=rbe-toolchain-asan + build:remote-msan --config=remote build:remote-msan --config=rbe-toolchain-clang-libc++ build:remote-msan --config=rbe-toolchain-msan @@ -237,8 +256,8 @@ build:remote-clang-cl --config=clang-cl build:remote-clang-cl --config=rbe-toolchain-clang-cl # Docker sandbox -# NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:e7ea4e81bbd5028abb9d3a2f2c0afe063d9b62c0 +# NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/main/toolchains/rbe_toolchains_config.bzl#L8 +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:c8fa4235714003ba0896287ee2f91cae06e0e407 build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker @@ -269,23 +288,42 @@ build:remote-ci --remote_cache=grpcs://remotebuildexecution.googleapis.com build:remote-ci --remote_executor=grpcs://remotebuildexecution.googleapis.com # Fuzz builds -# -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION is passed in in the bazel build target -# rules for fuzz tests. Passing it in the CLI will cause dependencies to be build -# with the macro. Causing issues in RouteMatcherTest.TestRoutes that expect prod -# behavior from RE2 library. -build:asan-fuzzer --config=asan -build:asan-fuzzer --define=FUZZING_ENGINE=libfuzzer -build:asan-fuzzer --copt=-fsanitize=fuzzer-no-link -build:asan-fuzzer --copt=-fno-omit-frame-pointer -# Remove UBSAN halt_on_error to avoid crashing on protobuf errors. -build:asan-fuzzer --test_env=UBSAN_OPTIONS=print_stacktrace=1 + +# Shared fuzzing configuration. +build:fuzzing --define=ENVOY_CONFIG_ASAN=1 +build:fuzzing --copt=-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION +build:fuzzing --config=libc++ # Fuzzing without ASAN. This is useful for profiling fuzzers without any ASAN artifacts. +build:plain-fuzzer --config=fuzzing build:plain-fuzzer --define=FUZZING_ENGINE=libfuzzer -build:plain-fuzzer --define ENVOY_CONFIG_ASAN=1 +# The fuzzing rules provide their own instrumentation, but it is currently +# disabled due to bazelbuild/bazel#12888. Instead, we provide instrumentation at +# the top level through these options. build:plain-fuzzer --copt=-fsanitize=fuzzer-no-link build:plain-fuzzer --linkopt=-fsanitize=fuzzer-no-link +build:asan-fuzzer --config=plain-fuzzer +build:asan-fuzzer --config=asan +build:asan-fuzzer --copt=-fno-omit-frame-pointer +# Remove UBSAN halt_on_error to avoid crashing on protobuf errors. +build:asan-fuzzer --test_env=UBSAN_OPTIONS=print_stacktrace=1 + +build:oss-fuzz --config=fuzzing +build:oss-fuzz --define=FUZZING_ENGINE=oss-fuzz +build:oss-fuzz --@rules_fuzzing//fuzzing:cc_engine_instrumentation=oss-fuzz +build:oss-fuzz --@rules_fuzzing//fuzzing:cc_engine_sanitizer=none +build:oss-fuzz --dynamic_mode=off +build:oss-fuzz --strip=never +build:oss-fuzz --copt=-fno-sanitize=vptr +build:oss-fuzz --linkopt=-fno-sanitize=vptr +build:oss-fuzz --define=tcmalloc=disabled +build:oss-fuzz --define=signal_trace=disabled +build:oss-fuzz --copt=-D_LIBCPP_DISABLE_DEPRECATION_WARNINGS +build:oss-fuzz --define=force_libcpp=enabled +build:oss-fuzz --linkopt=-lc++ +build:oss-fuzz --linkopt=-pthread + # Compile database generation config build:compdb --build_tag_filters=-nocompdb @@ -294,30 +332,37 @@ build:windows --action_env=TMPDIR build:windows --define signal_trace=disabled build:windows --define hot_restart=disabled build:windows --define tcmalloc=disabled +build:windows --define wasm=disabled build:windows --define manual_stamp=manual_stamp +build:windows --cxxopt="/std:c++17" -# Should not be required after upstream fix to bazel, -# and already a no-op to linux/macos builds -# see issue https://github.com/bazelbuild/rules_foreign_cc/issues/301 +# TODO(wrowe,sunjayBhatia): Resolve bugs upstream in curl and rules_foreign_cc +# See issue https://github.com/bazelbuild/rules_foreign_cc/issues/301 build:windows --copt="-DCARES_STATICLIB" build:windows --copt="-DNGHTTP2_STATICLIB" build:windows --copt="-DCURL_STATICLIB" -build:windows --cxxopt="/std:c++17" -# Required to work around build defects on Windows MSVC cl -# Unguarded gcc pragmas in quiche are not recognized by MSVC -build:msvc-cl --copt="/wd4068" -# Allows 'nodiscard' function return values to be discarded -build:msvc-cl --copt="/wd4834" -# Allows inline functions to be undefined -build:msvc-cl --copt="/wd4506" -build:msvc-cl --copt="-D_SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING" +# Override any clang preference if building msvc-cl +# Drop the determinism feature (-DDATE etc are a no-op in msvc-cl) +build:msvc-cl --action_env=USE_CLANG_CL="" +build:msvc-cl --define clang_cl=0 +build:msvc-cl --features=-determinism + +# Windows build behaviors when using clang-cl +build:clang-cl --action_env=USE_CLANG_CL=1 +build:clang-cl --define clang_cl=1 # Required to work around Windows clang-cl build defects # Ignore conflicting definitions of _WIN32_WINNT -# Overriding __TIME__ etc is problematic (and is actually an invalid no-op) +# Override determinism flags (DATE etc) is valid on clang-cl compiler build:clang-cl --copt="-Wno-macro-redefined" build:clang-cl --copt="-Wno-builtin-macro-redefined" +# Workaround problematic missing override declarations of mocks +# TODO: resolve this class of problematic mocks, e.g. +# ./test/mocks/http/stream.h(16,21): error: 'addCallbacks' +# overrides a member function but is not marked 'override' +# MOCK_METHOD(void, addCallbacks, (StreamCallbacks & callbacks)); +build:clang-cl --copt="-Wno-inconsistent-missing-override" build:clang-cl --action_env=USE_CLANG_CL=1 # Defaults to 'auto' - Off for windows, so override to linux behavior diff --git a/.bazelversion b/.bazelversion index 47b322c97..0b2eb36f5 100644 --- a/.bazelversion +++ b/.bazelversion @@ -1 +1 @@ -3.4.1 +3.7.2 diff --git a/.circleci/config.yml b/.circleci/config.yml index e34bc1422..f82b4cc15 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,6 +1,6 @@ references: - envoy-build-image: &envoy-build-image # September 9th, 2020 - envoyproxy/envoy-build-ubuntu:e7ea4e81bbd5028abb9d3a2f2c0afe063d9b62c0 + envoy-build-image: &envoy-build-image # February 14th, 2021 + envoyproxy/envoy-build-ubuntu:c8fa4235714003ba0896287ee2f91cae06e0e407 version: 2 jobs: build: @@ -30,7 +30,9 @@ jobs: resource_class: xlarge steps: - checkout - - run: ci/do_ci.sh test_gcc + - run: + command: ci/do_ci.sh test_gcc + no_output_timeout: 30m coverage: docker: - image: *envoy-build-image diff --git a/.github/ISSUE_TEMPLATE/PULL_REQUEST_TEMPLATE.md b/.github/ISSUE_TEMPLATE/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..a91d54f04 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,16 @@ +**Description** + + + +This PR is related to # + + +**Notes for Reviewers** + + \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md new file mode 100644 index 000000000..6ee5fa021 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug.md @@ -0,0 +1,32 @@ +--- +name: Non-{crash,security} bug +about: Bugs which are not crashes, DoS or other security issue +title: '' +labels: bug,triage +assignees: '' + +--- + +*Title*: *One line description* + +*Description*: +>What issue is being seen? Describe what should be happening instead of +the bug, for example: Nighthawk should not crash, the expected value isn't +returned, etc. + +*Reproduction steps*: +> Include sample requests, environment, etc. All data and inputs +required to reproduce the bug. + +>**Note**: If there are privacy concerns, sanitize the data prior to +sharing. + +*Logs*: +>Include the Nighthawk logs. + +>**Note**: If there are privacy concerns, sanitize the data prior to +sharing. + +*Call Stack*: +> If the Envoy binary is crashing, a call stack is **required**. +Please refer to the [Bazel Stack trace documentation](https://github.com/envoyproxy/envoy/tree/master/bazel#stack-trace-symbol-resolution). \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 000000000..ef966b857 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,17 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: enhancement,triage +assignees: '' + +--- + +*Title*: *One line description* + +*Description*: +>Describe the the desired behavior, what scenario it enables and how it +would be used. + +[optional *Relevant Links*:] +>Any extra documentation required to understand the issue. \ No newline at end of file diff --git a/BUILD b/BUILD index 662004eec..1b849c544 100644 --- a/BUILD +++ b/BUILD @@ -11,6 +11,7 @@ envoy_package() filegroup( name = "nighthawk", srcs = [ + ":nighthawk_adaptive_load_client", ":nighthawk_client", ":nighthawk_output_transform", ":nighthawk_service", @@ -18,6 +19,14 @@ filegroup( ], ) +envoy_cc_binary( + name = "nighthawk_adaptive_load_client", + repository = "@envoy", + deps = [ + "//source/exe:adaptive_load_client_entry_lib", + ], +) + envoy_cc_binary( name = "nighthawk_client", repository = "@envoy", diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9eabaf0a3..4ca44db8f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,7 +8,7 @@ maximize the chances of your PR being merged. # Coding style -* Coding style mirrors [Envoy's policy](https://github.com/envoyproxy/envoy/blob/master/STYLE.md) +* Coding style mirrors [Envoy's policy](https://github.com/envoyproxy/envoy/blob/main/STYLE.md) # Breaking change policy @@ -16,22 +16,22 @@ Both API and implementation stability are important to Nighthawk. Since the API # Submitting a PR -* Generally Nighthawk mirrors [Envoy's policy](https://github.com/envoyproxy/envoy/blob/master/CONTRIBUTING.md#submitting-a-pr) with respect to PR submission policy. +* Generally Nighthawk mirrors [Envoy's policy](https://github.com/envoyproxy/envoy/blob/main/CONTRIBUTING.md#submitting-a-pr) with respect to PR submission policy. * Any PR that changes user-facing behavior **must** have associated documentation in [docs](docs) as well as [release notes](docs/root/version_history.md). # PR review policy for maintainers -* Generally Nighthawk mirrors [Envoy's policy](https://github.com/envoyproxy/envoy/blob/master/CONTRIBUTING.md#pr-review-policy-for-maintainers) with respect to maintainer review policy. +* Generally Nighthawk mirrors [Envoy's policy](https://github.com/envoyproxy/envoy/blob/main/CONTRIBUTING.md#pr-review-policy-for-maintainers) with respect to maintainer review policy. * See [OWNERS.md](OWNERS.md) for the current list of maintainers. * It is helpful if you apply the label `waiting-for-review` to any PRs that are ready to be reviewed by a maintainer. * Reviewers will change the label to `waiting-for-changes` when responding. # DCO: Sign your work -Commits need to be signed off. See [here](https://github.com/envoyproxy/envoy/blob/master/CONTRIBUTING.md#dco-sign-your-work). +Commits need to be signed off. See [here](https://github.com/envoyproxy/envoy/blob/main/CONTRIBUTING.md#dco-sign-your-work). ## Triggering CI re-run without making changes -See [here](https://github.com/envoyproxy/envoy/blob/master/CONTRIBUTING.md#triggering-ci-re-run-without-making-changes). \ No newline at end of file +See [here](https://github.com/envoyproxy/envoy/blob/main/CONTRIBUTING.md#triggering-ci-re-run-without-making-changes). diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 8535ce1a3..b4f8a5c40 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -4,31 +4,79 @@ This document aims to assist [maintainers](OWNERS.md). ## Envoy domain expertise -As a guideline, concepts in Nighthawk that are derived from Envoy -require someone with Envoy domain expertise in review. Notable examples -are the way Nighthawk internally computes cluster configuration, its -connection pool derivations, the `StreamDecoder` class, as well as anything related to the Nighthawk test server. +As a guideline, concepts in Nighthawk that are derived from Envoy require +someone with Envoy domain expertise in review. Notable examples are the way +Nighthawk internally computes cluster configuration, its connection pool +derivations, the `StreamDecoder` class, as well as anything related to the +Nighthawk test server. -See [OWNERS.md](OWNERS.md) to find maintainers with expertise of -Envoy internals. +See [OWNERS.md](OWNERS.md) to find maintainers with expertise of Envoy +internals. ## Pre-merge checklist -- Does the PR have breaking changes? Then that should be explicitly mentioned in the [version history](docs/root/version_history.md). -- New features should be added to the [version history](docs/root/version_history.md). +- Does the PR have breaking changes? Then that should be explicitly mentioned in + the [version history](docs/root/version_history.md). +- New features should be added to the + [version history](docs/root/version_history.md). - Breaking changes to the [protobuf APIs](api/) are not allowed. -- When merging, clean up the commit message so we get a nice history. By default, - github will compile a message from all the commits that are squashed. - The PR title and description should be a good starting point for the final commit message. - (If it is not, it may be worth asking the PR author to update the description). +- When merging, clean up the commit message so we get a nice history. By + default, github will compile a message from all the commits that are squashed. + The PR title and description should be a good starting point for the final + commit message. (If it is not, it may be worth asking the PR author to update + the description). - Make sure that the DCO signoff is included in the final commit message. - - As a convention, it is appropriate to exclude content in the PR description that occurs after the signoff. + - As a convention, it is appropriate to exclude content in the PR description + that occurs after the signoff. ## Updates to the Envoy dependency -We try to [regularly synchronize our Envoy dependency](https://github.com/envoyproxy/nighthawk/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aclosed+%22update+envoy%22+) with the latest revision. Nighthawk reuses large parts of Envoy's build system and CI infrastructure. When we update, that looks like: +We aim to +[synchronize our Envoy dependency](https://github.com/envoyproxy/nighthawk/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aclosed+%22update+envoy%22+) +with the latest revision weekly. Nighthawk reuses large parts of Envoy's build +system and codebase, so keeping Nighthawk up to date with Envoy's changes is an +important maintenance task. When performing the update, follow this procedure: -- A change to [repositories.bzl](bazel/repositories.bzl) to update the commit and SHA. -- A sync of [.bazelrc](.bazelrc) with [Envoy's version](https://github.com/envoyproxy/envoy/blob/master/.bazelrc) to update our build configurations. -- A sync of the build image sha used in the [ci configuration](.circleci/config.yml) with [Envoy's version](https://github.com/envoyproxy/envoy/blob/master/.circleci/config.yml) to sync our CI testing environment. -- Sometimes the dependency update comes with changes that break our build. We include any changes required to Nighthawk to fix that. +1. Create a fork of Nighthawk, or fetch upstream and merge changes into your + fork if you already have one. +1. Create a new branch from `main`, e.g. `envoy-update`. +1. Edit [bazel/repositories.bzl](bazel/repositories.bzl). + 1. Update `ENVOY_COMMIT` to the latest Envoy's commit from + [this page](https://github.com/envoyproxy/envoy/commits/main). (Clicking on the + short commit id opens a page that contains the fully expanded commit id). + 1. Set `ENVOY_SHA` to an empty string initially, we will get the correct + sha256 after the first bazel execution. + Example content of `bazel/repositories.bzl` after the edits: + ``` + ENVOY_COMMIT = "9753819331d1547c4b8294546a6461a3777958f5"  # Jan 24th, 2021 + ENVOY_SHA = "" + ``` + 1. Run `ci/do_ci.sh build`, notice the sha256 value at the top of the output, + example: + ``` + INFO: SHA256 (https://github.com/envoyproxy/envoy/archive/9753819331d1547c4b8294546a6461a3777958f5.tar.gz) = f4d26c7e78c0a478d959ea8bc877f260d4658a8b44e294e3a400f20ad44d41a3 + ``` +1. Update `ENVOY_SHA` in [bazel/repositories.bzl](bazel/repositories.bzl) to + this value. +1. Sync (copy) [.bazelrc](.bazelrc) from + [Envoy's version](https://github.com/envoyproxy/envoy/blob/main/.bazelrc) to + update our build configurations. Be sure to retain our local modifications, + all lines that are unique to Nighthawk are marked with comment `# unique`. +1. In the updated [.bazelrc](.bazelrc) search for `experimental_docker_image`. + Copy the SHA and update `envoy-build-image:sha` over at the top of [.circleci/config.yml](.circleci/config.yml). +1. Sync (copy) [.bazelversion](.bazelversion) from + [Envoy's version](https://github.com/envoyproxy/envoy/blob/main/.bazelversion) + to ensure we are using the same build system version. +1. Sync (copy) [ci/run_envoy_docker.sh](ci/run_envoy_docker.sh) from + [Envoy's version](https://github.com/envoyproxy/envoy/blob/main/ci/run_envoy_docker.sh). +1. Run `ci/do_ci.sh test`. Sometimes the dependency update comes with changes + that break our build. Include any changes required to Nighthawk to fix that + in the same PR. +1. Create a PR with a title like `Update Envoy to 9753819 (Jan 24th 2021)`, + describe all performed changes in the PR's descriotion. +1. If the PR ends up modifying any c++ files, execute `ci/do_ci.sh fix_format` + to reformat the files and avoid a CI failure. +1. If the PR ends up modifying any CLI arguments, execute + `tools/update_cli_readme_documentation.sh --mode fix` to regenerate the + portion of our documentation that captures the CLI help output. This will + prevent a CI failure. diff --git a/PROFILING.md b/PROFILING.md index 23e5ddb6a..aa9a508d1 100644 --- a/PROFILING.md +++ b/PROFILING.md @@ -41,7 +41,7 @@ The interface served at localhost:8888 gives you various means to help with anal ### Envoy build -See [building Envoy with Bazel](https://github.com/envoyproxy/envoy/tree/master/bazel#building-envoy-with-bazel). +See [building Envoy with Bazel](https://github.com/envoyproxy/envoy/tree/main/bazel#building-envoy-with-bazel). Envoy’s static build is set up for profiling and can be build with: @@ -49,7 +49,7 @@ Envoy’s static build is set up for profiling and can be build with: bazel build //source/exe:envoy-static ``` -More context: https://github.com/envoyproxy/envoy/blob/master/bazel/PPROF.md +More context: https://github.com/envoyproxy/envoy/blob/main/bazel/PPROF.md ### Nighthawk build diff --git a/README.md b/README.md index e5bdb24ba..dbbd6d238 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ Nighthawk currently offers: ### Ubuntu -First, follow steps 1 and 2 over at [Quick start Bazel build for developers](https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#quick-start-bazel-build-for-developers). +First, follow steps 1 and 2 over at [Quick start Bazel build for developers](https://github.com/envoyproxy/envoy/blob/main/bazel/README.md#quick-start-bazel-build-for-developers). ## Building and using the Nighthawk client CLI @@ -43,10 +43,12 @@ bazel build -c opt //:nighthawk ``` USAGE: -bazel-bin/nighthawk_client [--latency-response-header-name ] +bazel-bin/nighthawk_client [--allow-envoy-deprecated-v2-api] +[--latency-response-header-name ] [--stats-flush-interval ] [--stats-sinks ] ... [--no-duration] [--simple-warmup] +[--request-source-plugin-config ] [--request-source ] [--label ] ... [--multi-target-use-https] [--multi-target-path ] @@ -82,6 +84,10 @@ format> Where: +--allow-envoy-deprecated-v2-api +Set to allow usage of the v2 api. (Not recommended, support will stop +in Q1 2021). Default: false + --latency-response-header-name Set an optional header name that will be returned in responses, whose values will be tracked in a latency histogram if set. Can be used in @@ -109,10 +115,20 @@ Perform a simple single warmup request (per worker) before starting execution. Note that this will be reflected in the counters that Nighthawk writes to the output. Default is false. +--request-source-plugin-config +[Request +Source](https://github.com/envoyproxy/nighthawk/blob/main/docs/root/ov +erview.md#requestsource) plugin configuration in json or compact yaml. +Mutually exclusive with --request-source. Example (json): +{name:"nighthawk.stub-request-source-plugin" +,typed_config:{"@type":"type.googleapis.com/nighthawk.request_source.S +tubPluginConfig",test_value:"3"}} + --request-source Remote gRPC source that will deliver to-be-replayed traffic. Each worker will separately connect to this source. For example -grpc://127.0.0.1:8443/. +grpc://127.0.0.1:8443/. Mutually exclusive with +--request_source_plugin_config. --label (accepted multiple times) Label. Allows specifying multiple labels which will be persisted in @@ -187,8 +203,8 @@ any other value will allow client-side queuing of requests). Transport socket configuration in json or compact yaml. Mutually exclusive with --tls-context. Example (json): {name:"envoy.transport_sockets.tls" -,typed_config:{"@type":"type.googleapis.com/envoy.api.v2.auth.Upstream -TlsContext" +,typed_config:{"@type":"type.googleapis.com/envoy.extensions.transport +_sockets.tls.v3.UpstreamTlsContext" ,common_tls_context:{tls_params:{cipher_suites:["-ALL:ECDHE-RSA-AES128 -SHA"]}}}} diff --git a/RELEASE_PROCEDURE.md b/RELEASE_PROCEDURE.md index e8a3542d0..2950da642 100644 --- a/RELEASE_PROCEDURE.md +++ b/RELEASE_PROCEDURE.md @@ -14,7 +14,7 @@ ## Release steps -1. Speculatively bump the version in [version_info.h](source/common/version_info.h) to the version you determined earlier. This may result in version gaps if a release attempt fails, but avoids having to freeze merges to master and/or having to work with release branches. In short it helps keeping the release procedure lean and mean and eliminates the need for blocking others while this procedure is in-flight. +1. Speculatively bump the version in [version_info.h](source/common/version_info.h) to the version you determined earlier. This may result in version gaps if a release attempt fails, but avoids having to freeze merges to main and/or having to work with release branches. In short it helps keeping the release procedure lean and mean and eliminates the need for blocking others while this procedure is in-flight. 2. Draft a [GitHub tagged release](https://github.com/envoyproxy/nighthawk/releases/new). Earlier releases are tagged like `v0.1`, but as of `0.3.0`we are using [semantic versioning](https://semver.org/spec/v2.0.0.html) 3. Perform thorough testing of the targeted revision to double down on stability [1] 4. Create an optimized build for comparing with the previous release. Changes in performance diff --git a/api/adaptive_load/BUILD b/api/adaptive_load/BUILD index 009bbea75..6cfd818ec 100644 --- a/api/adaptive_load/BUILD +++ b/api/adaptive_load/BUILD @@ -15,7 +15,7 @@ api_cc_py_proto_library( ], visibility = ["//visibility:public"], deps = [ + "//api/client:base", "@envoy_api//envoy/config/core/v3:pkg", - "@nighthawk//api/client:base", ], ) diff --git a/api/adaptive_load/adaptive_load.proto b/api/adaptive_load/adaptive_load.proto index 43d2456e7..2bf0c71ed 100644 --- a/api/adaptive_load/adaptive_load.proto +++ b/api/adaptive_load/adaptive_load.proto @@ -31,7 +31,7 @@ message AdaptiveLoadSessionSpec { // visualization. Optional. repeated MetricSpec informational_metric_specs = 3; // A proto describing Nighthawk Service traffic. See - // https://github.com/envoyproxy/nighthawk/blob/master/api/client/options.proto + // https://github.com/envoyproxy/nighthawk/blob/main/api/client/options.proto // // The adaptive load controller will return an error if the |duration| field is set within // |nighthawk_traffic_template|. diff --git a/api/client/options.proto b/api/client/options.proto index 6e2aa1841..3274dd015 100644 --- a/api/client/options.proto +++ b/api/client/options.proto @@ -3,10 +3,12 @@ syntax = "proto3"; package nighthawk.client; import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/metrics/v3/stats.proto"; import "envoy/extensions/transport_sockets/tls/v3/cert.proto"; +import "envoy/config/core/v3/extension.proto"; import "validate/validate.proto"; // Allows for static configuration of requests that should be send by the load generator. @@ -18,6 +20,12 @@ message RequestOptions { google.protobuf.UInt32Value request_body_size = 3 [(validate.rules).uint32 = {lte: 4194304}]; } +// Used for providing multiple request options, especially for RequestSourcePlugins. +message RequestOptionsList { + // Each option is used for a separate request to be sent by the requestSource. + repeated RequestOptions options = 1; +} + // Configures a remote gRPC source that will deliver to-be-replayed request data to Nighthawks // workers. message RequestSource { @@ -98,6 +106,7 @@ message H1ConnectionReuseStrategy { // TODO(oschaaf): Ultimately this will be a load test specification. The fact that it // can arrive via CLI is just a concrete detail. Change this to reflect that. +// highest unused number is 39 message CommandLineOptions { // The target requests-per-second rate. Default: 5. google.protobuf.UInt32Value requests_per_second = 1 @@ -142,6 +151,9 @@ message CommandLineOptions { // Remote gRPC source that will deliver to-be-replayed traffic. Each worker will separately // connect to this source. RequestSource request_source = 26; + // A plugin config that is to be parsed by a RequestSourcePluginConfigFactory and used to create + // an in memory request source. + envoy.config.core.v3.TypedExtensionConfig request_source_plugin_config = 37; } // DEPRECATED, use --transport-socket instead. Tls context configuration in json or compact yaml. // Mutually exclusive with --transport-socket. @@ -208,4 +220,9 @@ message CommandLineOptions { // "emit_previous_request_delta_in_response_header" to record elapsed time between request // arrivals. google.protobuf.StringValue latency_response_header_name = 36; + // Set to allow usage of the v2 api. (Not recommended, support will stop in Q1 2021). + google.protobuf.BoolValue allow_envoy_deprecated_v2_api = 38 [deprecated = true]; + // Provide an execution starting date and time. Optional, any value specified must be in the + // future. + google.protobuf.Timestamp scheduled_start = 105; } diff --git a/api/client/output.proto b/api/client/output.proto index 8e74f34f4..cc3e339db 100644 --- a/api/client/output.proto +++ b/api/client/output.proto @@ -49,6 +49,7 @@ message Result { repeated Statistic statistics = 2; repeated Counter counters = 3; google.protobuf.Duration execution_duration = 4; + google.protobuf.Timestamp execution_start = 5; } message Output { diff --git a/api/request_source/BUILD b/api/request_source/BUILD index 4e808f691..2473d6a2e 100644 --- a/api/request_source/BUILD +++ b/api/request_source/BUILD @@ -13,6 +13,19 @@ api_cc_py_proto_library( "@envoy_api//envoy/api/v2/auth:pkg", "@envoy_api//envoy/api/v2/cluster:pkg", "@envoy_api//envoy/api/v2/core:pkg", + "@envoy_api//envoy/config/core/v3:pkg", + ], +) + +api_cc_py_proto_library( + name = "request_source_plugin", + srcs = [ + "request_source_plugin.proto", + ], + visibility = ["//visibility:public"], + deps = [ + "@envoy_api//envoy/config/core/v3:pkg", + "@nighthawk//api/client:base", ], ) diff --git a/api/request_source/request_source_plugin.proto b/api/request_source/request_source_plugin.proto new file mode 100644 index 000000000..5d56802d9 --- /dev/null +++ b/api/request_source/request_source_plugin.proto @@ -0,0 +1,53 @@ +// Config protos for the Request Source Plugin Config Factories. +syntax = "proto3"; + +package nighthawk.request_source; + +import "google/protobuf/wrappers.proto"; +import "validate/validate.proto"; +import "api/client/options.proto"; + +// Configuration for OptionsListFromFileRequestSourceFactory (plugin name: +// "nighthawk.file-options-list-request-source-plugin") +// The factory will load the RequestOptionsList from the file, and then passes it to the +// requestSource it generates. The resulting request source will loop over the RequestOptionsList it +// is passed. +message FileBasedOptionsListRequestSourceConfig { + // The file_path is the path to a file that contains a RequestOptionList in json or yaml format. + // This field is required. + string file_path = 1; + // The pluginfactory makes requestSources that will generate requests from the RequestOptionList + // up to num_requests number of times. If num_requests exceeds the number of RequestOptions in the + // RequestOptionList located in the file at file_path, it will loop. num_requests = 0 means it + // will loop indefinitely, though it will still terminate by normal mechanisms. + uint32 num_requests = 2; + // The pluginfactory will load the file located in file_path as long as it is below max_file_size + // in bytes, if it's too large it will throw an error. This field is optional with a default of + // 1000000. + google.protobuf.UInt32Value max_file_size = 3 [(validate.rules).uint32 = {lte: 1000000}]; +} + +// Configuration for OptionsListFromProtoRequestSourceFactory (plugin name: +// "nighthawk.in-line-options-list-request-source-plugin") +// The resulting request source will loop over the RequestOptionsList it +// is passed. +message InLineOptionsListRequestSourceConfig { + // The options_list will be used to generate Requests in the RequestSource. This field is + // required. + nighthawk.client.RequestOptionsList options_list = 1; + // The pluginfactory makes requestSources that will generate requests from the RequestOptionList + // up to num_requests number of times. If num_requests exceeds the number of RequestOptions in the + // options_list, it will loop. num_requests = 0 means it will loop indefinitely, though it will + // still terminate by normal mechanisms. + uint32 num_requests = 2; +} + +// Configuration for StubPluginRequestSource (plugin name: "nighthawk.stub-request-source-plugin") +// The plugin does nothing. This is for testing and comparison of the Request Source Plugin Factory +// mechanism using a minimal version of plugin that does not require a more complicated proto or +// file reading. +message StubPluginConfig { + // test input value which is the only output value in the headers produced from the + // requestGenerator for the StubRequestSource. + google.protobuf.DoubleValue test_value = 1; +} diff --git a/api/request_source/service.proto b/api/request_source/service.proto index 9ab934558..e1acdb135 100644 --- a/api/request_source/service.proto +++ b/api/request_source/service.proto @@ -2,8 +2,9 @@ syntax = "proto3"; package nighthawk.request_source; -import "google/protobuf/wrappers.proto"; import "envoy/api/v2/core/base.proto"; +import "envoy/config/core/v3/base.proto"; +import "google/protobuf/wrappers.proto"; // Used to request a RequestStreamResponse. message RequestStreamRequest { @@ -37,9 +38,18 @@ message RequestSpecifier { // Request content length. The client will transfer the number of bytes specified here for the // request body. google.protobuf.UInt32Value content_length = 4; - // Request header replacements. Any existing header(s) with the same name will be removed - // before setting. - envoy.api.v2.core.HeaderMap headers = 5; + + oneof oneof_headers { + // Request header replacements. Any existing header(s) with the same name will be removed + // before setting. + // + // Envoy deprecated its v2 API, prefer to use v3_headers instead. + envoy.api.v2.core.HeaderMap headers = 5 [deprecated = true]; + + // Request header replacements. Any existing header(s) with the same name will be removed + // before setting. + envoy.config.core.v3.HeaderMap v3_headers = 6; + } // TODO(oschaaf): nice to have // google.protobuf.StringValue sni_hostname = 10; } diff --git a/api/server/BUILD b/api/server/BUILD index 9b00942d9..312e283e5 100644 --- a/api/server/BUILD +++ b/api/server/BUILD @@ -13,5 +13,6 @@ api_cc_py_proto_library( srcs = ["response_options.proto"], deps = [ "@envoy_api//envoy/api/v2/core:pkg", + "@envoy_api//envoy/config/core/v3:pkg", ], ) diff --git a/api/server/response_options.proto b/api/server/response_options.proto index 79c9e2dc9..bf64b916d 100644 --- a/api/server/response_options.proto +++ b/api/server/response_options.proto @@ -2,10 +2,11 @@ syntax = "proto3"; package nighthawk.server; -import "google/protobuf/wrappers.proto"; -import "validate/validate.proto"; import "envoy/api/v2/core/base.proto"; +import "envoy/config/core/v3/base.proto"; import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; +import "validate/validate.proto"; message ConcurrencyBasedLinearDelay { // Minimal delay to add to replies. @@ -20,7 +21,15 @@ message ConcurrencyBasedLinearDelay { // configuration will override. message ResponseOptions { // List of additional response headers. - repeated envoy.api.v2.core.HeaderValueOption response_headers = 1; + // + // Envoy deprecated its v2 API, prefer to use v3_response_headers instead. + // Mutually exclusive with v3_response_headers. + repeated envoy.api.v2.core.HeaderValueOption response_headers = 1 [deprecated = true]; + + // List of additional response headers. + // Mutually exclusive with response_headers. + repeated envoy.config.core.v3.HeaderValueOption v3_response_headers = 7; + // Number of 'a' characters in the the response body. uint32 response_body_size = 2 [(validate.rules).uint32 = {lte: 4194304}]; // If true, then echo request headers in the response body. diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 51e9f0566..66798f15d 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,17 +1,19 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "5a87f1e59b42ad546698d389f6ccac9406534e17" # September 25th, 2020 -ENVOY_SHA = "739c62249bae60f633f91dee846825f1d5ddcc469d45ef370e57f1a010c13258" +ENVOY_COMMIT = "a96ebf4a9b4dae50c17469c64b179930e57c802e" # Feb 16th, 2021 +ENVOY_SHA = "dbfa3325ce843e184950c794f549fbec70663a46e85331f78cf1d4a13aa22398" -HDR_HISTOGRAM_C_VERSION = "0.11.1" # September 17th, 2020 -HDR_HISTOGRAM_C_SHA = "8550071d4ae5c8229448f9b68469d6d42c620cd25111b49c696d00185e5f8329" +HDR_HISTOGRAM_C_VERSION = "0.11.2" # October 12th, 2020 +HDR_HISTOGRAM_C_SHA = "637f28b5f64de2e268131e4e34e6eef0b91cf5ff99167db447d9b2825eae6bad" def nighthawk_dependencies(): http_archive( name = "envoy", sha256 = ENVOY_SHA, strip_prefix = "envoy-%s" % ENVOY_COMMIT, + # // clang-format off: Envoy's format check: Only repository_locations.bzl may contains URL references url = "https://github.com/envoyproxy/envoy/archive/%s.tar.gz" % ENVOY_COMMIT, + # // clang-format on ) http_archive( name = "dep_hdrhistogram_c", @@ -50,5 +52,7 @@ cc_library( """, sha256 = HDR_HISTOGRAM_C_SHA, strip_prefix = "HdrHistogram_c-%s" % HDR_HISTOGRAM_C_VERSION, + # // clang-format off url = "https://github.com/HdrHistogram/HdrHistogram_c/archive/%s.tar.gz" % HDR_HISTOGRAM_C_VERSION, + # // clang-format on ) diff --git a/benchmarks/README.md b/benchmarks/README.md index 94a145be7..33244c8bf 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -26,7 +26,7 @@ It will run a selection of an example [benchmarks](test/test_discovery.py) extracted from `/benchmarks`, which injects Envoy between the benchmark client and test server. ```bash -git clone https://github.com/oschaaf/nighthawk.git benchmark-test +git clone https://github.com/envoyproxy/nighthawk.git benchmark-test cd benchmark-test bazel build //benchmarks:benchmarks @@ -54,7 +54,7 @@ client and server. If not set, the benchmark suite will fall back to configuring Nighthawk's test server for that. Note that the build can be a lengthy process. ```bash -git clone https://github.com/oschaaf/nighthawk.git benchmark-test +git clone https://github.com/envoyproxy/nighthawk.git benchmark-test cd benchmark-test bazel test \ --test_summary=detailed \ @@ -67,6 +67,7 @@ bazel test \ --compilation_mode=opt \ --cxxopt=-g \ --cxxopt=-ggdb3 \ + --define tcmalloc=gperftools \ //benchmarks:* ``` @@ -127,4 +128,4 @@ docker run -it --rm \ - Have a mode where nighthawk_test_server provides high-res control timings in its access logs - The ability to repeat the runs multiple times and obtain stats, e.g. how much variance there is, mean, etc. -- The ability to do A/B testing, similar to https://github.com/envoyproxy/envoy-perf/blob/master/siege/siege.py#L3. +- The ability to do A/B testing, similar to https://github.com/envoyproxy/envoy-perf/blob/main/siege/siege.py#L3. diff --git a/benchmarks/benchmarks.py b/benchmarks/benchmarks.py index 654c5c324..b4c22b358 100644 --- a/benchmarks/benchmarks.py +++ b/benchmarks/benchmarks.py @@ -9,5 +9,8 @@ if __name__ == '__main__': path = os.path.dirname(os.path.realpath(__file__)) - r = pytest.main(["--rootdir=" + path, "-x", path, "-p", "no:cacheprovider", *sys.argv]) + r = pytest.main([ + "--rootdir=" + path, "-x", path, "-p", "no:cacheprovider", "--log-level", "INFO", + "--log-cli-level", "INFO", *sys.argv + ]) exit(r) diff --git a/benchmarks/configurations/envoy_proxy.yaml b/benchmarks/configurations/envoy_proxy.yaml index a7e984dc3..700217df4 100644 --- a/benchmarks/configurations/envoy_proxy.yaml +++ b/benchmarks/configurations/envoy_proxy.yaml @@ -11,10 +11,11 @@ static_resources: port_value: 0 filter_chains: - filters: - - name: envoy.http_connection_manager - config: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager generate_request_id: false - codec_type: auto + codec_type: AUTO stat_prefix: ingress_http route_config: name: local_route @@ -28,15 +29,21 @@ static_resources: route: cluster: local_service http_filters: - - name: envoy.router - config: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router dynamic_stats: false clusters: - name: local_service connect_timeout: 0.25s - type: strict_dns - lb_policy: round_robin - hosts: - - socket_address: - address: $server_ip - port_value: $server_port + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: local_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: $server_ip + port_value: $server_port diff --git a/benchmarks/envoy_proxy.py b/benchmarks/envoy_proxy.py index 06ce8bc08..8a48ad362 100644 --- a/benchmarks/envoy_proxy.py +++ b/benchmarks/envoy_proxy.py @@ -28,13 +28,21 @@ class EnvoyProxyServer(NighthawkTestServer): See InjectHttpProxyIntegrationTestBase below for usage. """ - def __init__(self, config_template_path, server_ip, ip_version, parameters=dict(), tag=""): + def __init__(self, + config_template_path, + server_ip, + ip_version, + request, + parameters=dict(), + tag=""): """Initialize an EnvoyProxyServer instance. Arguments: config_template_path: Configuration template for the proxy. server_ip: IP address for the proxy to use. ip_version: IP version that the proxy should use when listening. + request: The pytest `request` test fixture used to determine information + about the currently executing test case. parameters: Dictionary. Supply this to provide template parameter replacement values (optional). tag: String. Supply this to get recognizeable output locations (optional). """ @@ -43,6 +51,7 @@ def __init__(self, config_template_path, server_ip, ip_version, parameters=dict( config_template_path, server_ip, ip_version, + request, parameters=parameters, tag=tag) self.docker_image = os.getenv("ENVOY_DOCKER_IMAGE_TO_TEST", "") @@ -61,15 +70,16 @@ class InjectHttpProxyIntegrationTestBase(HttpIntegrationTestBase): which directs traffic to that. Both will be listing for plain http traffic. """ - def __init__(self, ip_version, server_config, proxy_config): + def __init__(self, request, server_config, proxy_config): """Initialize an InjectHttpProxyIntegrationTestBase. Arguments: - ip_version: Use ipv4 or ipv6 + request: The pytest `request` test fixture used to determine information + about the currently executing test case. server_config: Path to the server configuration. proxy_config: Path to the proxy configuration. """ - super(InjectHttpProxyIntegrationTestBase, self).__init__(ip_version, server_config) + super(InjectHttpProxyIntegrationTestBase, self).__init__(request, server_config) self._proxy_config = proxy_config def setUp(self): @@ -85,6 +95,7 @@ def setUp(self): proxy_server = EnvoyProxyServer(self._proxy_config, self.server_ip, self.ip_version, + self.request, parameters=self.parameters, tag=self.tag) assert (proxy_server.start()) @@ -92,9 +103,9 @@ def setUp(self): port=proxy_server.server_port)) self.proxy_server = proxy_server - def tearDown(self): + def tearDown(self, caplog): """Tear down the proxy and test server. Assert that both exit succesfully.""" - super(InjectHttpProxyIntegrationTestBase, self).tearDown() + super(InjectHttpProxyIntegrationTestBase, self).tearDown(caplog) assert (self.proxy_server.stop() == 0) def getTestServerRootUri(self): @@ -106,7 +117,7 @@ def getTestServerRootUri(self): @pytest.fixture(params=determineIpVersionsFromEnvironment()) -def inject_envoy_http_proxy_fixture(request, server_config, proxy_config): +def inject_envoy_http_proxy_fixture(request, server_config, proxy_config, caplog): """Injects an Envoy proxy in front of the test server. NOTE: Depends on the proxy_config fixture, which must be explicitly imported @@ -116,10 +127,11 @@ def inject_envoy_http_proxy_fixture(request, server_config, proxy_config): request: supplies the ip version. server_config: path to the server configuration template. proxy_config: path to the proxy configuration template. + caplog: The pytest `caplog` test fixture used to examine logged messages. Yields: a successfully set up InjectHttpProxyIntegrationTestBase instance. """ - fixture = InjectHttpProxyIntegrationTestBase(request.param, server_config, proxy_config) + fixture = InjectHttpProxyIntegrationTestBase(request, server_config, proxy_config) fixture.setUp() yield fixture - fixture.tearDown() + fixture.tearDown(caplog) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 8d430b706..0fe4ac51a 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -24,8 +24,8 @@ function do_build () { } function do_opt_build () { - bazel build $BAZEL_BUILD_OPTIONS -c opt //:nighthawk - bazel build $BAZEL_BUILD_OPTIONS -c opt //benchmarks:benchmarks + bazel build $BAZEL_BUILD_OPTIONS -c opt --define tcmalloc=gperftools //:nighthawk + bazel build $BAZEL_BUILD_OPTIONS -c opt --define tcmalloc=gperftools //benchmarks:benchmarks } function do_test() { @@ -34,12 +34,14 @@ function do_test() { } function do_clang_tidy() { - ci/run_clang_tidy.sh + # clang-tidy will warn on standard library issues with libc++ + BAZEL_BUILD_OPTIONS=("--config=clang" "${BAZEL_BUILD_OPTIONS[@]}") + BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" NUM_CPUS=4 ci/run_clang_tidy.sh } function do_unit_test_coverage() { export TEST_TARGETS="//test/... -//test:python_test" - export COVERAGE_THRESHOLD=93.2 + export COVERAGE_THRESHOLD=94.3 echo "bazel coverage build with tests ${TEST_TARGETS}" test/run_nighthawk_bazel_coverage.sh ${TEST_TARGETS} exit 0 @@ -47,7 +49,8 @@ function do_unit_test_coverage() { function do_integration_test_coverage() { export TEST_TARGETS="//test:python_test" - export COVERAGE_THRESHOLD=78.0 + #TODO(#564): Revert this to 78.6 + export COVERAGE_THRESHOLD=75.0 echo "bazel coverage build with tests ${TEST_TARGETS}" test/run_nighthawk_bazel_coverage.sh ${TEST_TARGETS} exit 0 @@ -127,6 +130,7 @@ function do_benchmark_with_own_binaries() { --compilation_mode=opt \ --cxxopt=-g \ --cxxopt=-ggdb3 \ + --define tcmalloc=gperftools \ //benchmarks:* } diff --git a/ci/docker/Dockerfile-nighthawk b/ci/docker/Dockerfile-nighthawk index a2c681afa..153113e8b 100644 --- a/ci/docker/Dockerfile-nighthawk +++ b/ci/docker/Dockerfile-nighthawk @@ -1,4 +1,4 @@ -FROM frolvlad/alpine-glibc +FROM frolvlad/alpine-glibc:alpine-3.13_glibc-2.32 ADD nighthawk_client /usr/local/bin/nighthawk_client ADD nighthawk_test_server /usr/local/bin/nighthawk_test_server diff --git a/ci/docker/Dockerfile-nighthawk-benchmark b/ci/docker/Dockerfile-nighthawk-benchmark index b93d2ae1c..4929e15e9 100644 --- a/ci/docker/Dockerfile-nighthawk-benchmark +++ b/ci/docker/Dockerfile-nighthawk-benchmark @@ -1,4 +1,4 @@ -FROM frolvlad/alpine-glibc +FROM frolvlad/alpine-glibc:alpine-3.13_glibc-2.32 CMD ["./benchmarks" "--help"] ENV RUNFILES_DIR=/usr/local/bin/benchmarks/benchmarks.runfiles @@ -8,7 +8,7 @@ WORKDIR /usr/local/bin/benchmarks COPY benchmarks /usr/local/bin/benchmarks/ -RUN apk add --no-cache docker=19.03.12-r0 openrc=0.42.1-r11 python3>=3.8.5 +RUN apk add --no-cache docker=20.10.3-r0 openrc=0.42.1-r19 python3>=3.8.7-r0 RUN rc-update add docker boot RUN if [ ! -e /usr/bin/python ]; then ln -sf python3 /usr/bin/python; fi && \ diff --git a/ci/docker/default-config.yaml b/ci/docker/default-config.yaml index 8c931f188..741c9800c 100644 --- a/ci/docker/default-config.yaml +++ b/ci/docker/default-config.yaml @@ -28,7 +28,7 @@ static_resources: - name: test-server config: response_body_size: 10 - response_headers: + v3_response_headers: - { header: { key: "x-nh", value: "1"}} - name: envoy.router config: diff --git a/ci/docker/docker_push.sh b/ci/docker/docker_push.sh index 9fd065a87..e5091e523 100755 --- a/ci/docker/docker_push.sh +++ b/ci/docker/docker_push.sh @@ -12,8 +12,8 @@ fi DOCKER_IMAGE_PREFIX="${DOCKER_IMAGE_PREFIX:-envoyproxy/nighthawk}" -# push the nighthawk image on tags or merge to master -if [[ "$CIRCLE_BRANCH" = 'master' ]]; then +# push the nighthawk image on tags or merge to main +if [[ "$CIRCLE_BRANCH" = 'main' ]]; then docker login -u "$DOCKERHUB_USERNAME" -p "$DOCKERHUB_PASSWORD" docker push "${DOCKER_IMAGE_PREFIX}-dev:latest" docker tag "${DOCKER_IMAGE_PREFIX}-dev:latest" "${DOCKER_IMAGE_PREFIX}-dev:${CIRCLE_SHA1}" @@ -26,6 +26,6 @@ else docker tag "${DOCKER_IMAGE_PREFIX}:${TAG}" "${DOCKER_IMAGE_PREFIX}:${TAG}" docker push "${DOCKER_IMAGE_PREFIX}:${TAG}" else - echo 'Ignoring non-master branch for docker push.' + echo 'Ignoring non-main branch for docker push.' fi fi diff --git a/ci/run_clang_tidy.sh b/ci/run_clang_tidy.sh index c363d6877..6412bedc1 100755 --- a/ci/run_clang_tidy.sh +++ b/ci/run_clang_tidy.sh @@ -1,6 +1,7 @@ #!/bin/bash set -eo pipefail + # ENVOY_SRCDIR should point to where Envoy source lives, while SRCDIR could be a downstream build # (for example envoy-filter-example). [[ -z "${ENVOY_SRCDIR}" ]] && ENVOY_SRCDIR="${PWD}" @@ -30,66 +31,100 @@ echo "Generating compilation database..." # Do not run clang-tidy against win32 impl # TODO(scw00): We should run clang-tidy against win32 impl once we have clang-cl support for Windows function exclude_win32_impl() { - grep -v source/common/filesystem/win32/ | grep -v source/common/common/win32 | grep -v source/exe/win32 | grep -v source/common/api/win32 + grep -v source/common/filesystem/win32/ | grep -v source/common/common/win32 | grep -v source/exe/win32 | grep -v source/common/api/win32 | grep -v source/common/event/win32 } # Do not run clang-tidy against macOS impl # TODO: We should run clang-tidy against macOS impl for completeness function exclude_macos_impl() { - grep -v source/common/filesystem/kqueue/ + grep -v source/common/filesystem/kqueue/ | grep -v source/common/network/apple_dns_impl | grep -v test/common/network/apple_dns_impl_test } # Do not run incremental clang-tidy on check_format testdata files. -function exclude_testdata() { +function exclude_check_format_testdata() { grep -v tools/testdata/check_format/ } +# Do not run clang-tidy on envoy_headersplit testdata files. +function exclude_headersplit_testdata() { + grep -v tools/envoy_headersplit/ +} + +# Do not run clang-tidy against Chromium URL import, this needs to largely +# reflect the upstream structure. +function exclude_chromium_url() { + grep -v source/common/chromium_url/ +} + # Exclude files in third_party which are temporary forks from other OSS projects. function exclude_third_party() { grep -v third_party/ } +# Exclude files which are part of the Wasm emscripten environment +function exclude_wasm_emscripten() { + grep -v source/extensions/common/wasm/ext +} + +# Exclude files which are part of the Wasm SDK +function exclude_wasm_sdk() { + grep -v proxy_wasm_cpp_sdk +} + +# Exclude files which are part of the Wasm Host environment +function exclude_wasm_host() { + grep -v proxy_wasm_cpp_host +} + +# Exclude proxy-wasm test_data. +function exclude_wasm_test_data() { + grep -v wasm/test_data +} + +# Exclude files which are part of the Wasm examples +function exclude_wasm_examples() { + grep -v examples/wasm +} + function filter_excludes() { - exclude_testdata | exclude_win32_impl | exclude_macos_impl | exclude_third_party + exclude_check_format_testdata | exclude_headersplit_testdata | exclude_chromium_url | exclude_win32_impl | exclude_macos_impl | exclude_third_party | exclude_wasm_emscripten | exclude_wasm_sdk | exclude_wasm_host | exclude_wasm_test_data | exclude_wasm_examples } function run_clang_tidy() { python3 "${LLVM_PREFIX}/share/clang/run-clang-tidy.py" \ - -clang-tidy-binary=${CLANG_TIDY} -header-filter='-external' \ - -clang-apply-replacements-binary=${CLANG_APPLY_REPLACEMENTS} \ - -export-fixes=${FIX_YAML} -j ${NUM_CPUS:-0} -p ${SRCDIR} -quiet \ - ${APPLY_CLANG_TIDY_FIXES:+-fix} $@ + -clang-tidy-binary="${CLANG_TIDY}" \ + -clang-apply-replacements-binary="${CLANG_APPLY_REPLACEMENTS}" \ + -export-fixes=${FIX_YAML} -j "${NUM_CPUS:-0}" -p "${SRCDIR}" -quiet \ + ${APPLY_CLANG_TIDY_FIXES:+-fix} "$@" } function run_clang_tidy_diff() { - git diff $1 | filter_excludes | \ + git diff "$1" | filter_excludes | \ python3 "${LLVM_PREFIX}/share/clang/clang-tidy-diff.py" \ - -clang-tidy-binary=${CLANG_TIDY} \ - -export-fixes=${FIX_YAML} -j ${NUM_CPUS:-0} -p 1 -quiet + -clang-tidy-binary="${CLANG_TIDY}" \ + -export-fixes="${FIX_YAML}" -j "${NUM_CPUS:-0}" -p 1 -quiet } if [[ $# -gt 0 ]]; then - echo "Running clang-tidy on: $@" - run_clang_tidy $@ + echo "Running clang-tidy on: $*" + run_clang_tidy "$@" elif [[ "${RUN_FULL_CLANG_TIDY}" == 1 ]]; then echo "Running a full clang-tidy" run_clang_tidy else if [[ -z "${DIFF_REF}" ]]; then - if [[ "${BUILD_REASON}" == "PullRequest" ]]; then - DIFF_REF="remotes/origin/${SYSTEM_PULLREQUEST_TARGETBRANCH}" - elif [[ "${BUILD_REASON}" == *CI ]]; then + if [[ "${BUILD_REASON}" == *CI ]]; then DIFF_REF="HEAD^" else - DIFF_REF=$(${ENVOY_SRCDIR}/tools/git/last_github_commit.sh) + DIFF_REF=$("${ENVOY_SRCDIR}"/tools/git/last_github_commit.sh) fi fi - echo "Running clang-tidy-diff against ${DIFF_REF} ($(git rev-parse ${DIFF_REF})), current HEAD ($(git rev-parse HEAD))" - run_clang_tidy_diff ${DIFF_REF} + echo "Running clang-tidy-diff against ${DIFF_REF} ($(git rev-parse "${DIFF_REF}")), current HEAD ($(git rev-parse HEAD))" + run_clang_tidy_diff "${DIFF_REF}" fi if [[ -s "${FIX_YAML}" ]]; then echo "clang-tidy check failed, potentially fixed by clang-apply-replacements:" - cat ${FIX_YAML} + cat "${FIX_YAML}" exit 1 fi diff --git a/ci/run_envoy_docker.sh b/ci/run_envoy_docker.sh index c6b91fae5..0ba9c5ec9 100755 --- a/ci/run_envoy_docker.sh +++ b/ci/run_envoy_docker.sh @@ -2,26 +2,99 @@ set -e -. ci/envoy_build_sha.sh +# shellcheck source=ci/envoy_build_sha.sh +. "$(dirname "$0")"/envoy_build_sha.sh -# We run as root and later drop permissions. This is required to setup the USER -# in useradd below, which is need for correct Python execution in the Docker -# environment. -USER=root -USER_GROUP=root +function is_windows() { + [[ "$(uname -s)" == *NT* ]] +} + +read -ra ENVOY_DOCKER_OPTIONS <<< "${ENVOY_DOCKER_OPTIONS:-}" + +# TODO(phlax): uppercase these env vars +export HTTP_PROXY="${http_proxy:-}" +export HTTPS_PROXY="${https_proxy:-}" +export NO_PROXY="${no_proxy:-}" + +if is_windows; then + [[ -z "${IMAGE_NAME}" ]] && IMAGE_NAME="envoyproxy/envoy-build-windows2019" + # TODO(sunjayBhatia): Currently ENVOY_DOCKER_OPTIONS is ignored on Windows because + # CI sets it to a Linux-specific value. Undo this once https://github.com/envoyproxy/envoy/issues/13272 + # is resolved. + ENVOY_DOCKER_OPTIONS=() + DEFAULT_ENVOY_DOCKER_BUILD_DIR=C:/Windows/Temp/envoy-docker-build + BUILD_DIR_MOUNT_DEST=C:/build + # Replace MSYS style drive letter (/c/) with driver letter designation (C:/) + SOURCE_DIR=$(echo "${PWD}" | sed -E "s#/([a-zA-Z])/#\1:/#") + SOURCE_DIR_MOUNT_DEST=C:/source + START_COMMAND=("bash" "-c" "cd source && $*") +else + [[ -z "${IMAGE_NAME}" ]] && IMAGE_NAME="envoyproxy/envoy-build-ubuntu" + # We run as root and later drop permissions. This is required to setup the USER + # in useradd below, which is need for correct Python execution in the Docker + # environment. + ENVOY_DOCKER_OPTIONS+=(-u root:root) + ENVOY_DOCKER_OPTIONS+=(-v /var/run/docker.sock:/var/run/docker.sock) + ENVOY_DOCKER_OPTIONS+=(--cap-add SYS_PTRACE --cap-add NET_RAW --cap-add NET_ADMIN) + DEFAULT_ENVOY_DOCKER_BUILD_DIR=/tmp/envoy-docker-build + BUILD_DIR_MOUNT_DEST=/build + SOURCE_DIR="${PWD}" + SOURCE_DIR_MOUNT_DEST=/source + START_COMMAND=("/bin/bash" "-lc" "groupadd --gid $(id -g) -f envoygroup \ + && useradd -o --uid $(id -u) --gid $(id -g) --no-create-home --home-dir /build envoybuild \ + && usermod -a -G pcap envoybuild \ + && chown envoybuild:envoygroup /build \ + && sudo -EHs -u envoybuild bash -c 'cd /source && $*'") +fi -[[ -z "${IMAGE_NAME}" ]] && IMAGE_NAME="envoyproxy/envoy-build-ubuntu" # The IMAGE_ID defaults to the CI hash but can be set to an arbitrary image ID (found with 'docker # images'). [[ -z "${IMAGE_ID}" ]] && IMAGE_ID="${ENVOY_BUILD_SHA}" -[[ -z "${ENVOY_DOCKER_BUILD_DIR}" ]] && ENVOY_DOCKER_BUILD_DIR=/tmp/envoy-docker-build +[[ -z "${ENVOY_DOCKER_BUILD_DIR}" ]] && ENVOY_DOCKER_BUILD_DIR="${DEFAULT_ENVOY_DOCKER_BUILD_DIR}" +# Replace backslash with forward slash for Windows style paths +ENVOY_DOCKER_BUILD_DIR="${ENVOY_DOCKER_BUILD_DIR//\\//}" +mkdir -p "${ENVOY_DOCKER_BUILD_DIR}" -[[ -f .git ]] && [[ ! -d .git ]] && GIT_VOLUME_OPTION="-v $(git rev-parse --git-common-dir):$(git rev-parse --git-common-dir)" +[[ -t 1 ]] && ENVOY_DOCKER_OPTIONS+=("-it") +[[ -f .git ]] && [[ ! -d .git ]] && ENVOY_DOCKER_OPTIONS+=(-v "$(git rev-parse --git-common-dir):$(git rev-parse --git-common-dir)") +[[ -n "${SSH_AUTH_SOCK}" ]] && ENVOY_DOCKER_OPTIONS+=(-v "${SSH_AUTH_SOCK}:${SSH_AUTH_SOCK}" -e SSH_AUTH_SOCK) + +export ENVOY_BUILD_IMAGE="${IMAGE_NAME}:${IMAGE_ID}" -mkdir -p "${ENVOY_DOCKER_BUILD_DIR}" # Since we specify an explicit hash, docker-run will pull from the remote repo if missing. -docker run --rm -t -i -e HTTP_PROXY=${http_proxy} -e HTTPS_PROXY=${https_proxy} \ - -u "${USER}":"${USER_GROUP}" -v "${ENVOY_DOCKER_BUILD_DIR}":/build ${GIT_VOLUME_OPTION} \ - -v "$PWD":/source -e NUM_CPUS --cap-add SYS_PTRACE --cap-add NET_RAW --cap-add NET_ADMIN "${IMAGE_NAME}":"${IMAGE_ID}" \ - /bin/bash -lc "groupadd --gid $(id -g) -f envoygroup && useradd -o --uid $(id -u) --gid $(id -g) --no-create-home \ - --home-dir /source envoybuild && usermod -a -G pcap envoybuild && su envoybuild -c \"cd source && $*\"" +docker run --rm \ + "${ENVOY_DOCKER_OPTIONS[@]}" \ + -v "${ENVOY_DOCKER_BUILD_DIR}":"${BUILD_DIR_MOUNT_DEST}" \ + -v "${SOURCE_DIR}":"${SOURCE_DIR_MOUNT_DEST}" \ + -e AZP_BRANCH \ + -e HTTP_PROXY \ + -e HTTPS_PROXY \ + -e NO_PROXY \ + -e BAZEL_STARTUP_OPTIONS \ + -e BAZEL_BUILD_EXTRA_OPTIONS \ + -e BAZEL_EXTRA_TEST_OPTIONS \ + -e BAZEL_REMOTE_CACHE \ + -e ENVOY_STDLIB \ + -e BUILD_REASON \ + -e BAZEL_REMOTE_INSTANCE \ + -e GCP_SERVICE_ACCOUNT_KEY \ + -e NUM_CPUS \ + -e ENVOY_RBE \ + -e ENVOY_BUILD_IMAGE \ + -e ENVOY_SRCDIR \ + -e ENVOY_BUILD_TARGET \ + -e SYSTEM_PULLREQUEST_PULLREQUESTNUMBER \ + -e GCS_ARTIFACT_BUCKET \ + -e GITHUB_TOKEN \ + -e BUILD_SOURCEBRANCHNAME \ + -e BAZELISK_BASE_URL \ + -e ENVOY_BUILD_ARCH \ + -e SLACK_TOKEN \ + -e BUILD_URI\ + -e REPO_URI \ + -e SYSTEM_STAGEDISPLAYNAME \ + -e SYSTEM_JOBDISPLAYNAME \ + -e SYSTEM_PULLREQUEST_PULLREQUESTID \ + "${ENVOY_BUILD_IMAGE}" \ + "${START_COMMAND[@]}" + \ No newline at end of file diff --git a/docs/root/overview.md b/docs/root/overview.md index 58fa5f117..46cb147ce 100644 --- a/docs/root/overview.md +++ b/docs/root/overview.md @@ -58,7 +58,7 @@ back reports per phase. ## Key concept descriptions -*The c++ interface definitions for the concepts below can be found [here](https://github.com/envoyproxy/nighthawk/tree/master/include/nighthawk)*. +*The c++ interface definitions for the concepts below can be found [here](https://github.com/envoyproxy/nighthawk/tree/main/include/nighthawk)*. ### Process @@ -189,3 +189,11 @@ other formats (e.g. human, fortio). It can be very useful to always store the json output format, yet be able to easily get to one of the other output formats. It’s like having the cake and eating it too! +## User-specified Nighthawk logging + +Users of Nighthawk can specify custom format and destination (logging sink +delegate) for all Nighthawk logging messages. Nighthawk utilizes the Envoy's +logging mechanism by performing all logging via the **ENVOY_LOG** macro. To +customize this mechanism, users need to perform two steps: +1. Create a logging sink delegate inherited from [Envoy SinkDelegate](https://github.com/envoyproxy/envoy/blob/main/source/common/common/logger.h). +2. Construct a ServiceImpl object with an [Envoy Logger Context](https://github.com/envoyproxy/envoy/blob/main/source/common/common/logger.h) which contains user-specified log level and format. diff --git a/docs/root/statistics.md b/docs/root/statistics.md index 932e5e53f..f8419419b 100644 --- a/docs/root/statistics.md +++ b/docs/root/statistics.md @@ -61,7 +61,7 @@ histogram values are sent directly to the sinks. A stat is an interface that takes generic stat data and translates it into a backend-specific wire format. Currently Envoy supports the TCP and UDP [statsd](https://github.com/b/statsd_spec) protocol (implemented in -[statsd.h](https://github.com/envoyproxy/envoy/blob/master/source/extensions/stat_sinks/common/statsd/statsd.h)). +[statsd.h](https://github.com/envoyproxy/envoy/blob/main/source/extensions/stat_sinks/common/statsd/statsd.h)). Users can create their own Sink subclass to translate Envoy metrics into backend-specific format. @@ -90,7 +90,7 @@ stats.upstream_cx_length_.recordValue(...); Currently Envoy metrics don't support key-value map. As a result, for metrics to be broken down by certain dimensions, we need to define a separate metric for each dimension. For example, currently Nighthawk defines -[separate counters](https://github.com/envoyproxy/nighthawk/blob/master/source/client/benchmark_client_impl.h#L35-L40) +[separate counters](https://github.com/envoyproxy/nighthawk/blob/main/source/client/benchmark_client_impl.h#L35-L40) to monitor the number of responses with corresponding response code. ## Envoy Metrics Flush @@ -128,7 +128,7 @@ key-value map. ## Reference - [Nighthawk: architecture and key - concepts](https://github.com/envoyproxy/nighthawk/blob/master/docs/root/overview.md) + concepts](https://github.com/envoyproxy/nighthawk/blob/main/docs/root/overview.md) - [Envoy Stats - System](https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md) + System](https://github.com/envoyproxy/envoy/blob/main/source/docs/stats.md) - [Envoy Stats blog](https://blog.envoyproxy.io/envoy-stats-b65c7f363342) diff --git a/extensions_build_config.bzl b/extensions_build_config.bzl index 050877713..a15024419 100644 --- a/extensions_build_config.bzl +++ b/extensions_build_config.bzl @@ -1,4 +1,4 @@ -# See https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#disabling-extensions for details on how this system works. +# See https://github.com/envoyproxy/envoy/blob/main/bazel/README.md#disabling-extensions for details on how this system works. EXTENSIONS = { "envoy.filters.http.router": "//source/extensions/filters/http/router:config", "envoy.filters.http.fault": "//source/extensions/filters/http/fault:config", @@ -8,6 +8,9 @@ EXTENSIONS = { "envoy.transport_sockets.raw_buffer": "//source/extensions/transport_sockets/raw_buffer:config", } +DISABLED_BY_DEFAULT_EXTENSIONS = { +} + # These can be changed to ["//visibility:public"], for downstream builds which # need to directly reference Envoy extensions. EXTENSION_CONFIG_VISIBILITY = ["//visibility:public"] diff --git a/include/nighthawk/client/options.h b/include/nighthawk/client/options.h index 6e73637df..a04292a86 100644 --- a/include/nighthawk/client/options.h +++ b/include/nighthawk/client/options.h @@ -6,6 +6,7 @@ #include #include "envoy/common/pure.h" +#include "envoy/common/time.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/metrics/v3/stats.pb.h" @@ -54,6 +55,8 @@ class Options { virtual nighthawk::client::SequencerIdleStrategy::SequencerIdleStrategyOptions sequencerIdleStrategy() const PURE; virtual std::string requestSource() const PURE; + virtual const absl::optional& + requestSourcePluginConfig() const PURE; virtual std::string trace() const PURE; virtual nighthawk::client::H1ConnectionReuseStrategy::H1ConnectionReuseStrategyOptions h1ConnectionReuseStrategy() const PURE; @@ -72,7 +75,9 @@ class Options { virtual std::vector statsSinks() const PURE; virtual uint32_t statsFlushInterval() const PURE; virtual std::string responseHeaderWithLatencyInput() const PURE; + virtual bool allowEnvoyDeprecatedV2Api() const PURE; + virtual absl::optional scheduled_start() const PURE; /** * Converts an Options instance to an equivalent CommandLineOptions instance in terms of option * values. diff --git a/include/nighthawk/client/output_collector.h b/include/nighthawk/client/output_collector.h index 1ff274821..bd457681e 100644 --- a/include/nighthawk/client/output_collector.h +++ b/include/nighthawk/client/output_collector.h @@ -3,9 +3,12 @@ #include #include "envoy/common/pure.h" +#include "envoy/common/time.h" #include "nighthawk/common/statistic.h" +#include "absl/types/optional.h" + namespace Nighthawk { namespace Client { @@ -23,10 +26,12 @@ class OutputCollector { * @param statistics Reference to a vector of statistics to add to the output. * @param counters Reference to a map of counter values, keyed by name, to add to the output. * @param execution_duration Execution duration associated to the to-be-added result. + * @param first_acquisition_time Timing of the first rate limiter acquisition. */ virtual void addResult(absl::string_view name, const std::vector& statistics, const std::map& counters, - const std::chrono::nanoseconds execution_duration) PURE; + const std::chrono::nanoseconds execution_duration, + const absl::optional& first_acquisition_time) PURE; /** * Directly sets the output value. * diff --git a/include/nighthawk/common/BUILD b/include/nighthawk/common/BUILD index 20f89ef3d..1bf10fea4 100644 --- a/include/nighthawk/common/BUILD +++ b/include/nighthawk/common/BUILD @@ -32,6 +32,7 @@ envoy_basic_cc_library( "@envoy//include/envoy/upstream:cluster_manager_interface_with_external_headers", "@envoy//source/common/common:minimal_logger_lib", "@envoy//source/common/common:non_copyable_with_external_headers", + "@envoy//source/common/common:statusor_lib_with_external_headers", "@envoy//source/common/event:dispatcher_lib_with_external_headers", "@envoy//source/common/network:utility_lib_with_external_headers", ], diff --git a/include/nighthawk/common/factories.h b/include/nighthawk/common/factories.h index 20d5eb6a6..c93097b79 100644 --- a/include/nighthawk/common/factories.h +++ b/include/nighthawk/common/factories.h @@ -24,7 +24,7 @@ class SequencerFactory { const SequencerTarget& sequencer_target, TerminationPredicatePtr&& termination_predicate, Envoy::Stats::Scope& scope, - const Envoy::SystemTime scheduled_starting_time) const PURE; + const Envoy::MonotonicTime scheduled_starting_time) const PURE; }; class StatisticFactory { @@ -46,7 +46,7 @@ class TerminationPredicateFactory { virtual ~TerminationPredicateFactory() = default; virtual TerminationPredicatePtr create(Envoy::TimeSource& time_source, Envoy::Stats::Scope& scope, - const Envoy::SystemTime scheduled_starting_time) const PURE; + const Envoy::MonotonicTime scheduled_starting_time) const PURE; }; /** diff --git a/include/nighthawk/common/rate_limiter.h b/include/nighthawk/common/rate_limiter.h index c5f5f69e8..0c2eadad8 100644 --- a/include/nighthawk/common/rate_limiter.h +++ b/include/nighthawk/common/rate_limiter.h @@ -33,6 +33,12 @@ class RateLimiter { * @return Envoy::TimeSource& time_source used to track time. */ virtual Envoy::TimeSource& timeSource() PURE; + + /** + * @return absl::optional Time of the first acquisition, if any. + */ + virtual absl::optional firstAcquisitionTime() const PURE; + /** * @return std::chrono::nanoseconds elapsed since the first call to tryAcquireOne(). Used by some * rate limiter implementations to compute acquisition rate. diff --git a/include/nighthawk/common/sequencer.h b/include/nighthawk/common/sequencer.h index afc548518..56e177032 100644 --- a/include/nighthawk/common/sequencer.h +++ b/include/nighthawk/common/sequencer.h @@ -7,6 +7,7 @@ #include "envoy/common/pure.h" #include "nighthawk/common/operation_callback.h" +#include "nighthawk/common/rate_limiter.h" #include "nighthawk/common/statistic.h" namespace Nighthawk { @@ -35,6 +36,11 @@ class Sequencer { */ virtual std::chrono::nanoseconds executionDuration() const PURE; + /** + * @return RateLimiter& reference to the rate limiter associated to this sequencer. + */ + virtual const RateLimiter& rate_limiter() const PURE; + /** * @return double an up-to-date completions per second rate. */ diff --git a/include/nighthawk/common/statistic.h b/include/nighthawk/common/statistic.h index 85b88e082..b7508e15f 100644 --- a/include/nighthawk/common/statistic.h +++ b/include/nighthawk/common/statistic.h @@ -11,6 +11,7 @@ #include "api/client/output.pb.h" +#include "absl/status/statusor.h" #include "absl/strings/string_view.h" namespace Nighthawk { @@ -115,6 +116,23 @@ class Statistic : Envoy::NonCopyable { * @param id The id that should be set for the Statistic instance. */ virtual void setId(absl::string_view id) PURE; + + /** + * Build a string representation of this Statistic instance. + * + * @return absl::StatusOr> Status or a stream that will yield + * a serialized representation of this Statistic instance. + */ + virtual absl::StatusOr> serializeNative() const PURE; + + /** + * Reconstruct this Statistic instance using the serialization delivered by the input stream. + * + * @param input_stream Stream that will deliver a serialized representation. + * @return absl::Status Status indicating success or failure. Upon success the statistic + * instance this was called for will now represent what the stream contained. + */ + virtual absl::Status deserializeNative(std::istream& input_stream) PURE; }; } // namespace Nighthawk \ No newline at end of file diff --git a/include/nighthawk/request_source/BUILD b/include/nighthawk/request_source/BUILD new file mode 100644 index 000000000..7185a6dae --- /dev/null +++ b/include/nighthawk/request_source/BUILD @@ -0,0 +1,24 @@ +load( + "@envoy//bazel:envoy_build_system.bzl", + "envoy_basic_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_basic_cc_library( + name = "request_source_plugin_config_factory_lib", + hdrs = [ + "request_source_plugin_config_factory.h", + ], + include_prefix = "nighthawk/request_source", + deps = [ + "//api/request_source:request_source_plugin_cc_proto", + "//include/nighthawk/common:request_source_lib", + "@envoy//include/envoy/common:base_includes", + "@envoy//include/envoy/config:typed_config_interface", + "@envoy//source/common/api:api_lib_with_external_headers", + ], +) diff --git a/include/nighthawk/request_source/request_source_plugin_config_factory.h b/include/nighthawk/request_source/request_source_plugin_config_factory.h new file mode 100644 index 000000000..3feccffc9 --- /dev/null +++ b/include/nighthawk/request_source/request_source_plugin_config_factory.h @@ -0,0 +1,39 @@ +#pragma once + +#include "envoy/api/api.h" +#include "envoy/common/pure.h" +#include "envoy/config/typed_config.h" + +#include "nighthawk/common/request_source.h" + +namespace Nighthawk { + +// A factory that must be implemented for each RequestSourcePlugin. It instantiates the specific +// RequestSourcePlugin class after unpacking the plugin-specific config proto. +class RequestSourcePluginConfigFactory : public Envoy::Config::TypedFactory { +public: + ~RequestSourcePluginConfigFactory() override = default; + // All request source plugins will be in this category. + std::string category() const override { return "nighthawk.request_source_plugin"; } + + // Instantiates the specific RequestSourcePlugin class. Casts |message| to Any, unpacks it to the + // plugin-specific proto, and passes the strongly typed proto to the plugin constructor. + // + // @param typed_config Any typed_config proto taken from the TypedExtensionConfig. This should be + // a type listed in request_source_plugin_config.proto + // + // @param api Api parameter that contains timesystem, filesystem, and threadfactory. + // + // @param header RequestHeaderMapPtr parameter that acts as a template header for the + // requestSource to modify when generating requests. + // + // @return RequestSourcePtr Pointer to the new instance of RequestSource. + // + // @throw Envoy::EnvoyException If the Any proto cannot be unpacked as the type expected by the + // plugin. + virtual RequestSourcePtr createRequestSourcePlugin(const Envoy::Protobuf::Message& typed_config, + Envoy::Api::Api& api, + Envoy::Http::RequestHeaderMapPtr header) PURE; +}; + +} // namespace Nighthawk diff --git a/source/adaptive_load/BUILD b/source/adaptive_load/BUILD index f6d8ccd8c..3d6428b05 100644 --- a/source/adaptive_load/BUILD +++ b/source/adaptive_load/BUILD @@ -32,6 +32,35 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "adaptive_load_client_main", + srcs = [ + "adaptive_load_client_main.cc", + ], + hdrs = [ + "adaptive_load_client_main.h", + ], + repository = "@envoy", + visibility = ["//visibility:public"], + deps = [ + ":adaptive_load_controller_impl", + ":config_validator_impl", + ":input_variable_setter_impl", + ":metrics_evaluator_impl", + ":metrics_plugin_impl", + ":scoring_function_impl", + ":session_spec_proto_helper_impl", + ":step_controller_impl", + "//api/client:base_cc_proto", + "//api/client:grpc_service_lib", + "//source/common:nighthawk_common_lib", + "@envoy//source/common/common:assert_lib_with_external_headers", + "@envoy//source/common/common:minimal_logger_lib_with_external_headers", + "@envoy//source/common/event:real_time_system_lib_with_external_headers", + "@envoy//source/common/grpc:google_grpc_utils_lib_with_external_headers", + ], +) + envoy_cc_library( name = "config_validator_impl", srcs = [ diff --git a/source/adaptive_load/adaptive_load_client_main.cc b/source/adaptive_load/adaptive_load_client_main.cc new file mode 100644 index 000000000..be23d1051 --- /dev/null +++ b/source/adaptive_load/adaptive_load_client_main.cc @@ -0,0 +1,137 @@ +#include "adaptive_load/adaptive_load_client_main.h" + +#include +#include +#include +#include + +#include "envoy/common/exception.h" + +#include "nighthawk/adaptive_load/adaptive_load_controller.h" +#include "nighthawk/common/exception.h" + +#include "external/envoy/source/common/grpc/google_grpc_utils.h" +#include "external/envoy/source/common/protobuf/protobuf.h" + +#include "api/adaptive_load/adaptive_load.pb.h" +#include "api/client/service.grpc.pb.h" +#include "api/client/service.pb.h" + +#include "common/utility.h" +#include "common/version_info.h" + +#include "fmt/ranges.h" +#include "google/rpc/status.pb.h" +#include "tclap/CmdLine.h" + +namespace Nighthawk { + +namespace { + +/** + * Writes a string to a file. + * + * @param filesystem Envoy abstraction around filesystem functions, to facilitate unit testing. + * @param path Relative or absolute path to the file to write. + * @param contents String to write to the file. + * + * @throw Nighthawk::NighthawkException For any filesystem error. + */ +void WriteFileOrThrow(Envoy::Filesystem::Instance& filesystem, absl::string_view path, + absl::string_view contents) { + Envoy::Filesystem::FilePtr file = filesystem.createFile(std::string(path)); + const Envoy::Api::IoCallBoolResult open_result = + file->open(((1 << Envoy::Filesystem::File::Operation::Write)) | + (1 << (Envoy::Filesystem::File::Operation::Create))); + if (!open_result.ok()) { + throw Nighthawk::NighthawkException(absl::StrCat("Unable to open output file \"", path, + "\": ", open_result.err_->getErrorDetails())); + } + const Envoy::Api::IoCallSizeResult write_result = file->write(contents); + if (!write_result.ok()) { + throw Nighthawk::NighthawkException(absl::StrCat("Unable to write to output file \"", path, + "\": ", write_result.err_->getErrorDetails())); + } + const Envoy::Api::IoCallBoolResult close_result = file->close(); + if (!close_result.ok()) { + throw Nighthawk::NighthawkException(absl::StrCat("Unable to close output file \"", path, + "\": ", close_result.err_->getErrorDetails())); + } +} + +} // namespace + +AdaptiveLoadClientMain::AdaptiveLoadClientMain(int argc, const char* const* argv, + AdaptiveLoadController& controller, + Envoy::Filesystem::Instance& filesystem) + : controller_{controller}, filesystem_{filesystem} { + TCLAP::CmdLine cmd("Adaptive Load tool that finds the optimal load on the target " // NOLINT + "through a series of Nighthawk Service benchmarks.", + /*delimiter=*/' ', VersionInfo::version()); + + TCLAP::ValueArg nighthawk_service_address( + /*flag=*/"", "nighthawk-service-address", + "host:port for Nighthawk Service. To enable TLS, set --use-tls.", + /*req=*/false, "localhost:8443", "string", cmd); + TCLAP::SwitchArg use_tls( + /*flag=*/"", "use-tls", + "Use TLS for the gRPC connection from this program to the Nighthawk Service. Set environment " + "variable GRPC_DEFAULT_SSL_ROOTS_FILE_PATH to override the default root certificates.", + cmd); + TCLAP::ValueArg spec_filename( + /*flag=*/"", "spec-file", + "Path to a textproto file describing the adaptive load session " + "(nighthawk::adaptive_load::AdaptiveLoadSessionSpec).", + /*req=*/true, /*val=*/"", "string", cmd); + TCLAP::ValueArg output_filename( + /*flag=*/"", "output-file", + "Path to write adaptive load session output textproto " + "(nighthawk::adaptive_load::AdaptiveLoadSessionOutput).", + /*req=*/true, /*val=*/"", "string", cmd); + + Nighthawk::Utility::parseCommand(cmd, argc, argv); + + nighthawk_service_address_ = nighthawk_service_address.getValue(); + use_tls_ = use_tls.getValue(); + spec_filename_ = spec_filename.getValue(); + output_filename_ = output_filename.getValue(); +} + +uint32_t AdaptiveLoadClientMain::Run() { + ENVOY_LOG(info, "Attempting adaptive load session: {}", DescribeInputs()); + std::string spec_textproto; + try { + spec_textproto = filesystem_.fileReadToEnd(spec_filename_); + } catch (const Envoy::EnvoyException& e) { + throw Nighthawk::NighthawkException("Failed to read spec textproto file \"" + spec_filename_ + + "\": " + e.what()); + } + nighthawk::adaptive_load::AdaptiveLoadSessionSpec spec; + if (!Envoy::Protobuf::TextFormat::ParseFromString(spec_textproto, &spec)) { + throw Nighthawk::NighthawkException("Unable to parse file \"" + spec_filename_ + + "\" as a text protobuf (type " + spec.GetTypeName() + ")"); + } + std::shared_ptr<::grpc::Channel> channel = grpc::CreateChannel( + nighthawk_service_address_, use_tls_ ? grpc::SslCredentials(grpc::SslCredentialsOptions()) + : grpc::InsecureChannelCredentials()); + std::unique_ptr stub( + nighthawk::client::NighthawkService::NewStub(channel)); + + absl::StatusOr output_or = + controller_.PerformAdaptiveLoadSession(stub.get(), spec); + if (!output_or.ok()) { + ENVOY_LOG(error, "Error in adaptive load session: {}", output_or.status().message()); + return 1; + } + nighthawk::adaptive_load::AdaptiveLoadSessionOutput output = output_or.value(); + WriteFileOrThrow(filesystem_, output_filename_, output.DebugString()); + return 0; +} + +std::string AdaptiveLoadClientMain::DescribeInputs() { + return "Nighthawk Service " + nighthawk_service_address_ + " using " + + (use_tls_ ? "TLS" : "insecure") + " connection, input file: " + spec_filename_ + + ", output file: " + output_filename_; +} + +} // namespace Nighthawk diff --git a/source/adaptive_load/adaptive_load_client_main.h b/source/adaptive_load/adaptive_load_client_main.h new file mode 100644 index 000000000..1e4dc4ab4 --- /dev/null +++ b/source/adaptive_load/adaptive_load_client_main.h @@ -0,0 +1,55 @@ +#pragma once + +#include "envoy/common/time.h" +#include "envoy/filesystem/filesystem.h" + +#include "nighthawk/adaptive_load/adaptive_load_controller.h" + +#include "external/envoy/source/common/common/logger.h" + +namespace Nighthawk { + +/** + * Main implementation of the CLI wrapper around the adaptive load controller library. + * Parses command line options, reads adaptive load session spec proto from a file, + * runs an adaptive load session, and writes the output proto to a file. + */ +class AdaptiveLoadClientMain : public Envoy::Logger::Loggable { +public: + /** + * Parses the command line arguments to class members. + * + * @param argc Standard argc passed through from the exe entry point. + * @param argv Standard argv passed through from the exe entry point. + * @param controller Adaptive load controller, passed in to allow unit testing of this class. + * @param filesystem Abstraction of the filesystem, passed in to allow unit testing of this + * class. + * + * @throw Nighthawk::Client::MalformedArgvException If command line constraints are violated. + */ + AdaptiveLoadClientMain(int argc, const char* const* argv, AdaptiveLoadController& controller, + Envoy::Filesystem::Instance& filesystem); + /** + * Loads the adaptive load session spec proto from a file, runs an adaptive load session, and + * writes the output proto to a file. File paths are taken from class members initialized in the + * constructor. + * + * @return Exit code for this process. + * @throw Nighthawk::NighthawkException If a file read or write error occurs. + */ + uint32_t Run(); + /** + * Describes the program inputs as parsed from the command line. + */ + std::string DescribeInputs(); + +private: + std::string nighthawk_service_address_; + bool use_tls_; + std::string spec_filename_; + std::string output_filename_; + AdaptiveLoadController& controller_; + Envoy::Filesystem::Instance& filesystem_; +}; + +} // namespace Nighthawk diff --git a/source/client/BUILD b/source/client/BUILD index 4723f1ac7..affc901d1 100644 --- a/source/client/BUILD +++ b/source/client/BUILD @@ -49,6 +49,7 @@ envoy_cc_library( "//source/common:request_source_impl_lib", "//source/common:nighthawk_common_lib", "//source/common:nighthawk_service_client_impl", + "//source/request_source:request_options_list_plugin_impl", "@envoy//source/common/common:random_generator_lib_with_external_headers", "@envoy//source/common/access_log:access_log_manager_lib_with_external_headers", "@envoy//source/common/api:api_lib_with_external_headers", @@ -90,6 +91,8 @@ envoy_cc_library( "@envoy//source/server:server_lib_with_external_headers", "@envoy//source/server/config_validation:admin_lib_with_external_headers", "@envoy//include/envoy/http:protocol_interface_with_external_headers", + "@envoy//source/common/common:statusor_lib_with_external_headers", + "@envoy//source/common/router:context_lib_with_external_headers", ] + select({ "//bazel:zipkin_disabled": [], "//conditions:default": [ @@ -154,6 +157,7 @@ envoy_cc_library( "//api/client:grpc_service_lib", "//api/request_source:grpc_request_source_service_lib", "@envoy//source/common/common:thread_lib_with_external_headers", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/source/client/benchmark_client_impl.cc b/source/client/benchmark_client_impl.cc index 405bfc134..faa564583 100644 --- a/source/client/benchmark_client_impl.cc +++ b/source/client/benchmark_client_impl.cc @@ -57,11 +57,12 @@ Http1PoolImpl::newStream(Envoy::Http::ResponseDecoder& response_decoder, // In prefetch mode we try to keep the amount of connections at the configured limit. if (prefetch_connections_) { while (host_->cluster().resourceManager(priority_).connections().canCreate()) { - // We cannot rely on ::tryCreateConnection here, because that might decline without - // updating connections().canCreate() above. We would risk an infinite loop. - Envoy::ConnectionPool::ActiveClientPtr client = instantiateActiveClient(); - connecting_stream_capacity_ += client->effectiveConcurrentStreamLimit(); - Envoy::LinkedList::moveIntoList(std::move(client), owningList(client->state_)); + // We pass in a high prefetch ratio, because we don't want to throttle the prefetched + // connection amount like Envoy does out of the box. + ConnPoolImplBase::ConnectionResult result = tryCreateNewConnection(10000.0); + if (result != ConnectionResult::CreatedNewConnection) { + break; + } } } @@ -75,7 +76,7 @@ Http1PoolImpl::newStream(Envoy::Http::ResponseDecoder& response_decoder, } // Vanilla Envoy pool behavior. - return ConnPoolImpl::newStream(response_decoder, callbacks); + return HttpConnPoolImplBase::newStream(response_decoder, callbacks); } BenchmarkClientHttpImpl::BenchmarkClientHttpImpl( @@ -106,9 +107,23 @@ BenchmarkClientHttpImpl::BenchmarkClientHttpImpl( } void BenchmarkClientHttpImpl::terminate() { - if (pool() != nullptr) { - pool()->addDrainedCallback([this]() -> void { dispatcher_.exit(); }); - pool()->drainConnections(); + if (pool() != nullptr && pool()->hasActiveConnections()) { + // We don't report what happens after this call in the output, but latencies may still be + // reported via callbacks. This may happen after a long time (60s), which HdrHistogram can't + // track the way we configure it today, as that exceeds the max that it can record. + // No harm is done, but it does result in log lines warning about it. Avoid that, by + // disabling latency measurement here. + setShouldMeasureLatencies(false); + pool()->addDrainedCallback([this]() -> void { + drain_timer_->disableTimer(); + dispatcher_.exit(); + }); + // Set up a timer with a callback which caps the time we wait for the pool to drain. + drain_timer_ = dispatcher_.createTimer([this]() -> void { + ENVOY_LOG(info, "Wait for the connection pool drain timed out, proceeding to hard shutdown."); + dispatcher_.exit(); + }); + drain_timer_->enableTimer(5s); dispatcher_.run(Envoy::Event::Dispatcher::RunType::RunUntilExit); } } diff --git a/source/client/benchmark_client_impl.h b/source/client/benchmark_client_impl.h index 0304d4460..442217b28 100644 --- a/source/client/benchmark_client_impl.h +++ b/source/client/benchmark_client_impl.h @@ -75,13 +75,13 @@ struct BenchmarkClientStatistic { StatisticPtr origin_latency_statistic; }; -class Http1PoolImpl : public Envoy::Http::Http1::ProdConnPoolImpl { +class Http1PoolImpl : public Envoy::Http::FixedHttpConnPoolImpl { public: enum class ConnectionReuseStrategy { MRU, LRU, }; - using Envoy::Http::Http1::ProdConnPoolImpl::ProdConnPoolImpl; + using Envoy::Http::FixedHttpConnPoolImpl::FixedHttpConnPoolImpl; Envoy::Http::ConnectionPool::Cancellable* newStream(Envoy::Http::ResponseDecoder& response_decoder, Envoy::Http::ConnectionPool::Callbacks& callbacks) override; @@ -137,8 +137,9 @@ class BenchmarkClientHttpImpl : public BenchmarkClient, // Helpers Envoy::Http::ConnectionPool::Instance* pool() { auto proto = use_h2_ ? Envoy::Http::Protocol::Http2 : Envoy::Http::Protocol::Http11; - return cluster_manager_->httpConnPoolForCluster( - cluster_name_, Envoy::Upstream::ResourcePriority::Default, proto, nullptr); + const auto thread_local_cluster = cluster_manager_->getThreadLocalCluster(cluster_name_); + return thread_local_cluster->httpConnPool(Envoy::Upstream::ResourcePriority::Default, proto, + nullptr); } private: @@ -164,6 +165,7 @@ class BenchmarkClientHttpImpl : public BenchmarkClient, const RequestGenerator request_generator_; const bool provide_resource_backpressure_; const std::string latency_response_header_name_; + Envoy::Event::TimerPtr drain_timer_; }; } // namespace Client diff --git a/source/client/client_worker_impl.cc b/source/client/client_worker_impl.cc index a6c23542c..a5d456763 100644 --- a/source/client/client_worker_impl.cc +++ b/source/client/client_worker_impl.cc @@ -19,7 +19,7 @@ ClientWorkerImpl::ClientWorkerImpl(Envoy::Api::Api& api, Envoy::ThreadLocal::Ins const SequencerFactory& sequencer_factory, const RequestSourceFactory& request_generator_factory, Envoy::Stats::Store& store, const int worker_number, - const Envoy::SystemTime starting_time, + const Envoy::MonotonicTime starting_time, Envoy::Tracing::HttpTracerSharedPtr& http_tracer, const HardCodedWarmupStyle hardcoded_warmup_style) : WorkerImpl(api, tls, store), diff --git a/source/client/client_worker_impl.h b/source/client/client_worker_impl.h index 0decef819..41b2660bc 100644 --- a/source/client/client_worker_impl.h +++ b/source/client/client_worker_impl.h @@ -33,7 +33,7 @@ class ClientWorkerImpl : public WorkerImpl, virtual public ClientWorker { const SequencerFactory& sequencer_factory, const RequestSourceFactory& request_generator_factory, Envoy::Stats::Store& store, const int worker_number, - const Envoy::SystemTime starting_time, + const Envoy::MonotonicTime starting_time, Envoy::Tracing::HttpTracerSharedPtr& http_tracer, const HardCodedWarmupStyle hardcoded_warmup_style); StatisticPtrMap statistics() const override; diff --git a/source/client/factories_impl.cc b/source/client/factories_impl.cc index 0b3644f9d..2031658c4 100644 --- a/source/client/factories_impl.cc +++ b/source/client/factories_impl.cc @@ -17,6 +17,8 @@ #include "client/output_collector_impl.h" #include "client/output_formatter_impl.h" +#include "request_source/request_options_list_plugin_impl.h" + using namespace std::chrono_literals; namespace Nighthawk { @@ -63,12 +65,10 @@ BenchmarkClientPtr BenchmarkClientFactoryImpl::create( SequencerFactoryImpl::SequencerFactoryImpl(const Options& options) : OptionBasedFactoryImpl(options) {} -SequencerPtr SequencerFactoryImpl::create(Envoy::TimeSource& time_source, - Envoy::Event::Dispatcher& dispatcher, - const SequencerTarget& sequencer_target, - TerminationPredicatePtr&& termination_predicate, - Envoy::Stats::Scope& scope, - const Envoy::SystemTime scheduled_starting_time) const { +SequencerPtr SequencerFactoryImpl::create( + Envoy::TimeSource& time_source, Envoy::Event::Dispatcher& dispatcher, + const SequencerTarget& sequencer_target, TerminationPredicatePtr&& termination_predicate, + Envoy::Stats::Scope& scope, const Envoy::MonotonicTime scheduled_starting_time) const { StatisticFactoryImpl statistic_factory(options_); Frequency frequency(options_.requestsPerSecond()); RateLimiterPtr rate_limiter = std::make_unique( @@ -117,8 +117,8 @@ OutputFormatterPtr OutputFormatterFactoryImpl::create( } } -RequestSourceFactoryImpl::RequestSourceFactoryImpl(const Options& options) - : OptionBasedFactoryImpl(options) {} +RequestSourceFactoryImpl::RequestSourceFactoryImpl(const Options& options, Envoy::Api::Api& api) + : OptionBasedFactoryImpl(options), api_(api) {} void RequestSourceFactoryImpl::setRequestHeader(Envoy::Http::RequestHeaderMap& header, absl::string_view key, @@ -168,16 +168,39 @@ RequestSourceFactoryImpl::create(const Envoy::Upstream::ClusterManagerPtr& clust for (const auto& option_header : request_options.request_headers()) { setRequestHeader(*header, option_header.header().key(), option_header.header().value()); } - - if (options_.requestSource() == "") { - return std::make_unique(std::move(header)); - } else { + if (!options_.requestSource().empty()) { RELEASE_ASSERT(!service_cluster_name.empty(), "expected cluster name to be set"); // We pass in options_.requestsPerSecond() as the header buffer length so the grpc client // will shoot for maintaining an amount of headers of at least one second. return std::make_unique(cluster_manager, dispatcher, scope, service_cluster_name, std::move(header), options_.requestsPerSecond()); + } else if (options_.requestSourcePluginConfig().has_value()) { + absl::StatusOr plugin_or = LoadRequestSourcePlugin( + options_.requestSourcePluginConfig().value(), api_, std::move(header)); + if (!plugin_or.ok()) { + throw NighthawkException( + absl::StrCat("Request Source plugin loading error should have been caught " + "during input validation: ", + plugin_or.status().message())); + } + RequestSourcePtr request_source = std::move(plugin_or.value()); + return request_source; + } else { + return std::make_unique(std::move(header)); + } +} +absl::StatusOr RequestSourceFactoryImpl::LoadRequestSourcePlugin( + const envoy::config::core::v3::TypedExtensionConfig& config, Envoy::Api::Api& api, + Envoy::Http::RequestHeaderMapPtr header) const { + try { + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + config.name()); + return config_factory.createRequestSourcePlugin(config.typed_config(), api, std::move(header)); + } catch (const Envoy::EnvoyException& e) { + return absl::InvalidArgumentError( + absl::StrCat("Could not load plugin: ", config.name(), ": ", e.what())); } } @@ -186,7 +209,7 @@ TerminationPredicateFactoryImpl::TerminationPredicateFactoryImpl(const Options& TerminationPredicatePtr TerminationPredicateFactoryImpl::create(Envoy::TimeSource& time_source, Envoy::Stats::Scope& scope, - const Envoy::SystemTime scheduled_starting_time) const { + const Envoy::MonotonicTime scheduled_starting_time) const { // We'll always link a predicate which checks for requests to cancel. TerminationPredicatePtr root_predicate = std::make_unique( diff --git a/source/client/factories_impl.h b/source/client/factories_impl.h index fa7e6c3cd..2932a72bf 100644 --- a/source/client/factories_impl.h +++ b/source/client/factories_impl.h @@ -9,6 +9,9 @@ #include "nighthawk/common/termination_predicate.h" #include "nighthawk/common/uri.h" +#include "external/envoy/source/common/common/statusor.h" +#include "external/envoy/source/common/config/utility.h" + #include "common/platform_util_impl.h" namespace Nighthawk { @@ -41,7 +44,7 @@ class SequencerFactoryImpl : public OptionBasedFactoryImpl, public SequencerFact SequencerPtr create(Envoy::TimeSource& time_source, Envoy::Event::Dispatcher& dispatcher, const SequencerTarget& sequencer_target, TerminationPredicatePtr&& termination_predicate, Envoy::Stats::Scope& scope, - const Envoy::SystemTime scheduled_starting_time) const override; + const Envoy::MonotonicTime scheduled_starting_time) const override; }; class StatisticFactoryImpl : public OptionBasedFactoryImpl, public StatisticFactory { @@ -58,14 +61,31 @@ class OutputFormatterFactoryImpl : public OutputFormatterFactory { class RequestSourceFactoryImpl : public OptionBasedFactoryImpl, public RequestSourceFactory { public: - RequestSourceFactoryImpl(const Options& options); + RequestSourceFactoryImpl(const Options& options, Envoy::Api::Api& api); RequestSourcePtr create(const Envoy::Upstream::ClusterManagerPtr& cluster_manager, Envoy::Event::Dispatcher& dispatcher, Envoy::Stats::Scope& scope, absl::string_view service_cluster_name) const override; private: + Envoy::Api::Api& api_; void setRequestHeader(Envoy::Http::RequestHeaderMap& header, absl::string_view key, absl::string_view value) const; + /** + * Instantiates a RequestSource using a RequestSourcePluginFactory based on the plugin name in + * |config|, unpacking the plugin-specific config proto within |config|. Validates the config + * proto. + * + * @param config Proto containing plugin name and plugin-specific config proto. + * @param api Api parameter that contains timesystem, filesystem, and threadfactory. + * @param header Any headers in request specifiers yielded by the request + * source plugin will override what is specified here. + + * @return absl::StatusOr Initialized plugin or error status due to missing + * plugin or config proto validation error. + */ + absl::StatusOr + LoadRequestSourcePlugin(const envoy::config::core::v3::TypedExtensionConfig& config, + Envoy::Api::Api& api, Envoy::Http::RequestHeaderMapPtr header) const; }; class TerminationPredicateFactoryImpl : public OptionBasedFactoryImpl, @@ -73,7 +93,7 @@ class TerminationPredicateFactoryImpl : public OptionBasedFactoryImpl, public: TerminationPredicateFactoryImpl(const Options& options); TerminationPredicatePtr create(Envoy::TimeSource& time_source, Envoy::Stats::Scope& scope, - const Envoy::SystemTime scheduled_starting_time) const override; + const Envoy::MonotonicTime scheduled_starting_time) const override; TerminationPredicate* linkConfiguredPredicates( TerminationPredicate& last_predicate, const TerminationPredicateMap& predicates, const TerminationPredicate::Status termination_status, Envoy::Stats::Scope& scope) const; diff --git a/source/client/flush_worker_impl.cc b/source/client/flush_worker_impl.cc index a1a5bd26b..d369a74ae 100644 --- a/source/client/flush_worker_impl.cc +++ b/source/client/flush_worker_impl.cc @@ -37,7 +37,7 @@ void FlushWorkerImpl::flushStats() { // Create a snapshot and flush to all sinks. Even if there are no sinks, // creating the snapshot has the important property that it latches all counters on a periodic // basis. - Envoy::Server::MetricSnapshotImpl snapshot(store_); + Envoy::Server::MetricSnapshotImpl snapshot(store_, time_source_); for (std::unique_ptr& sink : stats_sinks_) { sink->flush(snapshot); } diff --git a/source/client/options_impl.cc b/source/client/options_impl.cc index 9dec9f379..4cf761a41 100644 --- a/source/client/options_impl.cc +++ b/source/client/options_impl.cc @@ -144,7 +144,8 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv) { "Transport socket configuration in json or compact yaml. " "Mutually exclusive with --tls-context. Example (json): " "{name:\"envoy.transport_sockets.tls\",typed_config:{" - "\"@type\":\"type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\"," + "\"@type\":\"type.googleapis.com/" + "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\"," "common_tls_context:{tls_params:{cipher_suites:[\"-ALL:ECDHE-RSA-AES128-SHA\"]}}}}", false, "", "string", cmd); @@ -263,9 +264,19 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv) { TCLAP::ValueArg request_source( "", "request-source", "Remote gRPC source that will deliver to-be-replayed traffic. Each worker will separately " - "connect to this source. For example grpc://127.0.0.1:8443/.", + "connect to this source. For example grpc://127.0.0.1:8443/. " + "Mutually exclusive with --request_source_plugin_config.", false, "", "uri format", cmd); - + TCLAP::ValueArg request_source_plugin_config( + "", "request-source-plugin-config", + "[Request " + "Source](https://github.com/envoyproxy/nighthawk/blob/main/docs/root/" + "overview.md#requestsource) plugin configuration in json or compact yaml. " + "Mutually exclusive with --request-source. Example (json): " + "{name:\"nighthawk.stub-request-source-plugin\",typed_config:{" + "\"@type\":\"type.googleapis.com/nighthawk.request_source.StubPluginConfig\"," + "test_value:\"3\"}}", + false, "", "string", cmd); TCLAP::SwitchArg simple_warmup( "", "simple-warmup", "Perform a simple single warmup request (per worker) before starting execution. Note that " @@ -304,6 +315,12 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv) { "Default: \"\"", false, "", "string", cmd); + TCLAP::SwitchArg allow_envoy_deprecated_v2_api( + "", "allow-envoy-deprecated-v2-api", + "Set to allow usage of the v2 api. (Not recommended, support will stop in Q1 2021). Default: " + "false", + cmd); + Utility::parseCommand(cmd, argc, argv); // --duration and --no-duration are mutually exclusive @@ -436,6 +453,7 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv) { } TCLAP_SET_IF_SPECIFIED(stats_flush_interval, stats_flush_interval_); TCLAP_SET_IF_SPECIFIED(latency_response_header_name, latency_response_header_name_); + TCLAP_SET_IF_SPECIFIED(allow_envoy_deprecated_v2_api, allow_envoy_deprecated_v2_api_); // CLI-specific tests. // TODO(oschaaf): as per mergconflicts's remark, it would be nice to aggregate @@ -496,6 +514,21 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv) { throw MalformedArgvException(e.what()); } } + if (!request_source.getValue().empty() && !request_source_plugin_config.getValue().empty()) { + throw MalformedArgvException( + "--request-source and --request_source_plugin_config cannot both be set."); + } + if (!request_source_plugin_config.getValue().empty()) { + try { + request_source_plugin_config_.emplace(envoy::config::core::v3::TypedExtensionConfig()); + Envoy::MessageUtil::loadFromJson(request_source_plugin_config.getValue(), + request_source_plugin_config_.value(), + Envoy::ProtobufMessage::getStrictValidationVisitor()); + } catch (const Envoy::EnvoyException& e) { + throw MalformedArgvException(e.what()); + } + } + validate(); } @@ -570,6 +603,9 @@ OptionsImpl::OptionsImpl(const nighthawk::client::CommandLineOptions& options) { } else if (options.has_request_source()) { const auto& request_source_options = options.request_source(); request_source_ = request_source_options.uri(); + } else if (options.has_request_source_plugin_config()) { + request_source_plugin_config_.emplace(envoy::config::core::v3::TypedExtensionConfig()); + request_source_plugin_config_.value().MergeFrom(options.request_source_plugin_config()); } max_pending_requests_ = @@ -623,7 +659,15 @@ OptionsImpl::OptionsImpl(const nighthawk::client::CommandLineOptions& options) { std::copy(options.labels().begin(), options.labels().end(), std::back_inserter(labels_)); latency_response_header_name_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT( options, latency_response_header_name, latency_response_header_name_); - + allow_envoy_deprecated_v2_api_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT( + options, allow_envoy_deprecated_v2_api, allow_envoy_deprecated_v2_api_); + if (options.has_scheduled_start()) { + const auto elapsed_since_epoch = std::chrono::duration_cast( + std::chrono::nanoseconds(options.scheduled_start().nanos()) + + std::chrono::seconds(options.scheduled_start().seconds())); + scheduled_start_ = + Envoy::SystemTime(std::chrono::time_point(elapsed_since_epoch)); + } validate(); } @@ -730,6 +774,9 @@ CommandLineOptionsPtr OptionsImpl::toCommandLineOptionsInternal() const { if (requestSource() != "") { auto request_source = command_line_options->mutable_request_source(); *request_source->mutable_uri() = request_source_; + } else if (request_source_plugin_config_.has_value()) { + *(command_line_options->mutable_request_source_plugin_config()) = + request_source_plugin_config_.value(); } else { auto request_options = command_line_options->mutable_request_options(); request_options->set_request_method(request_method_); @@ -797,6 +844,13 @@ CommandLineOptionsPtr OptionsImpl::toCommandLineOptionsInternal() const { command_line_options->mutable_stats_flush_interval()->set_value(stats_flush_interval_); command_line_options->mutable_latency_response_header_name()->set_value( latency_response_header_name_); + command_line_options->mutable_allow_envoy_deprecated_v2_api()->set_value( + allow_envoy_deprecated_v2_api_); + if (scheduled_start_.has_value()) { + *(command_line_options->mutable_scheduled_start()) = + Envoy::ProtobufUtil::TimeUtil::NanosecondsToTimestamp( + scheduled_start_.value().time_since_epoch().count()); + } return command_line_options; } diff --git a/source/client/options_impl.h b/source/client/options_impl.h index af529f7b8..b84d80d3e 100644 --- a/source/client/options_impl.h +++ b/source/client/options_impl.h @@ -60,6 +60,11 @@ class OptionsImpl : public Options, public Envoy::Logger::Loggable& + requestSourcePluginConfig() const override { + return request_source_plugin_config_; + } + std::string trace() const override { return trace_; } nighthawk::client::H1ConnectionReuseStrategy::H1ConnectionReuseStrategyOptions h1ConnectionReuseStrategy() const override { @@ -88,6 +93,8 @@ class OptionsImpl : public Options, public Envoy::Logger::Loggable scheduled_start() const override { return scheduled_start_; } private: void parsePredicates(const TCLAP::MultiArg& arg, @@ -116,6 +123,8 @@ class OptionsImpl : public Options, public Envoy::Logger::Loggable transport_socket_; + absl::optional request_source_plugin_config_; + uint32_t max_pending_requests_{0}; // This default is based the minimum recommendation for SETTINGS_MAX_CONCURRENT_STREAMS over at // https://tools.ietf.org/html/rfc7540#section-6.5.2 @@ -142,6 +151,8 @@ class OptionsImpl : public Options, public Envoy::Logger::Loggable stats_sinks_; uint32_t stats_flush_interval_{5}; std::string latency_response_header_name_; + bool allow_envoy_deprecated_v2_api_{false}; + absl::optional scheduled_start_; }; } // namespace Client diff --git a/source/client/output_collector_impl.cc b/source/client/output_collector_impl.cc index 2303c164a..b6749f0c9 100644 --- a/source/client/output_collector_impl.cc +++ b/source/client/output_collector_impl.cc @@ -23,12 +23,19 @@ OutputCollectorImpl::OutputCollectorImpl(Envoy::TimeSource& time_source, const O nighthawk::client::Output OutputCollectorImpl::toProto() const { return output_; } -void OutputCollectorImpl::addResult(absl::string_view name, - const std::vector& statistics, - const std::map& counters, - const std::chrono::nanoseconds execution_duration) { +void OutputCollectorImpl::addResult( + absl::string_view name, const std::vector& statistics, + const std::map& counters, + const std::chrono::nanoseconds execution_duration, + const absl::optional& first_acquisition_time) { auto result = output_.add_results(); result->set_name(name.data(), name.size()); + if (first_acquisition_time.has_value()) { + *(result->mutable_execution_start()) = Envoy::Protobuf::util::TimeUtil::NanosecondsToTimestamp( + std::chrono::duration_cast( + first_acquisition_time.value().time_since_epoch()) + .count()); + } for (auto& statistic : statistics) { // TODO(#292): Looking at if the statistic id ends with "_size" to determine how it should be // serialized is kind of hacky. Maybe we should have a lookup table of sorts, to determine how diff --git a/source/client/output_collector_impl.h b/source/client/output_collector_impl.h index 40109f936..ccc9d5bae 100644 --- a/source/client/output_collector_impl.h +++ b/source/client/output_collector_impl.h @@ -20,7 +20,8 @@ class OutputCollectorImpl : public OutputCollector { void addResult(absl::string_view name, const std::vector& statistics, const std::map& counters, - const std::chrono::nanoseconds execution_duration) override; + const std::chrono::nanoseconds execution_duration, + const absl::optional& first_acquisition_time) override; void setOutput(const nighthawk::client::Output& output) override { output_ = output; } nighthawk::client::Output toProto() const override; diff --git a/source/client/output_formatter_impl.cc b/source/client/output_formatter_impl.cc index aa4eedd96..45101ae54 100644 --- a/source/client/output_formatter_impl.cc +++ b/source/client/output_formatter_impl.cc @@ -143,7 +143,7 @@ std::string ConsoleOutputFormatterImpl::statIdtoFriendlyStatName(absl::string_vi } std::string JsonOutputFormatterImpl::formatProto(const nighthawk::client::Output& output) const { - return Envoy::MessageUtil::getJsonStringFromMessage(output, true, true); + return Envoy::MessageUtil::getJsonStringFromMessageOrDie(output, true, true); } std::string YamlOutputFormatterImpl::formatProto(const nighthawk::client::Output& output) const { @@ -320,7 +320,7 @@ std::string FortioOutputFormatterImpl::formatProto(const nighthawk::client::Outp if (statistic != nullptr) { fortio_output.mutable_headersizes()->CopyFrom(renderFortioDurationHistogram(*statistic)); } - return Envoy::MessageUtil::getJsonStringFromMessage(fortio_output, true, true); + return Envoy::MessageUtil::getJsonStringFromMessageOrDie(fortio_output, true, true); } const nighthawk::client::DurationHistogram FortioOutputFormatterImpl::renderFortioDurationHistogram( @@ -415,4 +415,4 @@ FortioPedanticOutputFormatterImpl::formatProto(const nighthawk::client::Output& } } // namespace Client -} // namespace Nighthawk \ No newline at end of file +} // namespace Nighthawk diff --git a/source/client/process_impl.cc b/source/client/process_impl.cc index 28f11e610..f19f00cfc 100644 --- a/source/client/process_impl.cc +++ b/source/client/process_impl.cc @@ -29,6 +29,7 @@ #include "external/envoy/source/server/server.h" #include "absl/strings/str_replace.h" +#include "absl/types/optional.h" // TODO(oschaaf): See if we can leverage a static module registration like Envoy does to avoid the // ifdefs in this file. @@ -52,8 +53,6 @@ #include "client/options_impl.h" #include "client/sni_utility.h" -#include "ares.h" - using namespace std::chrono_literals; namespace Nighthawk { @@ -65,20 +64,41 @@ class ClusterManagerFactory : public Envoy::Upstream::ProdClusterManagerFactory public: using Envoy::Upstream::ProdClusterManagerFactory::ProdClusterManagerFactory; - Envoy::Http::ConnectionPool::InstancePtr allocateConnPool( - Envoy::Event::Dispatcher& dispatcher, Envoy::Upstream::HostConstSharedPtr host, - Envoy::Upstream::ResourcePriority priority, Envoy::Http::Protocol protocol, - const Envoy::Network::ConnectionSocket::OptionsSharedPtr& options, - const Envoy::Network::TransportSocketOptionsSharedPtr& transport_socket_options) override { + Envoy::Http::ConnectionPool::InstancePtr + allocateConnPool(Envoy::Event::Dispatcher& dispatcher, Envoy::Upstream::HostConstSharedPtr host, + Envoy::Upstream::ResourcePriority priority, + std::vector& protocols, + const Envoy::Network::ConnectionSocket::OptionsSharedPtr& options, + const Envoy::Network::TransportSocketOptionsSharedPtr& transport_socket_options, + Envoy::Upstream::ClusterConnectivityState& state) override { + // This changed in + // https://github.com/envoyproxy/envoy/commit/93ee668a690d297ab5e8bd2cbf03771d852ebbda ALPN may + // be set up to negotiate a protocol, in which case we'd need a HttpConnPoolImplMixed. However, + // our integration tests pass, and for now this might suffice. In case we do run into the need + // for supporting multiple procols in a single pool, ensure we hear about it soon, by asserting. + RELEASE_ASSERT(protocols.size() == 1, "Expected a single protocol in protocols vector."); + const Envoy::Http::Protocol& protocol = protocols[0]; if (protocol == Envoy::Http::Protocol::Http11 || protocol == Envoy::Http::Protocol::Http10) { - auto* h1_pool = - new Http1PoolImpl(dispatcher, host, priority, options, transport_socket_options); + auto* h1_pool = new Http1PoolImpl( + host, priority, dispatcher, options, transport_socket_options, api_.randomGenerator(), + state, + [](Envoy::Http::HttpConnPoolImplBase* pool) { + return std::make_unique(*pool); + }, + [](Envoy::Upstream::Host::CreateConnectionData& data, + Envoy::Http::HttpConnPoolImplBase* pool) { + Envoy::Http::CodecClientPtr codec{new Envoy::Http::CodecClientProd( + Envoy::Http::CodecClient::Type::HTTP1, std::move(data.connection_), + data.host_description_, pool->dispatcher(), pool->randomGenerator())}; + return codec; + }, + protocols); h1_pool->setConnectionReuseStrategy(connection_reuse_strategy_); h1_pool->setPrefetchConnections(prefetch_connections_); return Envoy::Http::ConnectionPool::InstancePtr{h1_pool}; } return Envoy::Upstream::ProdClusterManagerFactory::allocateConnPool( - dispatcher, host, priority, protocol, options, transport_socket_options); + dispatcher, host, priority, protocols, options, transport_socket_options, state); } void setConnectionReuseStrategy( @@ -100,19 +120,23 @@ ProcessImpl::ProcessImpl(const Options& options, Envoy::Event::TimeSystem& time_ : process_wide), time_system_(time_system), stats_allocator_(symbol_table_), store_root_(stats_allocator_), api_(std::make_unique(platform_impl_.threadFactory(), store_root_, - time_system_, platform_impl_.fileSystem())), + time_system_, platform_impl_.fileSystem(), + generator_)), dispatcher_(api_->allocateDispatcher("main_thread")), benchmark_client_factory_(options), termination_predicate_factory_(options), sequencer_factory_(options), - request_generator_factory_(options), options_(options), init_manager_("nh_init_manager"), + request_generator_factory_(options, *api_), options_(options), + init_manager_("nh_init_manager"), local_info_(new Envoy::LocalInfo::LocalInfoImpl( - {}, Envoy::Network::Utility::getLocalAddress(Envoy::Network::Address::IpVersion::v4), + store_root_.symbolTable(), node_, node_context_params_, + Envoy::Network::Utility::getLocalAddress(Envoy::Network::Address::IpVersion::v4), "nighthawk_service_zone", "nighthawk_service_cluster", "nighthawk_service_node")), secret_manager_(config_tracker_), http_context_(store_root_.symbolTable()), grpc_context_(store_root_.symbolTable()), singleton_manager_(std::make_unique(api_->threadFactory())), access_log_manager_(std::chrono::milliseconds(1000), *api_, *dispatcher_, access_log_lock_, store_root_), - init_watcher_("Nighthawk", []() {}), validation_context_(false, false, false) { + init_watcher_("Nighthawk", []() {}), validation_context_(false, false, false), + router_context_(store_root_.symbolTable()) { // Any dispatchers created after the following call will use hr timers. setupForHRTimers(); std::string lower = absl::AsciiStrToLower( @@ -162,32 +186,37 @@ bool ProcessImpl::requestExecutionCancellation() { return true; } -void ProcessImpl::createWorkers(const uint32_t concurrency) { - // TODO(oschaaf): Expose kMinimalDelay in configuration. - const std::chrono::milliseconds kMinimalWorkerDelay = 500ms + (concurrency * 50ms); - ASSERT(workers_.empty()); +Envoy::MonotonicTime +ProcessImpl::computeFirstWorkerStart(Envoy::Event::TimeSystem& time_system, + const absl::optional& scheduled_start, + const uint32_t concurrency) { + const std::chrono::nanoseconds first_worker_delay = + scheduled_start.has_value() ? scheduled_start.value() - time_system.systemTime() + : 500ms + (concurrency * 50ms); + const Envoy::MonotonicTime monotonic_now = time_system.monotonicTime(); + const Envoy::MonotonicTime first_worker_start = monotonic_now + first_worker_delay; + return first_worker_start; +} - // We try to offset the start of each thread so that workers will execute tasks evenly spaced in - // time. Let's assume we have two workers w0/w1, which should maintain a combined global pace of - // 1000Hz. w0 and w1 both run at 500Hz, but ideally their execution is evenly spaced in time, - // and not overlapping. Workers start offsets can be computed like - // "worker_number*(1/global_frequency))", which would yield T0+[0ms, 1ms]. This helps reduce - // batching/queueing effects, both initially, but also by calibrating the linear rate limiter we - // currently have to a precise starting time, which helps later on. - // TODO(oschaaf): Arguably, this ought to be the job of a rate limiter with awareness of the - // global status quo, which we do not have right now. This has been noted in the - // track-for-future issue. - const auto first_worker_start = time_system_.systemTime() + kMinimalWorkerDelay; - const double inter_worker_delay_usec = - (1. / options_.requestsPerSecond()) * 1000000 / concurrency; +std::chrono::nanoseconds ProcessImpl::computeInterWorkerDelay(const uint32_t concurrency, + const uint32_t rps) { + const double inter_worker_delay_usec = (1. / rps) * 1000000 / concurrency; + return std::chrono::duration_cast(inter_worker_delay_usec * 1us); +} + +void ProcessImpl::createWorkers(const uint32_t concurrency, + const absl::optional& scheduled_start) { + ASSERT(workers_.empty()); + const Envoy::MonotonicTime first_worker_start = + computeFirstWorkerStart(time_system_, scheduled_start, concurrency); + const std::chrono::nanoseconds inter_worker_delay = + computeInterWorkerDelay(concurrency, options_.requestsPerSecond()); int worker_number = 0; while (workers_.size() < concurrency) { - const auto worker_delay = std::chrono::duration_cast( - ((inter_worker_delay_usec * worker_number) * 1us)); workers_.push_back(std::make_unique( *api_, tls_, cluster_manager_, benchmark_client_factory_, termination_predicate_factory_, sequencer_factory_, request_generator_factory_, store_root_, worker_number, - first_worker_start + worker_delay, http_tracer_, + first_worker_start + (inter_worker_delay * worker_number), http_tracer_, options_.simpleWarmup() ? ClientWorkerImpl::HardCodedWarmupStyle::ON : ClientWorkerImpl::HardCodedWarmupStyle::OFF)); worker_number++; @@ -268,10 +297,30 @@ ProcessImpl::mergeWorkerStatistics(const std::vector& workers) return merged_statistics; } +void ProcessImpl::allowEnvoyDeprecatedV2Api(envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* admin_layer = bootstrap.mutable_layered_runtime()->add_layers(); + admin_layer->set_name("admin layer"); + admin_layer->mutable_admin_layer(); + envoy::config::bootstrap::v3::RuntimeLayer* runtime_layer = + bootstrap.mutable_layered_runtime()->add_layers(); + runtime_layer->set_name("static_layer"); + Envoy::ProtobufWkt::Value proto_true; + proto_true.set_string_value("true"); + (*runtime_layer->mutable_static_layer() + ->mutable_fields())["envoy.reloadable_features.enable_deprecated_v2_api"] = proto_true; + (*runtime_layer->mutable_static_layer() + ->mutable_fields())["envoy.reloadable_features.allow_prefetch"] = proto_true; +} + void ProcessImpl::createBootstrapConfiguration(envoy::config::bootstrap::v3::Bootstrap& bootstrap, const std::vector& uris, const UriPtr& request_source_uri, - int number_of_clusters) const { + int number_of_clusters, + bool allow_envoy_deprecated_v2_api) const { + if (allow_envoy_deprecated_v2_api) { + allowEnvoyDeprecatedV2Api(bootstrap); + } + for (int i = 0; i < number_of_clusters; i++) { auto* cluster = bootstrap.mutable_static_resources()->add_clusters(); RELEASE_ASSERT(!uris.empty(), "illegal configuration with zero endpoints"); @@ -443,7 +492,13 @@ void ProcessImpl::setupStatsSinks(const envoy::config::bootstrap::v3::Bootstrap& } bool ProcessImpl::runInternal(OutputCollector& collector, const std::vector& uris, - const UriPtr& request_source_uri, const UriPtr& tracing_uri) { + const UriPtr& request_source_uri, const UriPtr& tracing_uri, + const absl::optional& scheduled_start) { + const Envoy::SystemTime now = time_system_.systemTime(); + if (scheduled_start.value_or(now) < now) { + ENVOY_LOG(error, "Scheduled execution date already transpired."); + return false; + } { auto guard = std::make_unique(workers_lock_); if (cancelled_) { @@ -452,28 +507,29 @@ bool ProcessImpl::runInternal(OutputCollector& collector, const std::vector( Envoy::Runtime::LoaderPtr{new Envoy::Runtime::LoaderImpl( - *dispatcher_, tls_, {}, *local_info_, store_root_, generator_, + *dispatcher_, tls_, bootstrap.layered_runtime(), *local_info_, store_root_, generator_, Envoy::ProtobufMessage::getStrictValidationVisitor(), *api_)}); ssl_context_manager_ = std::make_unique( time_system_); cluster_manager_factory_ = std::make_unique( - admin_, Envoy::Runtime::LoaderSingleton::get(), store_root_, tls_, generator_, + admin_, Envoy::Runtime::LoaderSingleton::get(), store_root_, tls_, dispatcher_->createDnsResolver({}, false), *ssl_context_manager_, *dispatcher_, *local_info_, secret_manager_, validation_context_, *api_, http_context_, grpc_context_, - access_log_manager_, *singleton_manager_); + router_context_, access_log_manager_, *singleton_manager_); cluster_manager_factory_->setConnectionReuseStrategy( options_.h1ConnectionReuseStrategy() == nighthawk::client::H1ConnectionReuseStrategy::LRU ? Http1PoolImpl::ConnectionReuseStrategy::LRU @@ -520,15 +576,26 @@ bool ProcessImpl::runInternal(OutputCollector& collector, const std::vector first_acquisition_time = absl::nullopt; + for (auto& worker : workers_) { auto sequencer_execution_duration = worker->phase().sequencer().executionDuration(); + absl::optional worker_first_acquisition_time = + worker->phase().sequencer().rate_limiter().firstAcquisitionTime(); + if (worker_first_acquisition_time.has_value()) { + first_acquisition_time = + first_acquisition_time.has_value() + ? std::min(first_acquisition_time.value(), worker_first_acquisition_time.value()) + : worker_first_acquisition_time.value(); + } // We don't write per-worker results if we only have a single worker, because the global // results will be precisely the same. if (workers_.size() > 1) { StatisticFactoryImpl statistic_factory(options_); collector.addResult(fmt::format("worker_{}", i), vectorizeStatisticPtrMap(worker->statistics()), - worker->threadLocalCounterValues(), sequencer_execution_duration); + worker->threadLocalCounterValues(), sequencer_execution_duration, + worker_first_acquisition_time); } total_execution_duration += sequencer_execution_duration; i++; @@ -543,7 +610,7 @@ bool ProcessImpl::runInternal(OutputCollector& collector, const std::vector 0; }); StatisticFactoryImpl statistic_factory(options_); collector.addResult("global", mergeWorkerStatistics(workers_), counters, - total_execution_duration / workers_.size()); + total_execution_duration / workers_.size(), first_acquisition_time); return counters.find("sequencer.failed_terminations") == counters.end(); } @@ -583,7 +650,8 @@ bool ProcessImpl::run(OutputCollector& collector) { } try { - return runInternal(collector, uris, request_source_uri, tracing_uri); + return runInternal(collector, uris, request_source_uri, tracing_uri, + options_.scheduled_start()); } catch (Envoy::EnvoyException& ex) { ENVOY_LOG(error, "Fatal exception: {}", ex.what()); throw; diff --git a/source/client/process_impl.h b/source/client/process_impl.h index f09bb05cd..5936007fc 100644 --- a/source/client/process_impl.h +++ b/source/client/process_impl.h @@ -21,6 +21,7 @@ #include "external/envoy/source/common/grpc/context_impl.h" #include "external/envoy/source/common/http/context_impl.h" #include "external/envoy/source/common/protobuf/message_validator_impl.h" +#include "external/envoy/source/common/router/context_impl.h" #include "external/envoy/source/common/secret/secret_manager_impl.h" #include "external/envoy/source/common/stats/allocator_impl.h" #include "external/envoy/source/common/stats/thread_local_store.h" @@ -30,6 +31,7 @@ #include "external/envoy/source/exe/process_wide.h" #include "external/envoy/source/extensions/transport_sockets/tls/context_manager_impl.h" #include "external/envoy/source/server/config_validation/admin.h" +#include "external/envoy_api/envoy/config/bootstrap/v3/bootstrap.pb.h" #include "client/benchmark_client_impl.h" #include "client/factories_impl.h" @@ -84,6 +86,12 @@ class ProcessImpl : public Process, public Envoy::Logger::Loggable& uris, - const UriPtr& request_source_uri, int number_of_workers) const; + const UriPtr& request_source_uri, int number_of_workers, + bool allow_envoy_deprecated_v2_api) const; void maybeCreateTracingDriver(const envoy::config::trace::v3::Tracing& configuration); - void configureComponentLogLevels(spdlog::level::level_enum level); /** * Prepare the ProcessImpl instance by creating and configuring the workers it needs for execution @@ -109,7 +117,7 @@ class ProcessImpl : public Process, public Envoy::Logger::Loggable& schedule); std::vector vectorizeStatisticPtrMap(const StatisticPtrMap& statistics) const; std::vector mergeWorkerStatistics(const std::vector& workers) const; @@ -124,8 +132,43 @@ class ProcessImpl : public Process, public Envoy::Logger::Loggable>& stats_sinks); bool runInternal(OutputCollector& collector, const std::vector& uris, - const UriPtr& request_source_uri, const UriPtr& tracing_uri); + const UriPtr& request_source_uri, const UriPtr& tracing_uri, + const absl::optional& schedule); + + /** + * Compute the offset at which execution should start. We adhere to the scheduled start passed in + * as an argument when specified, otherwise we need a delay that will be sufficient for all the + * workers to get up and running. + * + * @param time_system Time system used to obtain the current time. + * @param scheduled_start Optional scheduled start. + * @param concurrency The number of workers that will be used during execution. + * @return Envoy::MonotonicTime Time at which execution should start. + */ + static Envoy::MonotonicTime + computeFirstWorkerStart(Envoy::Event::TimeSystem& time_system, + const absl::optional& scheduled_start, + const uint32_t concurrency); + + /** + * We offset the start of each thread so that workers will execute tasks evenly spaced in + * time. Let's assume we have two workers w0/w1, which should maintain a combined global pace of + * 1000Hz. w0 and w1 both run at 500Hz, but ideally their execution is evenly spaced in time, + * and not overlapping. Workers start offsets can be computed like + * "worker_number*(1/global_frequency))", which would yield T0+[0ms, 1ms]. This helps reduce + * batching/queueing effects, both initially, but also by calibrating the linear rate limiter we + * currently have to a precise starting time, which helps later on. + * + * @param concurrency The number of workers that will be used during execution. + * @param rps Anticipated requests per second during execution. + * @return std::chrono::nanoseconds The delay that should be used as an offset between each + * independent worker execution start. + */ + static std::chrono::nanoseconds computeInterWorkerDelay(const uint32_t concurrency, + const uint32_t rps); + const envoy::config::core::v3::Node node_; + const Envoy::Protobuf::RepeatedPtrField node_context_params_; std::shared_ptr process_wide_; Envoy::PlatformImpl platform_impl_; Envoy::Event::TimeSystem& time_system_; @@ -167,6 +210,7 @@ class ProcessImpl : public Process, public Envoy::Logger::Loggable flush_worker_; + Envoy::Router::ContextImpl router_context_; }; } // namespace Client diff --git a/source/client/service_impl.cc b/source/client/service_impl.cc index 693955a79..fda3d69d7 100644 --- a/source/client/service_impl.cc +++ b/source/client/service_impl.cc @@ -2,6 +2,8 @@ #include +#include "envoy/config/core/v3/base.pb.h" + #include "common/request_source_impl.h" #include "client/client.h" @@ -106,7 +108,7 @@ ::grpc::Status ServiceImpl::ExecutionStream( } namespace { -void addHeader(envoy::api::v2::core::HeaderMap* map, absl::string_view key, +void addHeader(envoy::config::core::v3::HeaderMap* map, absl::string_view key, absl::string_view value) { auto* request_header = map->add_headers(); request_header->set_key(std::string(key)); @@ -144,7 +146,7 @@ ::grpc::Status RequestSourceServiceImpl::RequestStream( HeaderMapPtr headers = request->header(); nighthawk::request_source::RequestStreamResponse response; auto* request_specifier = response.mutable_request_specifier(); - auto* request_headers = request_specifier->mutable_headers(); + auto* request_headers = request_specifier->mutable_v3_headers(); headers->iterate([&request_headers](const Envoy::Http::HeaderEntry& header) -> Envoy::Http::HeaderMap::Iterate { addHeader(request_headers, header.key().getStringView(), header.value().getStringView()); @@ -163,4 +165,4 @@ ::grpc::Status RequestSourceServiceImpl::RequestStream( } } // namespace Client -} // namespace Nighthawk \ No newline at end of file +} // namespace Nighthawk diff --git a/source/client/stream_decoder.cc b/source/client/stream_decoder.cc index 21bf877f9..c57555950 100644 --- a/source/client/stream_decoder.cc +++ b/source/client/stream_decoder.cc @@ -21,9 +21,11 @@ void StreamDecoder::decodeHeaders(Envoy::Http::ResponseHeaderMapPtr&& headers, b stream_info_.response_code_ = static_cast(response_code); if (!latency_response_header_name_.empty()) { const auto timing_header_name = Envoy::Http::LowerCaseString(latency_response_header_name_); - const Envoy::Http::HeaderEntry* timing_header = response_headers_->get(timing_header_name); - if (timing_header != nullptr) { - absl::string_view timing_value = timing_header->value().getStringView(); + const Envoy::Http::HeaderMap::GetResult& timing_header = + response_headers_->get(timing_header_name); + if (!timing_header.empty()) { + absl::string_view timing_value = + timing_header.size() == 1 ? timing_header[0]->value().getStringView() : "multiple values"; int64_t origin_delta; if (absl::SimpleAtoi(timing_value, &origin_delta) && origin_delta >= 0) { origin_latency_statistic_.addValue(origin_delta); @@ -101,11 +103,19 @@ void StreamDecoder::onPoolFailure(Envoy::Http::ConnectionPool::PoolFailureReason void StreamDecoder::onPoolReady(Envoy::Http::RequestEncoder& encoder, Envoy::Upstream::HostDescriptionConstSharedPtr, - const Envoy::StreamInfo::StreamInfo&) { + const Envoy::StreamInfo::StreamInfo&, + absl::optional) { // Make sure we hear about stream resets on the encoder. encoder.getStream().addCallbacks(*this); upstream_timing_.onFirstUpstreamTxByteSent(time_source_); // XXX(oschaaf): is this correct? - encoder.encodeHeaders(*request_headers_, request_body_size_ == 0); + const Envoy::Http::Status status = + encoder.encodeHeaders(*request_headers_, request_body_size_ == 0); + if (!status.ok()) { + ENVOY_LOG_EVERY_POW_2(error, + "Request header encoding failure. Might be missing one or more required " + "HTTP headers in {}.", + request_headers_); + } if (request_body_size_ > 0) { // TODO(https://github.com/envoyproxy/nighthawk/issues/138): This will show up in the zipkin UI // as 'response_size'. We add it here, optimistically assuming it will all be send. Ideally, @@ -142,6 +152,7 @@ StreamDecoder::streamResetReasonToResponseFlag(Envoy::Http::StreamResetReason re return Envoy::StreamInfo::ResponseFlag::LocalReset; case Envoy::Http::StreamResetReason::Overflow: return Envoy::StreamInfo::ResponseFlag::UpstreamOverflow; + case Envoy::Http::StreamResetReason::ConnectError: case Envoy::Http::StreamResetReason::RemoteReset: case Envoy::Http::StreamResetReason::RemoteRefusedStreamReset: return Envoy::StreamInfo::ResponseFlag::UpstreamRemoteReset; @@ -171,11 +182,7 @@ void StreamDecoder::setupForTracing() { // segfault without it. const auto remote_address = Envoy::Network::Address::InstanceConstSharedPtr{ new Envoy::Network::Address::Ipv4Instance("127.0.0.1")}; - stream_info_.setDownstreamDirectRemoteAddress(remote_address); - // For good measure, we also set DownstreamRemoteAddress, as the associated getter will crash - // if we don't. So this is just in case anyone calls that (or Envoy starts doing so in the - // future). - stream_info_.setDownstreamRemoteAddress(remote_address); + downstream_address_setter_->setDirectRemoteAddressForTest(remote_address); } } // namespace Client diff --git a/source/client/stream_decoder.h b/source/client/stream_decoder.h index f641d171f..cdb0653a0 100644 --- a/source/client/stream_decoder.h +++ b/source/client/stream_decoder.h @@ -57,9 +57,12 @@ class StreamDecoder : public Envoy::Http::ResponseDecoder, origin_latency_statistic_(origin_latency_statistic), request_headers_(std::move(request_headers)), connect_start_(time_source_.monotonicTime()), complete_(false), measure_latencies_(measure_latencies), - request_body_size_(request_body_size), stream_info_(time_source_), - random_generator_(random_generator), http_tracer_(http_tracer), - latency_response_header_name_(latency_response_header_name) { + request_body_size_(request_body_size), + downstream_address_setter_(std::make_shared( + // The two addresses aren't used in an execution of Nighthawk. + /* downstream_local_address = */ nullptr, /* downstream_remote_address = */ nullptr)), + stream_info_(time_source_, downstream_address_setter_), random_generator_(random_generator), + http_tracer_(http_tracer), latency_response_header_name_(latency_response_header_name) { if (measure_latencies_ && http_tracer_ != nullptr) { setupForTracing(); } @@ -84,7 +87,8 @@ class StreamDecoder : public Envoy::Http::ResponseDecoder, Envoy::Upstream::HostDescriptionConstSharedPtr host) override; void onPoolReady(Envoy::Http::RequestEncoder& encoder, Envoy::Upstream::HostDescriptionConstSharedPtr host, - const Envoy::StreamInfo::StreamInfo& stream_info) override; + const Envoy::StreamInfo::StreamInfo& stream_info, + absl::optional protocol) override; static Envoy::StreamInfo::ResponseFlag streamResetReasonToResponseFlag(Envoy::Http::StreamResetReason reset_reason); @@ -116,6 +120,7 @@ class StreamDecoder : public Envoy::Http::ResponseDecoder, bool measure_latencies_; const uint32_t request_body_size_; Envoy::Tracing::EgressConfigImpl config_; + std::shared_ptr downstream_address_setter_; Envoy::StreamInfo::StreamInfoImpl stream_info_; Envoy::Random::RandomGenerator& random_generator_; Envoy::Tracing::HttpTracerSharedPtr& http_tracer_; diff --git a/source/common/BUILD b/source/common/BUILD index fd8cc3701..951c2a411 100644 --- a/source/common/BUILD +++ b/source/common/BUILD @@ -60,6 +60,8 @@ envoy_cc_library( "@envoy//source/common/grpc:typed_async_client_lib_with_external_headers", "@envoy//source/common/http:header_map_lib_with_external_headers", "@envoy//source/common/http:headers_lib_with_external_headers", + "@envoy_api//envoy/api/v2/core:pkg_cc_proto", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/source/common/rate_limiter_impl.cc b/source/common/rate_limiter_impl.cc index 7d4aad4fc..15f1fd71b 100644 --- a/source/common/rate_limiter_impl.cc +++ b/source/common/rate_limiter_impl.cc @@ -53,16 +53,16 @@ void BurstingRateLimiter::releaseOne() { } ScheduledStartingRateLimiter::ScheduledStartingRateLimiter( - RateLimiterPtr&& rate_limiter, const Envoy::SystemTime scheduled_starting_time) + RateLimiterPtr&& rate_limiter, const Envoy::MonotonicTime scheduled_starting_time) : ForwardingRateLimiterImpl(std::move(rate_limiter)), scheduled_starting_time_(scheduled_starting_time) { - if (timeSource().systemTime() >= scheduled_starting_time_) { + if (timeSource().monotonicTime() >= scheduled_starting_time_) { ENVOY_LOG(error, "Scheduled starting time exceeded. This may cause unintended bursty traffic."); } } bool ScheduledStartingRateLimiter::tryAcquireOne() { - if (timeSource().systemTime() < scheduled_starting_time_) { + if (timeSource().monotonicTime() < scheduled_starting_time_) { aquisition_attempted_ = true; return false; } @@ -76,7 +76,7 @@ bool ScheduledStartingRateLimiter::tryAcquireOne() { } void ScheduledStartingRateLimiter::releaseOne() { - if (timeSource().systemTime() < scheduled_starting_time_) { + if (timeSource().monotonicTime() < scheduled_starting_time_) { throw NighthawkException("Unexpected call to releaseOne()"); } return rate_limiter_->releaseOne(); diff --git a/source/common/rate_limiter_impl.h b/source/common/rate_limiter_impl.h index 70a42e0ae..1d22e5a43 100644 --- a/source/common/rate_limiter_impl.h +++ b/source/common/rate_limiter_impl.h @@ -30,14 +30,20 @@ class RateLimiterBaseImpl : public RateLimiter { // TODO(oschaaf): consider adding an explicit start() call to the interface. const auto now = time_source_.monotonicTime(); if (start_time_ == absl::nullopt) { + first_acquisition_time_ = time_source_.systemTime(); start_time_ = now; } return now - start_time_.value(); } + absl::optional firstAcquisitionTime() const override { + return first_acquisition_time_; + } + private: Envoy::TimeSource& time_source_; absl::optional start_time_; + absl::optional first_acquisition_time_; }; /** @@ -86,6 +92,9 @@ class ForwardingRateLimiterImpl : public RateLimiter { : rate_limiter_(std::move(rate_limiter)) {} Envoy::TimeSource& timeSource() override { return rate_limiter_->timeSource(); } std::chrono::nanoseconds elapsed() override { return rate_limiter_->elapsed(); } + absl::optional firstAcquisitionTime() const override { + return rate_limiter_->firstAcquisitionTime(); + } protected: const RateLimiterPtr rate_limiter_; @@ -125,12 +134,12 @@ class ScheduledStartingRateLimiter : public ForwardingRateLimiterImpl, * @param scheduled_starting_time The starting time */ ScheduledStartingRateLimiter(RateLimiterPtr&& rate_limiter, - const Envoy::SystemTime scheduled_starting_time); + const Envoy::MonotonicTime scheduled_starting_time); bool tryAcquireOne() override; void releaseOne() override; private: - const Envoy::SystemTime scheduled_starting_time_; + const Envoy::MonotonicTime scheduled_starting_time_; bool aquisition_attempted_{false}; }; diff --git a/source/common/request_source_impl.cc b/source/common/request_source_impl.cc index d7a23bf9d..c2ce7d4b5 100644 --- a/source/common/request_source_impl.cc +++ b/source/common/request_source_impl.cc @@ -36,7 +36,8 @@ RemoteRequestSourceImpl::RemoteRequestSourceImpl( void RemoteRequestSourceImpl::connectToRequestStreamGrpcService() { Envoy::TimeSource& time_source = dispatcher_.timeSource(); const auto clusters = cluster_manager_->clusters(); - const bool have_cluster = clusters.find(service_cluster_name_) != clusters.end(); + const bool have_cluster = + clusters.active_clusters_.find(service_cluster_name_) != clusters.active_clusters_.end(); ASSERT(have_cluster); const std::chrono::seconds STREAM_SETUP_TIMEOUT = 60s; envoy::config::core::v3::GrpcService grpc_service; diff --git a/source/common/request_stream_grpc_client_impl.cc b/source/common/request_stream_grpc_client_impl.cc index da0095042..31db65a69 100644 --- a/source/common/request_stream_grpc_client_impl.cc +++ b/source/common/request_stream_grpc_client_impl.cc @@ -2,16 +2,22 @@ #include +#include "envoy/api/v2/core/base.pb.h" +#include "envoy/config/core/v3/base.pb.h" #include "envoy/stats/scope.h" #include "external/envoy/source/common/common/assert.h" #include "external/envoy/source/common/http/header_map_impl.h" #include "external/envoy/source/common/http/headers.h" +#include "api/request_source/service.pb.h" + #include "common/request_impl.h" namespace Nighthawk { +using ::nighthawk::request_source::RequestSpecifier; + const std::string RequestStreamGrpcClientImpl::METHOD_NAME = "nighthawk.request_source.NighthawkRequestSourceService.RequestStream"; @@ -75,15 +81,27 @@ RequestPtr ProtoRequestHelper::messageToRequest( RequestPtr request = std::make_unique(header); if (message.has_request_specifier()) { - const auto& request_specifier = message.request_specifier(); - if (request_specifier.has_headers()) { - const auto& message_request_headers = request_specifier.headers(); - for (const auto& message_header : message_request_headers.headers()) { + const RequestSpecifier& request_specifier = message.request_specifier(); + + if (request_specifier.has_v3_headers()) { + const envoy::config::core::v3::HeaderMap& message_request_headers = + request_specifier.v3_headers(); + for (const envoy::config::core::v3::HeaderValue& message_header : + message_request_headers.headers()) { + Envoy::Http::LowerCaseString header_name(message_header.key()); + header->remove(header_name); + header->addCopy(header_name, message_header.value()); + } + } else if (request_specifier.has_headers()) { + const envoy::api::v2::core::HeaderMap& message_request_headers = request_specifier.headers(); + for (const envoy::api::v2::core::HeaderValue& message_header : + message_request_headers.headers()) { Envoy::Http::LowerCaseString header_name(message_header.key()); header->remove(header_name); header->addCopy(header_name, message_header.value()); } } + if (request_specifier.has_content_length()) { std::string s_content_length = absl::StrCat("", request_specifier.content_length().value()); header->remove(Envoy::Http::Headers::get().ContentLength); diff --git a/source/common/sequencer_impl.h b/source/common/sequencer_impl.h index ff226b6d3..40f245d98 100644 --- a/source/common/sequencer_impl.h +++ b/source/common/sequencer_impl.h @@ -62,6 +62,8 @@ class SequencerImpl : public Sequencer, public Envoy::Logger::Loggableelapsed(); } + const RateLimiter& rate_limiter() const override { return *rate_limiter_; } + double completionsPerSecond() const override { const double usec = std::chrono::duration_cast(executionDuration()).count(); diff --git a/source/common/signal_handler.cc b/source/common/signal_handler.cc index aa9316e66..59c86bc11 100644 --- a/source/common/signal_handler.cc +++ b/source/common/signal_handler.cc @@ -23,7 +23,9 @@ SignalHandler::SignalHandler(const std::function& signal_callback) { RELEASE_ASSERT(close(pipe_fds_[0]) == 0, "read side close failed"); RELEASE_ASSERT(close(pipe_fds_[1]) == 0, "write side close failed"); pipe_fds_.clear(); - signal_callback(); + if (!destructing_) { + signal_callback(); + } }); signal_handler_delegate = [this](int) { onSignal(); }; @@ -32,6 +34,7 @@ SignalHandler::SignalHandler(const std::function& signal_callback) { } SignalHandler::~SignalHandler() { + destructing_ = true; initiateShutdown(); if (shutdown_thread_.joinable()) { shutdown_thread_.join(); @@ -47,4 +50,4 @@ void SignalHandler::initiateShutdown() { void SignalHandler::onSignal() { initiateShutdown(); } -} // namespace Nighthawk \ No newline at end of file +} // namespace Nighthawk diff --git a/source/common/signal_handler.h b/source/common/signal_handler.h index 3ff36374f..c69a45869 100644 --- a/source/common/signal_handler.h +++ b/source/common/signal_handler.h @@ -69,8 +69,9 @@ class SignalHandler final : public Envoy::Logger::Loggable pipe_fds_; + bool destructing_{false}; }; using SignalHandlerPtr = std::unique_ptr; -} // namespace Nighthawk \ No newline at end of file +} // namespace Nighthawk diff --git a/source/common/statistic_impl.cc b/source/common/statistic_impl.cc index 0810b99ae..1a0067066 100644 --- a/source/common/statistic_impl.cc +++ b/source/common/statistic_impl.cc @@ -2,8 +2,10 @@ #include #include +#include #include +#include "external/dep_hdrhistogram_c/src/hdr_histogram_log.h" #include "external/envoy/source/common/common/assert.h" #include "external/envoy/source/common/protobuf/utility.h" @@ -68,6 +70,14 @@ uint64_t StatisticImpl::min() const { return min_; }; uint64_t StatisticImpl::max() const { return max_; }; +absl::StatusOr> StatisticImpl::serializeNative() const { + return absl::Status(absl::StatusCode::kUnimplemented, "serializeNative not implemented."); +} + +absl::Status StatisticImpl::deserializeNative(std::istream&) { + return absl::Status(absl::StatusCode::kUnimplemented, "deserializeNative not implemented."); +} + void SimpleStatistic::addValue(uint64_t value) { StatisticImpl::addValue(value); sum_x_ += value; @@ -236,6 +246,35 @@ nighthawk::client::Statistic HdrStatistic::toProto(SerializationDomain domain) c return proto; } +absl::StatusOr> HdrStatistic::serializeNative() const { + char* data; + if (hdr_log_encode(histogram_, &data) == 0) { + auto write_stream = std::make_unique(); + *write_stream << absl::string_view(data, strlen(data)); + // Free the memory allocated by hrd_log_encode. + free(data); + return write_stream; + } + ENVOY_LOG(error, "Failed to write HdrHistogram data."); + return absl::Status(absl::StatusCode::kInternal, "Failed to write HdrHistogram data"); +} + +absl::Status HdrStatistic::deserializeNative(std::istream& stream) { + std::string s(std::istreambuf_iterator(stream), {}); + struct hdr_histogram* new_histogram = nullptr; + // hdr_log_decode allocates memory for the new hdr histogram. + if (hdr_log_decode(&new_histogram, const_cast(s.c_str()), s.length()) == 0) { + // Free the memory allocated by our current hdr histogram. + hdr_close(histogram_); + // Swap in the new histogram. + // NOTE: Our destructor will eventually call hdr_close on the new one. + histogram_ = new_histogram; + return absl::OkStatus(); + } + ENVOY_LOG(error, "Failed to read back HdrHistogram data."); + return absl::Status(absl::StatusCode::kInternal, "Failed to read back HdrHistogram data"); +} + CircllhistStatistic::CircllhistStatistic() { histogram_ = hist_alloc(); ASSERT(histogram_ != nullptr); diff --git a/source/common/statistic_impl.h b/source/common/statistic_impl.h index ea6883750..0dcdab7a8 100644 --- a/source/common/statistic_impl.h +++ b/source/common/statistic_impl.h @@ -26,6 +26,8 @@ class StatisticImpl : public Statistic, public Envoy::Logger::Loggable> serializeNative() const override; + absl::Status deserializeNative(std::istream&) override; protected: std::string id_; @@ -146,6 +148,9 @@ class HdrStatistic : public StatisticImpl { return std::make_unique(); }; + absl::StatusOr> serializeNative() const override; + absl::Status deserializeNative(std::istream&) override; + private: static const int SignificantDigits; struct hdr_histogram* histogram_; diff --git a/source/common/termination_predicate_impl.cc b/source/common/termination_predicate_impl.cc index d468562f8..d32f2006b 100644 --- a/source/common/termination_predicate_impl.cc +++ b/source/common/termination_predicate_impl.cc @@ -16,8 +16,8 @@ TerminationPredicate::Status TerminationPredicateBaseImpl::evaluateChain() { } TerminationPredicate::Status DurationTerminationPredicateImpl::evaluate() { - return time_source_.systemTime() - start_ > duration_ ? TerminationPredicate::Status::TERMINATE - : TerminationPredicate::Status::PROCEED; + return time_source_.monotonicTime() - start_ > duration_ ? TerminationPredicate::Status::TERMINATE + : TerminationPredicate::Status::PROCEED; } TerminationPredicate::Status StatsCounterAbsoluteThresholdTerminationPredicateImpl::evaluate() { diff --git a/source/common/termination_predicate_impl.h b/source/common/termination_predicate_impl.h index 9a23a8f02..c1c761345 100644 --- a/source/common/termination_predicate_impl.h +++ b/source/common/termination_predicate_impl.h @@ -35,13 +35,13 @@ class DurationTerminationPredicateImpl : public TerminationPredicateBaseImpl { public: DurationTerminationPredicateImpl(Envoy::TimeSource& time_source, std::chrono::microseconds duration, - const Envoy::SystemTime start) + const Envoy::MonotonicTime start) : time_source_(time_source), start_(start), duration_(duration) {} TerminationPredicate::Status evaluate() override; private: Envoy::TimeSource& time_source_; - const Envoy::SystemTime start_; + const Envoy::MonotonicTime start_; std::chrono::microseconds duration_; }; diff --git a/source/common/thread_safe_monotonic_time_stopwatch.h b/source/common/thread_safe_monotonic_time_stopwatch.h index 2e7b57d44..0d2e1e487 100644 --- a/source/common/thread_safe_monotonic_time_stopwatch.h +++ b/source/common/thread_safe_monotonic_time_stopwatch.h @@ -34,7 +34,7 @@ class ThreadSafeMontonicTimeStopwatch : public Stopwatch { private: Envoy::Thread::MutexBasicLockable lock_; - Envoy::MonotonicTime start_ GUARDED_BY(lock_); + Envoy::MonotonicTime start_ ABSL_GUARDED_BY(lock_); }; -} // namespace Nighthawk \ No newline at end of file +} // namespace Nighthawk diff --git a/source/common/utility.cc b/source/common/utility.cc index 922aa5249..2e9430309 100644 --- a/source/common/utility.cc +++ b/source/common/utility.cc @@ -38,9 +38,9 @@ Utility::mapCountersFromStore(const Envoy::Stats::Store& store, size_t Utility::findPortSeparator(absl::string_view hostname) { if (hostname.size() > 0 && hostname[0] == '[') { - return hostname.find(":", hostname.find(']')); + return hostname.find(':', hostname.find(']')); } - return hostname.rfind(":"); + return hostname.rfind(':'); } Envoy::Network::DnsLookupFamily diff --git a/source/exe/BUILD b/source/exe/BUILD index d0385f278..18b672bad 100644 --- a/source/exe/BUILD +++ b/source/exe/BUILD @@ -8,6 +8,23 @@ licenses(["notice"]) # Apache 2 envoy_package() +envoy_cc_library( + name = "adaptive_load_client_entry_lib", + srcs = ["adaptive_load_client_main_entry.cc"], + external_deps = [ + "abseil_symbolize", + ], + repository = "@envoy", + visibility = ["//visibility:public"], + deps = [ + "//source/adaptive_load:adaptive_load_client_main", + "//source/common:nighthawk_service_client_impl", + "//source/common:version_linkstamp", + "@envoy//source/exe:platform_header_lib_with_external_headers", + "@envoy//source/exe:platform_impl_lib", + ], +) + envoy_cc_library( name = "nighthawk_client_entry_lib", srcs = ["client_main_entry.cc"], diff --git a/source/exe/adaptive_load_client_main_entry.cc b/source/exe/adaptive_load_client_main_entry.cc new file mode 100644 index 000000000..67d45b434 --- /dev/null +++ b/source/exe/adaptive_load_client_main_entry.cc @@ -0,0 +1,45 @@ +// Command line adaptive load controller driving a Nighthawk Service. +#include + +#include "nighthawk/common/exception.h" + +#include "external/envoy/source/common/event/real_time_system.h" +#include "external/envoy/source/exe/platform_impl.h" + +#include "common/nighthawk_service_client_impl.h" + +#include "absl/debugging/symbolize.h" +#include "adaptive_load/adaptive_load_client_main.h" +#include "adaptive_load/adaptive_load_controller_impl.h" +#include "adaptive_load/metrics_evaluator_impl.h" +#include "adaptive_load/session_spec_proto_helper_impl.h" + +// NOLINT(namespace-nighthawk) + +int main(int argc, char* argv[]) { +#ifndef __APPLE__ + // absl::Symbolize mostly works without this, but this improves corner case + // handling, such as running in a chroot jail. + absl::InitializeSymbolizer(argv[0]); +#endif + Nighthawk::NighthawkServiceClientImpl nighthawk_service_client; + Nighthawk::MetricsEvaluatorImpl metrics_evaluator; + Nighthawk::AdaptiveLoadSessionSpecProtoHelperImpl spec_proto_helper; + Envoy::Event::RealTimeSystem time_system; // NO_CHECK_FORMAT(real_time) + Nighthawk::AdaptiveLoadControllerImpl controller(nighthawk_service_client, metrics_evaluator, + spec_proto_helper, time_system); + Envoy::PlatformImpl platform_impl; + try { + Nighthawk::AdaptiveLoadClientMain program(argc, argv, controller, platform_impl.fileSystem()); + return program.Run(); + } catch (const Nighthawk::Client::NoServingException& e) { + return EXIT_SUCCESS; + } catch (const Nighthawk::Client::MalformedArgvException& e) { + std::cerr << "Invalid args: " << e.what() << std::endl; + return EXIT_FAILURE; + } catch (const Nighthawk::NighthawkException& e) { + std::cerr << "Failure: " << e.what() << std::endl; + return EXIT_FAILURE; + } + return 0; +} diff --git a/source/request_source/BUILD b/source/request_source/BUILD new file mode 100644 index 000000000..9fdbf5151 --- /dev/null +++ b/source/request_source/BUILD @@ -0,0 +1,33 @@ +load( + "@envoy//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "request_options_list_plugin_impl", + srcs = [ + "request_options_list_plugin_impl.cc", + ], + hdrs = [ + "request_options_list_plugin_impl.h", + ], + repository = "@envoy", + visibility = ["//visibility:public"], + deps = [ + "//include/nighthawk/request_source:request_source_plugin_config_factory_lib", + "//source/common:nighthawk_common_lib", + "//source/common:request_impl_lib", + "//source/common:request_source_impl_lib", + "@envoy//source/common/common:thread_lib_with_external_headers", + "@envoy//source/common/protobuf:message_validator_lib_with_external_headers", + "@envoy//source/common/protobuf:protobuf_with_external_headers", + "@envoy//source/common/protobuf:utility_lib_with_external_headers", + "@envoy//source/exe:platform_header_lib_with_external_headers", + "@envoy//source/exe:platform_impl_lib", + ], +) diff --git a/source/request_source/request_options_list_plugin_impl.cc b/source/request_source/request_options_list_plugin_impl.cc new file mode 100644 index 000000000..acc459e3c --- /dev/null +++ b/source/request_source/request_options_list_plugin_impl.cc @@ -0,0 +1,122 @@ +#include "request_source/request_options_list_plugin_impl.h" + +#include "external/envoy/source/common/protobuf/message_validator_impl.h" +#include "external/envoy/source/common/protobuf/utility.h" +#include "external/envoy/source/exe/platform_impl.h" + +#include "api/client/options.pb.h" + +#include "common/request_impl.h" +#include "common/request_source_impl.h" + +namespace Nighthawk { +std::string FileBasedOptionsListRequestSourceFactory::name() const { + return "nighthawk.file-based-request-source-plugin"; +} + +Envoy::ProtobufTypes::MessagePtr +FileBasedOptionsListRequestSourceFactory::createEmptyConfigProto() { + return std::make_unique(); +} + +RequestSourcePtr FileBasedOptionsListRequestSourceFactory::createRequestSourcePlugin( + const Envoy::Protobuf::Message& message, Envoy::Api::Api& api, + Envoy::Http::RequestHeaderMapPtr header) { + const auto& any = dynamic_cast(message); + nighthawk::request_source::FileBasedOptionsListRequestSourceConfig config; + Envoy::MessageUtil util; + util.unpackTo(any, config); + uint32_t max_file_size = config.has_max_file_size() ? config.max_file_size().value() : 1000000; + if (api.fileSystem().fileSize(config.file_path()) > max_file_size) { + throw NighthawkException("file size must be less than max_file_size"); + } + + // Locking to avoid issues with multiple threads reading the same file. + { + Envoy::Thread::LockGuard lock_guard(file_lock_); + // Reading the file only the first time. + if (!options_list_.has_value()) { + nighthawk::client::RequestOptionsList loaded_list; + util.loadFromFile(config.file_path(), loaded_list, + Envoy::ProtobufMessage::getStrictValidationVisitor(), api, true); + options_list_ = loaded_list; + } + } + return std::make_unique(config.num_requests(), std::move(header), + options_list_.value()); +} + +REGISTER_FACTORY(FileBasedOptionsListRequestSourceFactory, RequestSourcePluginConfigFactory); + +std::string InLineOptionsListRequestSourceFactory::name() const { + return "nighthawk.in-line-options-list-request-source-plugin"; +} + +Envoy::ProtobufTypes::MessagePtr InLineOptionsListRequestSourceFactory::createEmptyConfigProto() { + return std::make_unique(); +} + +RequestSourcePtr InLineOptionsListRequestSourceFactory::createRequestSourcePlugin( + const Envoy::Protobuf::Message& message, Envoy::Api::Api&, + Envoy::Http::RequestHeaderMapPtr header) { + const auto& any = dynamic_cast(message); + nighthawk::request_source::InLineOptionsListRequestSourceConfig config; + Envoy::MessageUtil::unpackTo(any, config); + // Locking to avoid issues with multiple threads calling this at the same time and trying to set + // the options_list_ + { + Envoy::Thread::LockGuard lock_guard(options_list_lock_); + // Only loading the config into memory the first time. + if (!options_list_.has_value()) { + options_list_ = config.options_list(); + } + } + return std::make_unique(config.num_requests(), std::move(header), + options_list_.value()); +} + +REGISTER_FACTORY(InLineOptionsListRequestSourceFactory, RequestSourcePluginConfigFactory); + +OptionsListRequestSource::OptionsListRequestSource( + const uint32_t total_requests, Envoy::Http::RequestHeaderMapPtr header, + const nighthawk::client::RequestOptionsList& options_list) + : header_(std::move(header)), options_list_(options_list), total_requests_(total_requests) {} + +RequestGenerator OptionsListRequestSource::get() { + request_count_.push_back(0); + uint32_t& lambda_counter = request_count_.back(); + RequestGenerator request_generator = [this, lambda_counter]() mutable -> RequestPtr { + // if request_max is 0, then we never stop generating requests. + if (lambda_counter >= total_requests_ && total_requests_ != 0) { + return nullptr; + } + + // Increment the counter and get the request_option from the list for the current iteration. + const uint32_t index = lambda_counter % options_list_.options_size(); + nighthawk::client::RequestOptions request_option = options_list_.options().at(index); + ++lambda_counter; + + // Initialize the header with the values from the default header. + Envoy::Http::RequestHeaderMapPtr header = Envoy::Http::RequestHeaderMapImpl::create(); + Envoy::Http::HeaderMapImpl::copyFrom(*header, *header_); + + // Override the default values with the values from the request_option + header->setMethod(envoy::config::core::v3::RequestMethod_Name(request_option.request_method())); + const uint32_t content_length = request_option.request_body_size().value(); + if (content_length > 0) { + header->setContentLength( + content_length); // Content length is used later in stream_decoder to populate the body + } + for (const envoy::config::core::v3::HeaderValueOption& option_header : + request_option.request_headers()) { + auto lower_case_key = Envoy::Http::LowerCaseString(std::string(option_header.header().key())); + header->setCopy(lower_case_key, std::string(option_header.header().value())); + } + return std::make_unique(std::move(header)); + }; + return request_generator; +} + +void OptionsListRequestSource::initOnThread() {} + +} // namespace Nighthawk \ No newline at end of file diff --git a/source/request_source/request_options_list_plugin_impl.h b/source/request_source/request_options_list_plugin_impl.h new file mode 100644 index 000000000..8de3d7ef9 --- /dev/null +++ b/source/request_source/request_options_list_plugin_impl.h @@ -0,0 +1,118 @@ +// Implementations of RequestSourceConfigFactories that make a OptionsListRequestSource. +#pragma once + +#include "envoy/registry/registry.h" + +#include "nighthawk/request_source/request_source_plugin_config_factory.h" + +#include "external/envoy/source/common/common/lock_guard.h" +#include "external/envoy/source/common/common/thread.h" + +#include "api/client/options.pb.h" +#include "api/request_source/request_source_plugin.pb.h" + +#include "common/uri_impl.h" + +namespace Nighthawk { + +// Sample Request Source for small RequestOptionsLists. Loads a copy of the RequestOptionsList in +// memory and replays them. +// @param total_requests The number of requests the requestGenerator produced by get() will +// generate. 0 means it is unlimited. +// @param header the default header that will be overridden by values taken from the options_list, +// any values not overridden will be used. +// @param options_list This is const because it is intended to be shared by multiple threads. The +// RequestGenerator produced by get() will use options from the options_list to overwrite values in +// the default header, and create new requests. if total_requests is greater than the length of +// options_list, it will loop. This is not thread safe. +class OptionsListRequestSource : public RequestSource { +public: + OptionsListRequestSource(const uint32_t total_requests, Envoy::Http::RequestHeaderMapPtr header, + const nighthawk::client::RequestOptionsList& options_list); + + // This get function is not thread safe, because multiple threads calling get simultaneously will + // result in a collision. + RequestGenerator get() override; + + // default implementation + void initOnThread() override; + +private: + Envoy::Http::RequestHeaderMapPtr header_; + const nighthawk::client::RequestOptionsList& options_list_; + std::vector request_count_; + const uint32_t total_requests_; +}; + +// Factory that creates a OptionsListRequestSource from a FileBasedOptionsListRequestSourceConfig +// proto. Registered as an Envoy plugin. Implementation of RequestSourceConfigFactory which produces +// a RequestSource that keeps an RequestOptionsList in memory, and loads it with the RequestOptions +// taken from a file. All plugins configuration are specified in the request_source_plugin.proto. +// This class is thread-safe, +// Usage: assume you are passed an appropriate Any type object called config, an Api +// object called api, and a default header called header. auto& config_factory = +// Envoy::Config::Utility::getAndCheckFactoryByName( +// "nighthawk.file-based-request-source-plugin"); +// RequestSourcePtr plugin = +// config_factory.createRequestSourcePlugin(config, std::move(api), std::move(header)); +class FileBasedOptionsListRequestSourceFactory : public virtual RequestSourcePluginConfigFactory { +public: + std::string name() const override; + + Envoy::ProtobufTypes::MessagePtr createEmptyConfigProto() override; + + // This implementation is thread safe. There is currently a behaviour such that only the first + // call to createRequestSourcePlugin will load the options list into memory and subsequent calls + // just make a copy of the options_list that was already loaded. The + // FileBasedOptionsListRequestSourceFactory will not work with multiple different files for this + // reason. + // TODO: This memory saving is likely a premature optimization, and should be removed. + // This method will also error if the + // file can not be loaded correctly, e.g. the file is too large or could not be found. + RequestSourcePtr createRequestSourcePlugin(const Envoy::Protobuf::Message& message, + Envoy::Api::Api& api, + Envoy::Http::RequestHeaderMapPtr header) override; + +private: + Envoy::Thread::MutexBasicLockable file_lock_; + absl::optional options_list_; +}; + +// This factory will be activated through RequestSourceFactory in factories.h +DECLARE_FACTORY(FileBasedOptionsListRequestSourceFactory); + +// Factory that creates a OptionsListRequestSource from a InLineOptionsListRequestSourceConfig +// proto. Registered as an Envoy plugin. Implementation of RequestSourceConfigFactory which produces +// a RequestSource that keeps an RequestOptionsList in memory, and loads it with the RequestOptions +// passed to it from the config. All plugins configuration are specified in the +// request_source_plugin.proto. +// This class is thread-safe, +// Usage: assume you are passed an appropriate Any type object called +// config, an Api object called api, and a default header called header. auto& config_factory = +// Envoy::Config::Utility::getAndCheckFactoryByName( +// "nighthawk.in-line-options-list-request-source-plugin"); +// RequestSourcePtr plugin = +// config_factory.createRequestSourcePlugin(config, std::move(api), std::move(header)); + +class InLineOptionsListRequestSourceFactory : public virtual RequestSourcePluginConfigFactory { +public: + std::string name() const override; + Envoy::ProtobufTypes::MessagePtr createEmptyConfigProto() override; + + // This implementation is thread safe. There is currently a behaviour such that only the first + // call to createRequestSourcePlugin will load the options list into memory and subsequent calls + // just make a copy of the options_list that was already loaded. + // TODO: This memory saving is likely a premature optimization, and should be removed. + RequestSourcePtr createRequestSourcePlugin(const Envoy::Protobuf::Message& message, + Envoy::Api::Api& api, + Envoy::Http::RequestHeaderMapPtr header) override; + +private: + Envoy::Thread::MutexBasicLockable options_list_lock_; + absl::optional options_list_; +}; + +// This factory will be activated through RequestSourceFactory in factories.h +DECLARE_FACTORY(InLineOptionsListRequestSourceFactory); + +} // namespace Nighthawk \ No newline at end of file diff --git a/source/server/BUILD b/source/server/BUILD index 33e9a6dfc..c0af0f63e 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -37,6 +37,8 @@ envoy_cc_library( "@envoy//source/common/protobuf:message_validator_lib_with_external_headers", "@envoy//source/common/protobuf:utility_lib_with_external_headers", "@envoy//source/common/singleton:const_singleton_with_external_headers", + "@envoy_api//envoy/api/v2/core:pkg_cc_proto", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/source/server/README.md b/source/server/README.md index d98631126..81ada3815 100644 --- a/source/server/README.md +++ b/source/server/README.md @@ -15,7 +15,7 @@ bazel build -c opt :nighthawk_test_server ``` It is possible to -[enable additional envoy extension](https://github.com/envoyproxy/envoy/blob/master/source/extensions/extensions_build_config.bzl) by adding them [here](../../extensions_build_config.bzl) before the build. +[enable additional envoy extension](https://github.com/envoyproxy/envoy/blob/main/source/extensions/extensions_build_config.bzl) by adding them [here](../../extensions_build_config.bzl) before the build. By default, Nighthawk's test server is set up with the minimum extension set needed for it to operate as documented. @@ -34,10 +34,11 @@ static_resources: port_value: 10000 filter_chains: - filters: - - name: envoy.http_connection_manager - config: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager generate_request_id: false - codec_type: auto + codec_type: AUTO stat_prefix: ingress_http route_config: name: local_route @@ -47,20 +48,23 @@ static_resources: - "*" http_filters: - name: dynamic-delay - config: + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions static_delay: 0.5s - name: test-server # before envoy.router because order matters! - config: + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions response_body_size: 10 - response_headers: + v3_response_headers: - { header: { key: "foo", value: "bar" } } - { header: { key: "foo", value: "bar2" }, append: true, } - { header: { key: "x-nh", value: "1" } } - - name: envoy.router - config: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router dynamic_stats: false admin: access_log_path: /tmp/envoy.log @@ -146,7 +150,7 @@ same time. ``` # If you already have Envoy running, you might need to set --base-id to allow the test-server to start. -➜ /bazel-bin/nighthawk/source/server/server --config-path /path/to/test-server-server.yaml +➜ /bazel-bin/nighthawk/source/server/server --config-path /path/to/test-server.yaml # Verify the test server with a curl command similar to: ➜ curl -H "x-nighthawk-test-server-config: {response_body_size:20, static_delay: \"0s\"}" -vv 127.0.0.1:10000 @@ -161,8 +165,7 @@ USAGE: bazel-bin/nighthawk_test_server [--socket-mode ] [--socket-path ] [--disable-extensions -] [--use-fake-symbol-table -] [--cpuset-threads] +] [--cpuset-threads] [--enable-mutex-tracing] [--disable-hot-restart] [--mode ] [--parent-shutdown-time-s @@ -175,8 +178,7 @@ bazel-bin/nighthawk_test_server [--socket-mode ] [--socket-path [--hot-restart-version] [--restart-epoch ] [--log-path ] -[--log-format-prefix-with-location -] [--enable-fine-grain-logging] +[--enable-fine-grain-logging] [--log-format-escaped] [--log-format ] [--component-log-level ] [-l ] @@ -205,9 +207,6 @@ Path to hot restart socket file --disable-extensions Comma-separated list of extensions to disable ---use-fake-symbol-table -Use fake symbol table implementation - --cpuset-threads Get the default # of worker threads from cpuset size @@ -252,10 +251,6 @@ hot restart epoch # --log-path Path to logfile ---log-format-prefix-with-location -Prefix all occurrences of '%v' in log format with with '[%g:%#] ' -('[path/to/file.cc:99] '). - --enable-fine-grain-logging Logger mode: enable file level log control(Fancy Logger)or not diff --git a/source/server/configuration.cc b/source/server/configuration.cc index a6037bf14..ca17b6f46 100644 --- a/source/server/configuration.cc +++ b/source/server/configuration.cc @@ -2,6 +2,9 @@ #include +#include "envoy/api/v2/core/base.pb.h" +#include "envoy/config/core/v3/base.pb.h" + #include "external/envoy/source/common/protobuf/message_validator_impl.h" #include "external/envoy/source/common/protobuf/utility.h" @@ -30,8 +33,20 @@ bool mergeJsonConfig(absl::string_view json, nighthawk::server::ResponseOptions& void applyConfigToResponseHeaders(Envoy::Http::ResponseHeaderMap& response_headers, const nighthawk::server::ResponseOptions& response_options) { - for (const auto& header_value_option : response_options.response_headers()) { - const auto& header = header_value_option.header(); + + // The validation guarantees we only get one of the fields (response_headers, v3_response_headers) + // set. + validateResponseOptions(response_options); + nighthawk::server::ResponseOptions v3_only_response_options = response_options; + for (const envoy::api::v2::core::HeaderValueOption& header_value_option : + v3_only_response_options.response_headers()) { + *v3_only_response_options.add_v3_response_headers() = + upgradeDeprecatedEnvoyV2HeaderValueOptionToV3(header_value_option); + } + + for (const envoy::config::core::v3::HeaderValueOption& header_value_option : + v3_only_response_options.v3_response_headers()) { + const envoy::config::core::v3::HeaderValue& header = header_value_option.header(); auto lower_case_key = Envoy::Http::LowerCaseString(header.key()); if (!header_value_option.append().value()) { response_headers.remove(lower_case_key); @@ -40,6 +55,30 @@ void applyConfigToResponseHeaders(Envoy::Http::ResponseHeaderMap& response_heade } } +envoy::config::core::v3::HeaderValueOption upgradeDeprecatedEnvoyV2HeaderValueOptionToV3( + const envoy::api::v2::core::HeaderValueOption& v2_header_value_option) { + envoy::config::core::v3::HeaderValueOption v3_header_value_option; + if (v2_header_value_option.has_append()) { + *v3_header_value_option.mutable_append() = v2_header_value_option.append(); + } + if (v2_header_value_option.has_header()) { + envoy::config::core::v3::HeaderValue* v3_header = v3_header_value_option.mutable_header(); + v3_header->set_key(v2_header_value_option.header().key()); + v3_header->set_value(v2_header_value_option.header().value()); + } + return v3_header_value_option; +} + +void validateResponseOptions(const nighthawk::server::ResponseOptions& response_options) { + if (response_options.response_headers_size() > 0 && + response_options.v3_response_headers_size() > 0) { + throw Envoy::EnvoyException( + absl::StrCat("invalid configuration in nighthawk::server::ResponseOptions ", + "cannot specify both response_headers and v3_response_headers ", + "configuration was: ", response_options.ShortDebugString())); + } +} + } // namespace Configuration } // namespace Server } // namespace Nighthawk diff --git a/source/server/configuration.h b/source/server/configuration.h index e44cca4a6..81aaf50da 100644 --- a/source/server/configuration.h +++ b/source/server/configuration.h @@ -2,6 +2,8 @@ #include +#include "envoy/api/v2/core/base.pb.h" +#include "envoy/config/core/v3/base.pb.h" #include "envoy/http/header_map.h" #include "api/server/response_options.pb.h" @@ -27,10 +29,28 @@ bool mergeJsonConfig(absl::string_view json, nighthawk::server::ResponseOptions& * @param response_headers Response headers to transform to reflect the passed in response * options. * @param response_options Configuration specifying how to transform the header map. + * + * @throws Envoy::EnvoyException if invalid response_options are provided. */ void applyConfigToResponseHeaders(Envoy::Http::ResponseHeaderMap& response_headers, const nighthawk::server::ResponseOptions& response_options); +/** + * Upgrades Envoy's HeaderValueOption from the deprecated v2 API version to v3. + * + * @param v2_header_value_option The HeaderValueOption to be upgraded. + * @return a version of HeaderValueOption upgraded to Envoy API v3. + */ +envoy::config::core::v3::HeaderValueOption upgradeDeprecatedEnvoyV2HeaderValueOptionToV3( + const envoy::api::v2::core::HeaderValueOption& v2_header_value_option); + +/** + * Validates the ResponseOptions. + * + * @throws Envoy::EnvoyException on validation errors. + */ +void validateResponseOptions(const nighthawk::server::ResponseOptions& response_options); + } // namespace Configuration } // namespace Server } // namespace Nighthawk diff --git a/source/server/http_dynamic_delay_filter.cc b/source/server/http_dynamic_delay_filter.cc index ebc3254fc..9536b8434 100644 --- a/source/server/http_dynamic_delay_filter.cc +++ b/source/server/http_dynamic_delay_filter.cc @@ -47,7 +47,7 @@ HttpDynamicDelayDecoderFilter::decodeHeaders(Envoy::Http::RequestHeaderMap& head maybeRequestFaultFilterDelay(delay_ms, headers); } else { if (end_stream) { - config_->maybeSendErrorReply(*decoder_callbacks_); + config_->validateOrSendError(*decoder_callbacks_); return Envoy::Http::FilterHeadersStatus::StopIteration; } return Envoy::Http::FilterHeadersStatus::Continue; @@ -59,7 +59,7 @@ Envoy::Http::FilterDataStatus HttpDynamicDelayDecoderFilter::decodeData(Envoy::Buffer::Instance& buffer, bool end_stream) { if (!config_->getEffectiveConfiguration().ok()) { if (end_stream) { - config_->maybeSendErrorReply(*decoder_callbacks_); + config_->validateOrSendError(*decoder_callbacks_); return Envoy::Http::FilterDataStatus::StopIterationNoBuffer; } return Envoy::Http::FilterDataStatus::Continue; diff --git a/source/server/http_dynamic_delay_filter_config.cc b/source/server/http_dynamic_delay_filter_config.cc index 336a5da7b..87cda255b 100644 --- a/source/server/http_dynamic_delay_filter_config.cc +++ b/source/server/http_dynamic_delay_filter_config.cc @@ -7,6 +7,7 @@ #include "api/server/response_options.pb.h" #include "api/server/response_options.pb.validate.h" +#include "server/configuration.h" #include "server/http_dynamic_delay_filter.h" namespace Nighthawk { @@ -22,10 +23,11 @@ class HttpDynamicDelayDecoderFilterConfigFactory Envoy::Server::Configuration::FactoryContext& context) override { auto& validation_visitor = Envoy::ProtobufMessage::getStrictValidationVisitor(); - return createFilter( + const nighthawk::server::ResponseOptions& response_options = Envoy::MessageUtil::downcastAndValidate( - proto_config, validation_visitor), - context); + proto_config, validation_visitor); + validateResponseOptions(response_options); + return createFilter(response_options, context); } Envoy::ProtobufTypes::MessagePtr createEmptyConfigProto() override { diff --git a/source/server/http_filter_config_base.cc b/source/server/http_filter_config_base.cc index a8dc933e9..0c8396139 100644 --- a/source/server/http_filter_config_base.cc +++ b/source/server/http_filter_config_base.cc @@ -13,21 +13,27 @@ FilterConfigurationBase::FilterConfigurationBase( void FilterConfigurationBase::computeEffectiveConfiguration( const Envoy::Http::RequestHeaderMap& headers) { - const auto* request_config_header = headers.get(TestServer::HeaderNames::get().TestServerConfig); - if (request_config_header) { + const auto& request_config_header = headers.get(TestServer::HeaderNames::get().TestServerConfig); + if (request_config_header.size() == 1) { + // We could be more flexible and look for the first request header that has a value, + // but without a proper understanding of a real use case for that, we are assuming that any + // existence of duplicate headers here is an error. nighthawk::server::ResponseOptions response_options = *server_config_; std::string error_message; - if (Configuration::mergeJsonConfig(request_config_header->value().getStringView(), + if (Configuration::mergeJsonConfig(request_config_header[0]->value().getStringView(), response_options, error_message)) { effective_config_ = std::make_shared(std::move(response_options)); } else { effective_config_ = absl::InvalidArgumentError(error_message); } + } else if (request_config_header.size() > 1) { + effective_config_ = absl::InvalidArgumentError( + "Received multiple configuration headers in the request, expected only one."); } } -bool FilterConfigurationBase::maybeSendErrorReply( +bool FilterConfigurationBase::validateOrSendError( Envoy::Http::StreamDecoderFilterCallbacks& decoder_callbacks) const { if (!effective_config_.ok()) { decoder_callbacks.sendLocalReply(static_cast(500), diff --git a/source/server/http_filter_config_base.h b/source/server/http_filter_config_base.h index 9f3f700fe..7569df17a 100644 --- a/source/server/http_filter_config_base.h +++ b/source/server/http_filter_config_base.h @@ -52,7 +52,7 @@ class FilterConfigurationBase { * @param decoder_callbacks Decoder used to generate the reply. * @return true iff an error reply was generated. */ - bool maybeSendErrorReply(Envoy::Http::StreamDecoderFilterCallbacks& decoder_callbacks) const; + bool validateOrSendError(Envoy::Http::StreamDecoderFilterCallbacks& decoder_callbacks) const; /** * @brief Get the effective configuration. Depending on state ,this could be one of static diff --git a/source/server/http_test_server_filter.cc b/source/server/http_test_server_filter.cc index cf01105cf..bac840bc1 100644 --- a/source/server/http_test_server_filter.cc +++ b/source/server/http_test_server_filter.cc @@ -13,8 +13,8 @@ namespace Nighthawk { namespace Server { HttpTestServerDecoderFilterConfig::HttpTestServerDecoderFilterConfig( - nighthawk::server::ResponseOptions proto_config) - : server_config_(std::move(proto_config)) {} + const nighthawk::server::ResponseOptions& proto_config) + : FilterConfigurationBase(proto_config, "test-server") {} HttpTestServerDecoderFilter::HttpTestServerDecoderFilter( HttpTestServerDecoderFilterConfigSharedPtr config) @@ -22,43 +22,34 @@ HttpTestServerDecoderFilter::HttpTestServerDecoderFilter( void HttpTestServerDecoderFilter::onDestroy() {} -void HttpTestServerDecoderFilter::sendReply() { - if (!json_merge_error_) { - std::string response_body(base_config_.response_body_size(), 'a'); - if (request_headers_dump_.has_value()) { - response_body += *request_headers_dump_; - } - decoder_callbacks_->sendLocalReply( - static_cast(200), response_body, - [this](Envoy::Http::ResponseHeaderMap& direct_response_headers) { - Configuration::applyConfigToResponseHeaders(direct_response_headers, base_config_); - }, - absl::nullopt, ""); - } else { - decoder_callbacks_->sendLocalReply( - static_cast(500), - fmt::format("test-server didn't understand the request: {}", error_message_), nullptr, - absl::nullopt, ""); +void HttpTestServerDecoderFilter::sendReply(const nighthawk::server::ResponseOptions& options) { + std::string response_body(options.response_body_size(), 'a'); + if (request_headers_dump_.has_value()) { + response_body += *request_headers_dump_; } + decoder_callbacks_->sendLocalReply( + static_cast(200), response_body, + [options](Envoy::Http::ResponseHeaderMap& direct_response_headers) { + Configuration::applyConfigToResponseHeaders(direct_response_headers, options); + }, + absl::nullopt, ""); } Envoy::Http::FilterHeadersStatus HttpTestServerDecoderFilter::decodeHeaders(Envoy::Http::RequestHeaderMap& headers, bool end_stream) { - // TODO(oschaaf): Add functionality to clear fields - base_config_ = config_->server_config(); - const auto* request_config_header = headers.get(TestServer::HeaderNames::get().TestServerConfig); - if (request_config_header) { - json_merge_error_ = !Configuration::mergeJsonConfig( - request_config_header->value().getStringView(), base_config_, error_message_); - } - if (base_config_.echo_request_headers()) { - std::stringstream headers_dump; - headers_dump << "\nRequest Headers:\n" << headers; - request_headers_dump_ = headers_dump.str(); - } + config_->computeEffectiveConfiguration(headers); if (end_stream) { - sendReply(); + if (!config_->validateOrSendError(*decoder_callbacks_)) { + const absl::StatusOr effective_config = + config_->getEffectiveConfiguration(); + if (effective_config.value()->echo_request_headers()) { + std::stringstream headers_dump; + headers_dump << "\nRequest Headers:\n" << headers; + request_headers_dump_ = headers_dump.str(); + } + sendReply(*effective_config.value()); + } } return Envoy::Http::FilterHeadersStatus::StopIteration; } @@ -66,7 +57,9 @@ HttpTestServerDecoderFilter::decodeHeaders(Envoy::Http::RequestHeaderMap& header Envoy::Http::FilterDataStatus HttpTestServerDecoderFilter::decodeData(Envoy::Buffer::Instance&, bool end_stream) { if (end_stream) { - sendReply(); + if (!config_->validateOrSendError(*decoder_callbacks_)) { + sendReply(*config_->getEffectiveConfiguration().value()); + } } return Envoy::Http::FilterDataStatus::StopIterationNoBuffer; } diff --git a/source/server/http_test_server_filter.h b/source/server/http_test_server_filter.h index 6f8d2ace1..16f3e378e 100644 --- a/source/server/http_test_server_filter.h +++ b/source/server/http_test_server_filter.h @@ -6,17 +6,15 @@ #include "api/server/response_options.pb.h" +#include "server/http_filter_config_base.h" + namespace Nighthawk { namespace Server { // Basically this is left in as a placeholder for further configuration. -class HttpTestServerDecoderFilterConfig { +class HttpTestServerDecoderFilterConfig : public FilterConfigurationBase { public: - HttpTestServerDecoderFilterConfig(nighthawk::server::ResponseOptions proto_config); - const nighthawk::server::ResponseOptions& server_config() { return server_config_; } - -private: - const nighthawk::server::ResponseOptions server_config_; + HttpTestServerDecoderFilterConfig(const nighthawk::server::ResponseOptions& proto_config); }; using HttpTestServerDecoderFilterConfigSharedPtr = @@ -36,12 +34,9 @@ class HttpTestServerDecoderFilter : public Envoy::Http::StreamDecoderFilter { void setDecoderFilterCallbacks(Envoy::Http::StreamDecoderFilterCallbacks&) override; private: - void sendReply(); + void sendReply(const nighthawk::server::ResponseOptions& options); const HttpTestServerDecoderFilterConfigSharedPtr config_; Envoy::Http::StreamDecoderFilterCallbacks* decoder_callbacks_; - nighthawk::server::ResponseOptions base_config_; - bool json_merge_error_{false}; - std::string error_message_; absl::optional request_headers_dump_; }; diff --git a/source/server/http_test_server_filter_config.cc b/source/server/http_test_server_filter_config.cc index ffd814303..5a9754d9d 100644 --- a/source/server/http_test_server_filter_config.cc +++ b/source/server/http_test_server_filter_config.cc @@ -7,6 +7,7 @@ #include "api/server/response_options.pb.h" #include "api/server/response_options.pb.validate.h" +#include "server/configuration.h" #include "server/http_test_server_filter.h" namespace Nighthawk { @@ -19,12 +20,12 @@ class HttpTestServerDecoderFilterConfig Envoy::Http::FilterFactoryCb createFilterFactoryFromProto(const Envoy::Protobuf::Message& proto_config, const std::string&, Envoy::Server::Configuration::FactoryContext& context) override { - auto& validation_visitor = Envoy::ProtobufMessage::getStrictValidationVisitor(); - return createFilter( + const nighthawk::server::ResponseOptions& response_options = Envoy::MessageUtil::downcastAndValidate( - proto_config, validation_visitor), - context); + proto_config, validation_visitor); + validateResponseOptions(response_options); + return createFilter(response_options, context); } Envoy::ProtobufTypes::MessagePtr createEmptyConfigProto() override { diff --git a/source/server/http_time_tracking_filter.cc b/source/server/http_time_tracking_filter.cc index 045192d0e..a6d787f02 100644 --- a/source/server/http_time_tracking_filter.cc +++ b/source/server/http_time_tracking_filter.cc @@ -31,7 +31,7 @@ HttpTimeTrackingFilter::HttpTimeTrackingFilter(HttpTimeTrackingFilterConfigShare Envoy::Http::FilterHeadersStatus HttpTimeTrackingFilter::decodeHeaders(Envoy::Http::RequestHeaderMap& headers, bool end_stream) { config_->computeEffectiveConfiguration(headers); - if (end_stream && config_->maybeSendErrorReply(*decoder_callbacks_)) { + if (end_stream && config_->validateOrSendError(*decoder_callbacks_)) { return Envoy::Http::FilterHeadersStatus::StopIteration; } return Envoy::Http::FilterHeadersStatus::Continue; @@ -39,7 +39,7 @@ HttpTimeTrackingFilter::decodeHeaders(Envoy::Http::RequestHeaderMap& headers, bo Envoy::Http::FilterDataStatus HttpTimeTrackingFilter::decodeData(Envoy::Buffer::Instance&, bool end_stream) { - if (end_stream && config_->maybeSendErrorReply(*decoder_callbacks_)) { + if (end_stream && config_->validateOrSendError(*decoder_callbacks_)) { return Envoy::Http::FilterDataStatus::StopIterationNoBuffer; } return Envoy::Http::FilterDataStatus::Continue; @@ -47,7 +47,8 @@ Envoy::Http::FilterDataStatus HttpTimeTrackingFilter::decodeData(Envoy::Buffer:: Envoy::Http::FilterHeadersStatus HttpTimeTrackingFilter::encodeHeaders(Envoy::Http::ResponseHeaderMap& response_headers, bool) { - const auto effective_config = config_->getEffectiveConfiguration(); + const absl::StatusOr effective_config = + config_->getEffectiveConfiguration(); if (effective_config.ok()) { const std::string previous_request_delta_in_response_header = effective_config.value()->emit_previous_request_delta_in_response_header(); diff --git a/source/server/http_time_tracking_filter_config.cc b/source/server/http_time_tracking_filter_config.cc index 76adb0bb1..e9e3e38d0 100644 --- a/source/server/http_time_tracking_filter_config.cc +++ b/source/server/http_time_tracking_filter_config.cc @@ -7,6 +7,7 @@ #include "api/server/response_options.pb.h" #include "api/server/response_options.pb.validate.h" +#include "server/configuration.h" #include "server/http_time_tracking_filter.h" namespace Nighthawk { @@ -21,10 +22,11 @@ class HttpTimeTrackingFilterConfig Envoy::Server::Configuration::FactoryContext& context) override { Envoy::ProtobufMessage::ValidationVisitor& validation_visitor = Envoy::ProtobufMessage::getStrictValidationVisitor(); - return createFilter( + const nighthawk::server::ResponseOptions& response_options = Envoy::MessageUtil::downcastAndValidate( - proto_config, validation_visitor), - context); + proto_config, validation_visitor); + validateResponseOptions(response_options); + return createFilter(response_options, context); } Envoy::ProtobufTypes::MessagePtr createEmptyConfigProto() override { diff --git a/test/BUILD b/test/BUILD index ecb8d5039..c3ee1093a 100644 --- a/test/BUILD +++ b/test/BUILD @@ -87,6 +87,7 @@ envoy_cc_test( "//test/mocks/client:mock_benchmark_client", "//test/mocks/client:mock_options", "//test/mocks/common:mock_termination_predicate", + "//test/test_common:environment_lib", "@envoy//test/mocks/event:event_mocks", "@envoy//test/mocks/tracing:tracing_mocks", "@envoy//test/test_common:simulated_time_system_lib", @@ -153,6 +154,7 @@ envoy_cc_test( "//test/test_common:environment_lib", "@envoy//test/test_common:network_utility_lib", "@envoy//test/test_common:registry_lib", + "@envoy//test/test_common:simulated_time_system_lib", ], ) @@ -324,8 +326,11 @@ envoy_cc_test( srcs = ["request_stream_grpc_client_test.cc"], repository = "@envoy", deps = [ + "//api/request_source:grpc_request_source_service_lib", "//source/common:request_stream_grpc_client_lib", "@envoy//test/test_common:utility_lib", + "@envoy_api//envoy/api/v2/core:pkg_cc_proto", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/test/adaptive_load/BUILD b/test/adaptive_load/BUILD index b831726cc..902e744b0 100644 --- a/test/adaptive_load/BUILD +++ b/test/adaptive_load/BUILD @@ -21,6 +21,28 @@ envoy_cc_test_library( ], ) +envoy_cc_test( + name = "adaptive_load_client_main_test", + srcs = ["adaptive_load_client_main_test.cc"], + data = [ + "test_data/golden_output.textproto", + "test_data/invalid_session_spec.textproto", + "test_data/valid_session_spec.textproto", + ], + repository = "@envoy", + deps = [ + ":minimal_output", + "//include/nighthawk/adaptive_load:adaptive_load_controller", + "//source/adaptive_load:adaptive_load_client_main", + "//test/mocks/adaptive_load:mock_adaptive_load_controller", + "//test/test_common:environment_lib", + "@com_github_grpc_grpc//:grpc++_test", + "@envoy//source/common/protobuf:utility_lib_with_external_headers", + "@envoy//test/mocks/filesystem:filesystem_mocks", + "@envoy//test/test_common:utility_lib", + ], +) + envoy_cc_test( name = "adaptive_load_controller_test", srcs = ["adaptive_load_controller_test.cc"], diff --git a/test/adaptive_load/adaptive_load_client_main_test.cc b/test/adaptive_load/adaptive_load_client_main_test.cc new file mode 100644 index 000000000..3ecef25bd --- /dev/null +++ b/test/adaptive_load/adaptive_load_client_main_test.cc @@ -0,0 +1,379 @@ +#include "envoy/api/io_error.h" +#include "envoy/filesystem/filesystem.h" + +#include "nighthawk/adaptive_load/adaptive_load_controller.h" +#include "nighthawk/common/exception.h" + +#include "external/envoy/test/mocks/filesystem/mocks.h" +#include "external/envoy/test/test_common/file_system_for_test.h" +#include "external/envoy/test/test_common/utility.h" + +#include "api/adaptive_load/adaptive_load.pb.h" +#include "api/adaptive_load/benchmark_result.pb.h" + +#include "test/adaptive_load/minimal_output.h" +#include "test/mocks/adaptive_load/mock_adaptive_load_controller.h" +#include "test/test_common/environment.h" + +#include "absl/strings/string_view.h" +#include "adaptive_load/adaptive_load_client_main.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Nighthawk { + +namespace { + +using ::testing::_; +using ::testing::ByMove; +using ::testing::DoAll; +using ::testing::Eq; +using ::testing::HasSubstr; +using ::testing::NiceMock; +using ::testing::Return; +using ::testing::SetArgPointee; + +/** + * Envoy IO error value to simulate filesystem errors. + */ +class UnknownIoError : public Envoy::Api::IoError { +public: + IoErrorCode getErrorCode() const override { + return Envoy::Api::IoError::IoErrorCode::UnknownError; + } + std::string getErrorDetails() const override { return "unknown error details"; } +}; + +/** + * Creates a minimal valid output that matches test/adaptive_load/test_data/golden_output.textproto. + * + * @return AdaptiveLoadSessionOutput + */ +nighthawk::adaptive_load::AdaptiveLoadSessionOutput MakeBasicAdaptiveLoadSessionOutput() { + nighthawk::adaptive_load::AdaptiveLoadSessionOutput output; + nighthawk::adaptive_load::MetricEvaluation* evaluation = + output.mutable_adjusting_stage_results()->Add()->add_metric_evaluations(); + evaluation->set_metric_id("com.a/b"); + evaluation->set_metric_value(123); + return output; +} + +TEST(AdaptiveLoadClientMainTest, FailsWithNoInputs) { + const std::vector argv = { + "executable-name-here", + }; + NiceMock controller; + Envoy::Filesystem::Instance& filesystem = Envoy::Filesystem::fileSystemForTest(); + + EXPECT_THROW_WITH_REGEX(AdaptiveLoadClientMain(1, argv.data(), controller, filesystem), + Nighthawk::Client::MalformedArgvException, "Required arguments missing"); +} + +TEST(AdaptiveLoadClientMainTest, FailsIfSpecFileNotSet) { + std::string outfile = Nighthawk::TestEnvironment::runfilesPath("unused.textproto"); + const std::vector argv = { + "executable-name-here", + "--output-file", + outfile.c_str(), + }; + + NiceMock controller; + Envoy::Filesystem::Instance& filesystem = Envoy::Filesystem::fileSystemForTest(); + + EXPECT_THROW_WITH_REGEX(AdaptiveLoadClientMain(3, argv.data(), controller, filesystem), + Nighthawk::Client::MalformedArgvException, + "Required argument missing: spec-file"); +} + +TEST(AdaptiveLoadClientMainTest, FailsIfOutputFileNotSet) { + std::string infile = Nighthawk::TestEnvironment::runfilesPath("unused.textproto"); + const std::vector argv = { + "executable-name-here", + "--spec-file", + infile.c_str(), + }; + + NiceMock controller; + Envoy::Filesystem::Instance& filesystem = Envoy::Filesystem::fileSystemForTest(); + + EXPECT_THROW_WITH_REGEX(AdaptiveLoadClientMain main(3, argv.data(), controller, filesystem), + Nighthawk::Client::MalformedArgvException, + "Required argument missing: output-file"); +} + +TEST(AdaptiveLoadClientMainTest, FailsWithNonexistentInputFile) { + std::string infile = Nighthawk::TestEnvironment::runfilesPath("nonexistent.textproto"); + std::string outfile = Nighthawk::TestEnvironment::runfilesPath("unused.textproto"); + const std::vector argv = { + "executable-name-here", "--spec-file", infile.c_str(), "--output-file", outfile.c_str(), + }; + + NiceMock controller; + Envoy::Filesystem::Instance& filesystem = Envoy::Filesystem::fileSystemForTest(); + + AdaptiveLoadClientMain main(5, argv.data(), controller, filesystem); + EXPECT_THROW_WITH_REGEX(main.Run(), Nighthawk::NighthawkException, + "Failed to read spec textproto file"); +} + +TEST(AdaptiveLoadClientMainTest, FailsWithUnparseableInputFile) { + std::string infile = Nighthawk::TestEnvironment::runfilesPath( + "test/adaptive_load/test_data/invalid_session_spec.textproto"); + std::string outfile = Nighthawk::TestEnvironment::runfilesPath("unused.textproto"); + const std::vector argv = { + "executable-name-here", "--spec-file", infile.c_str(), "--output-file", outfile.c_str(), + }; + + NiceMock controller; + Envoy::Filesystem::Instance& filesystem = Envoy::Filesystem::fileSystemForTest(); + + AdaptiveLoadClientMain main(5, argv.data(), controller, filesystem); + EXPECT_THROW_WITH_REGEX(main.Run(), Nighthawk::NighthawkException, "Unable to parse file"); +} + +TEST(AdaptiveLoadClientMainTest, ExitsProcessWithNonzeroStatusOnAdaptiveControllerError) { + std::string infile = Nighthawk::TestEnvironment::runfilesPath( + "test/adaptive_load/test_data/valid_session_spec.textproto"); + std::string outfile = Nighthawk::TestEnvironment::runfilesPath( + "test/adaptive_load/test_data/nonexistent-dir/out.textproto"); + const std::vector argv = { + "executable-name-here", "--spec-file", infile.c_str(), "--output-file", outfile.c_str(), + }; + + MockAdaptiveLoadController controller; + EXPECT_CALL(controller, PerformAdaptiveLoadSession(_, _)) + .WillOnce(Return(absl::DataLossError("error message"))); + Envoy::Filesystem::Instance& filesystem = Envoy::Filesystem::fileSystemForTest(); + + AdaptiveLoadClientMain main(5, argv.data(), controller, filesystem); + EXPECT_EQ(main.Run(), 1); +} + +TEST(AdaptiveLoadClientMainTest, FailsIfCreatingOutputFileFails) { + std::string infile = Nighthawk::TestEnvironment::runfilesPath( + "test/adaptive_load/test_data/valid_session_spec.textproto"); + std::string outfile = Nighthawk::TestEnvironment::runfilesPath( + "test/adaptive_load/test_data/nonexistent-dir/out.textproto"); + const std::vector argv = { + "executable-name-here", "--spec-file", infile.c_str(), "--output-file", outfile.c_str(), + }; + + MockAdaptiveLoadController controller; + EXPECT_CALL(controller, PerformAdaptiveLoadSession(_, _)) + .WillOnce(Return(MakeBasicAdaptiveLoadSessionOutput())); + Envoy::Filesystem::Instance& filesystem = Envoy::Filesystem::fileSystemForTest(); + + AdaptiveLoadClientMain main(5, argv.data(), controller, filesystem); + EXPECT_THROW_WITH_REGEX(main.Run(), Nighthawk::NighthawkException, "Unable to open output file"); +} + +TEST(AdaptiveLoadClientMainTest, FailsIfOpeningOutputFileFails) { + const std::vector argv = { + "executable-name-here", "--spec-file", "in-dummy.textproto", + "--output-file", "out-dummy.textproto", + }; + + MockAdaptiveLoadController controller; + EXPECT_CALL(controller, PerformAdaptiveLoadSession(_, _)) + .WillOnce(Return(MakeBasicAdaptiveLoadSessionOutput())); + + NiceMock filesystem; + + std::string infile_contents = + Envoy::Filesystem::fileSystemForTest().fileReadToEnd(Nighthawk::TestEnvironment::runfilesPath( + std::string("test/adaptive_load/test_data/valid_session_spec.textproto"))); + EXPECT_CALL(filesystem, fileReadToEnd(_)).WillOnce(Return(infile_contents)); + + auto* mock_file = new NiceMock; + EXPECT_CALL(filesystem, createFile(_)) + .WillOnce(Return(ByMove(std::unique_ptr>(mock_file)))); + + EXPECT_CALL(*mock_file, open_(_)) + .WillOnce(Return(ByMove(Envoy::Api::IoCallBoolResult( + false, Envoy::Api::IoErrorPtr(new UnknownIoError(), + [](Envoy::Api::IoError* err) { delete err; }))))); + + AdaptiveLoadClientMain main(5, argv.data(), controller, filesystem); + EXPECT_THROW_WITH_REGEX(main.Run(), Nighthawk::NighthawkException, "Unable to open output file"); +} + +TEST(AdaptiveLoadClientMainTest, FailsIfWritingOutputFileFails) { + const std::vector argv = { + "executable-name-here", "--spec-file", "in-dummy.textproto", + "--output-file", "out-dummy.textproto", + }; + + MockAdaptiveLoadController controller; + EXPECT_CALL(controller, PerformAdaptiveLoadSession(_, _)) + .WillOnce(Return(MakeBasicAdaptiveLoadSessionOutput())); + + NiceMock filesystem; + + std::string infile_contents = + Envoy::Filesystem::fileSystemForTest().fileReadToEnd(Nighthawk::TestEnvironment::runfilesPath( + std::string("test/adaptive_load/test_data/valid_session_spec.textproto"))); + EXPECT_CALL(filesystem, fileReadToEnd(_)).WillOnce(Return(infile_contents)); + + auto* mock_file = new NiceMock; + EXPECT_CALL(filesystem, createFile(_)) + .WillOnce(Return(ByMove(std::unique_ptr>(mock_file)))); + + EXPECT_CALL(*mock_file, open_(_)) + .WillOnce(Return(ByMove(Envoy::Api::IoCallBoolResult( + true, Envoy::Api::IoErrorPtr(nullptr, [](Envoy::Api::IoError*) {}))))); + EXPECT_CALL(*mock_file, write_(_)) + .WillOnce(Return(ByMove(Envoy::Api::IoCallSizeResult( + -1, Envoy::Api::IoErrorPtr(new UnknownIoError(), + [](Envoy::Api::IoError* err) { delete err; }))))); + AdaptiveLoadClientMain main(5, argv.data(), controller, filesystem); + EXPECT_THROW_WITH_REGEX(main.Run(), Nighthawk::NighthawkException, + "Unable to write to output file"); +} + +TEST(AdaptiveLoadClientMainTest, FailsIfClosingOutputFileFails) { + const std::vector argv = { + "executable-name-here", "--spec-file", "in-dummy.textproto", + "--output-file", "out-dummy.textproto", + }; + + MockAdaptiveLoadController controller; + EXPECT_CALL(controller, PerformAdaptiveLoadSession(_, _)) + .WillOnce(Return(MakeBasicAdaptiveLoadSessionOutput())); + + NiceMock filesystem; + + std::string infile_contents = + Envoy::Filesystem::fileSystemForTest().fileReadToEnd(Nighthawk::TestEnvironment::runfilesPath( + std::string("test/adaptive_load/test_data/valid_session_spec.textproto"))); + EXPECT_CALL(filesystem, fileReadToEnd(_)).WillOnce(Return(infile_contents)); + + auto* mock_file = new NiceMock; + EXPECT_CALL(filesystem, createFile(_)) + .WillOnce(Return(ByMove(std::unique_ptr>(mock_file)))); + + EXPECT_CALL(*mock_file, open_(_)) + .WillOnce(Return(ByMove(Envoy::Api::IoCallBoolResult( + true, Envoy::Api::IoErrorPtr(nullptr, [](Envoy::Api::IoError*) {}))))); + EXPECT_CALL(*mock_file, write_(_)) + .WillRepeatedly(Invoke([](absl::string_view data) -> Envoy::Api::IoCallSizeResult { + return Envoy::Api::IoCallSizeResult( + static_cast(data.length()), + Envoy::Api::IoErrorPtr(nullptr, [](Envoy::Api::IoError*) {})); + })); + EXPECT_CALL(*mock_file, close_()) + .WillOnce(Return(ByMove(Envoy::Api::IoCallBoolResult( + false, Envoy::Api::IoErrorPtr(new UnknownIoError(), + [](Envoy::Api::IoError* err) { delete err; }))))); + + AdaptiveLoadClientMain main(5, argv.data(), controller, filesystem); + EXPECT_THROW_WITH_REGEX(main.Run(), Nighthawk::NighthawkException, "Unable to close output file"); +} + +TEST(AdaptiveLoadClientMainTest, WritesOutputProtoToFile) { + const std::vector argv = { + "executable-name-here", "--spec-file", "in-dummy.textproto", + "--output-file", "out-dummy.textproto", + }; + + MockAdaptiveLoadController controller; + EXPECT_CALL(controller, PerformAdaptiveLoadSession(_, _)) + .WillOnce(Return(MakeBasicAdaptiveLoadSessionOutput())); + + NiceMock filesystem; + + std::string infile_contents = + Envoy::Filesystem::fileSystemForTest().fileReadToEnd(Nighthawk::TestEnvironment::runfilesPath( + std::string("test/adaptive_load/test_data/valid_session_spec.textproto"))); + EXPECT_CALL(filesystem, fileReadToEnd(_)).WillOnce(Return(infile_contents)); + + std::string actual_outfile_contents; + auto* mock_file = new NiceMock; + EXPECT_CALL(filesystem, createFile(_)) + .WillOnce(Return(ByMove(std::unique_ptr>(mock_file)))); + + EXPECT_CALL(*mock_file, open_(_)) + .WillOnce(Return(ByMove(Envoy::Api::IoCallBoolResult( + true, Envoy::Api::IoErrorPtr(nullptr, [](Envoy::Api::IoError*) {}))))); + EXPECT_CALL(*mock_file, write_(_)) + .WillRepeatedly(Invoke( + [&actual_outfile_contents](absl::string_view data) -> Envoy::Api::IoCallSizeResult { + actual_outfile_contents += data; + return Envoy::Api::IoCallSizeResult( + static_cast(data.length()), + Envoy::Api::IoErrorPtr(nullptr, [](Envoy::Api::IoError*) {})); + })); + + EXPECT_CALL(*mock_file, close_()) + .WillOnce(Return(ByMove(Envoy::Api::IoCallBoolResult( + true, Envoy::Api::IoErrorPtr(nullptr, [](Envoy::Api::IoError*) {}))))); + + AdaptiveLoadClientMain main(5, argv.data(), controller, filesystem); + main.Run(); + + std::string golden_output = + Envoy::Filesystem::fileSystemForTest().fileReadToEnd(Nighthawk::TestEnvironment::runfilesPath( + std::string("test/adaptive_load/test_data/golden_output.textproto"))); + EXPECT_EQ(actual_outfile_contents, golden_output); +} + +TEST(AdaptiveLoadClientMainTest, DefaultsToInsecureConnection) { + const std::vector argv = { + "executable-name-here", "--spec-file", "a", "--output-file", "b", + }; + + NiceMock controller; + Envoy::Filesystem::Instance& filesystem = Envoy::Filesystem::fileSystemForTest(); + + AdaptiveLoadClientMain main(5, argv.data(), controller, filesystem); + + EXPECT_THAT(main.DescribeInputs(), HasSubstr("insecure")); +} + +TEST(AdaptiveLoadClientMainTest, UsesTlsConnectionWhenSpecified) { + const std::vector argv = { + "executable-name-here", "--use-tls", "--spec-file", "a", "--output-file", "b", + }; + + NiceMock controller; + Envoy::Filesystem::Instance& filesystem = Envoy::Filesystem::fileSystemForTest(); + + AdaptiveLoadClientMain main(6, argv.data(), controller, filesystem); + + EXPECT_THAT(main.DescribeInputs(), HasSubstr("TLS")); +} + +TEST(AdaptiveLoadClientMainTest, UsesDefaultNighthawkServiceAddress) { + const std::vector argv = { + "executable-name-here", "--spec-file", "a", "--output-file", "b", + }; + + NiceMock controller; + Envoy::Filesystem::Instance& filesystem = Envoy::Filesystem::fileSystemForTest(); + + AdaptiveLoadClientMain main(5, argv.data(), controller, filesystem); + + EXPECT_THAT(main.DescribeInputs(), HasSubstr("localhost:8443")); +} + +TEST(AdaptiveLoadClientMainTest, UsesCustomNighthawkServiceAddress) { + const std::vector argv = { + "executable-name-here", + "--nighthawk-service-address", + "1.2.3.4:5678", + "--spec-file", + "a", + "--output-file", + "b", + }; + + NiceMock controller; + Envoy::Filesystem::Instance& filesystem = Envoy::Filesystem::fileSystemForTest(); + + AdaptiveLoadClientMain main(7, argv.data(), controller, filesystem); + + EXPECT_THAT(main.DescribeInputs(), HasSubstr("1.2.3.4:5678")); +} + +} // namespace + +} // namespace Nighthawk diff --git a/test/adaptive_load/test_data/golden_output.textproto b/test/adaptive_load/test_data/golden_output.textproto new file mode 100644 index 000000000..af90ac058 --- /dev/null +++ b/test/adaptive_load/test_data/golden_output.textproto @@ -0,0 +1,6 @@ +adjusting_stage_results { + metric_evaluations { + metric_id: "com.a/b" + metric_value: 123 + } +} diff --git a/test/adaptive_load/test_data/invalid_session_spec.textproto b/test/adaptive_load/test_data/invalid_session_spec.textproto new file mode 100644 index 000000000..5f82458f7 --- /dev/null +++ b/test/adaptive_load/test_data/invalid_session_spec.textproto @@ -0,0 +1 @@ +Bogus text will not parse as a textproto. diff --git a/test/adaptive_load/test_data/valid_session_spec.textproto b/test/adaptive_load/test_data/valid_session_spec.textproto new file mode 100644 index 000000000..9896f3593 --- /dev/null +++ b/test/adaptive_load/test_data/valid_session_spec.textproto @@ -0,0 +1,98 @@ +nighthawk_traffic_template { + # This is a full Nighthawk CommandLineOptions proto, input to Nighthawk Service. + # The adaptive load controller will send this proto as-is to the Nighthawk Service, + # except with a dynamic requests_per_second value inserted. + # + # See https://github.com/envoyproxy/nighthawk/blob/main/api/client/options.proto + # for full details on available settings. + # + # Note that the address of the system under test is set here. + # + # Add any customizations here such as headers. + uri { + value: "http://127.0.0.1:12345/" + } +} + +# 3 seconds per benchmark is good for demos. +# 60 seconds per benchmark has been found to reduce noise. +measuring_period { + seconds: 3 +} + +convergence_deadline { + seconds: 1000 +} + +# Confirm the final qps with a longer benchmark. +testing_stage_duration { + seconds: 10 +} + +# Back off if the latency statistic is over a threshold. +# See https://github.com/envoyproxy/nighthawk/blob/9ade1a58c787e4d0e165cabbb42f6a410a56a865/source/adaptive_load/metrics_plugin_impl.cc#L148 +# for the full list of supported metrics. +metric_thresholds { + # metric_spec { metric_name: "latency-ns-mean-plus-2stdev" } + metric_spec { metric_name: "latency-ns-max" } + threshold_spec { + scoring_function { + name: "nighthawk.binary_scoring" + typed_config { + [type.googleapis.com/ + nighthawk.adaptive_load.BinaryScoringFunctionConfig] { + # 1,000,000 ns = 1 ms + upper_threshold { value: 1000000 } + } + } + } + } +} + +# Back off if Nighthawk internal limitations prevented sending >5% of requests. +metric_thresholds { + metric_spec { + metric_name: "send-rate" + } + threshold_spec { + scoring_function { + name: "nighthawk.binary_scoring" + typed_config { + [type.googleapis.com/nighthawk.adaptive_load.BinaryScoringFunctionConfig] { + lower_threshold { + value: 0.95 + } + } + } + } + } +} + +# Back off if less than 95% of received responses were 2xx. +metric_thresholds { + metric_spec { + metric_name: "success-rate" + } + threshold_spec { + scoring_function { + name: "nighthawk.binary_scoring" + typed_config { + [type.googleapis.com/nighthawk.adaptive_load.BinaryScoringFunctionConfig] { + lower_threshold { + value: 0.95 + } + } + } + } + } +} + +# Exponential search starting with 10 qps +step_controller_config { + name: "nighthawk.exponential_search" + typed_config { + [type.googleapis.com/nighthawk.adaptive_load.ExponentialSearchStepControllerConfig] { + initial_value: 10.0 + } + } +} diff --git a/test/benchmark_http_client_test.cc b/test/benchmark_http_client_test.cc index f61f0f8a6..b473db323 100644 --- a/test/benchmark_http_client_test.cc +++ b/test/benchmark_http_client_test.cc @@ -67,10 +67,10 @@ class BenchmarkClientHttpTest : public Test { {":scheme", "http"}, {":method", "GET"}, {":path", "/"}, {":host", "localhost"}}; default_header_map_ = (std::make_shared(header_map_param)); - EXPECT_CALL(cluster_manager(), httpConnPoolForCluster(_, _, _, _)) - .WillRepeatedly(Return(&pool_)); - EXPECT_CALL(cluster_manager(), get(_)).WillRepeatedly(Return(&thread_local_cluster_)); + EXPECT_CALL(cluster_manager(), getThreadLocalCluster(_)) + .WillRepeatedly(Return(&thread_local_cluster_)); EXPECT_CALL(thread_local_cluster_, info()).WillRepeatedly(Return(cluster_info_)); + EXPECT_CALL(thread_local_cluster_, httpConnPool(_, _, _)).WillRepeatedly(Return(&pool_)); auto& tracer = static_cast(*http_tracer_); EXPECT_CALL(tracer, startSpan_(_, _, _, _)) @@ -110,6 +110,7 @@ class BenchmarkClientHttpTest : public Test { .WillByDefault( WithArgs<0>(([&called_headers](const Envoy::Http::RequestHeaderMap& specific_request) { called_headers.insert(getPathFromRequest(specific_request)); + return Envoy::Http::Status(); }))); EXPECT_CALL(pool_, newStream(_, _)) @@ -119,7 +120,7 @@ class BenchmarkClientHttpTest : public Test { decoders_.push_back(&decoder); NiceMock stream_info; callbacks.onPoolReady(stream_encoder_, Envoy::Upstream::HostDescriptionConstSharedPtr{}, - stream_info); + stream_info, {} /*absl::optional protocol*/); return nullptr; }); @@ -215,7 +216,7 @@ class BenchmarkClientHttpTest : public Test { int worker_number_{0}; Client::BenchmarkClientStatistic statistic_; std::shared_ptr default_header_map_; -}; // namespace Nighthawk +}; TEST_F(BenchmarkClientHttpTest, BasicTestH1200) { response_code_ = "200"; @@ -357,7 +358,7 @@ TEST_F(BenchmarkClientHttpTest, RequestMethodPost) { return std::make_unique(header); }; - EXPECT_CALL(stream_encoder_, encodeData(_, _)).Times(1); + EXPECT_CALL(stream_encoder_, encodeData(_, _)); auto client_setup_parameters = ClientSetupParameters(1, 1, 1, request_generator); verifyBenchmarkClientProcessesExpectedInflightRequests(client_setup_parameters); EXPECT_EQ(1, getCounter("http_2xx")); @@ -418,4 +419,28 @@ TEST_F(BenchmarkClientHttpTest, RequestGeneratorProvidingDifferentPathsSendsRequ &expected_requests); EXPECT_EQ(2, getCounter("http_2xx")); } + +TEST_F(BenchmarkClientHttpTest, DrainTimeoutFires) { + RequestGenerator default_request_generator = getDefaultRequestGenerator(); + setupBenchmarkClient(default_request_generator); + EXPECT_CALL(pool_, newStream(_, _)) + .WillOnce( + [this](Envoy::Http::ResponseDecoder& decoder, Envoy::Http::ConnectionPool::Callbacks&) + -> Envoy::Http::ConnectionPool::Cancellable* { + // The decoder self-terminates in normal operation, but in this test that won't + // happen. Se we delete it ourselves. Note that we run our integration test with + // asan, so any leaks in real usage ought to be caught there. + delete &decoder; + client_->terminate(); + return nullptr; + }); + EXPECT_CALL(pool_, hasActiveConnections()).WillOnce([]() -> bool { return true; }); + EXPECT_CALL(pool_, addDrainedCallback(_)); + // We don't expect the callback that we pass here to fire. + client_->tryStartRequest([](bool, bool) { EXPECT_TRUE(false); }); + // To get past this, the drain timeout within the benchmark client must execute. + dispatcher_->run(Envoy::Event::Dispatcher::RunType::Block); + EXPECT_EQ(0, getCounter("http_2xx")); +} + } // namespace Nighthawk diff --git a/test/client_worker_test.cc b/test/client_worker_test.cc index 5f00cc723..e143b9635 100644 --- a/test/client_worker_test.cc +++ b/test/client_worker_test.cc @@ -54,7 +54,7 @@ class ClientWorkerTest : public Test { EXPECT_CALL(request_generator_factory_, create(_, _, _, _)) .Times(1) .WillOnce(Return(ByMove(std::unique_ptr(request_generator_)))); - EXPECT_CALL(*request_generator_, initOnThread()).Times(1); + EXPECT_CALL(*request_generator_, initOnThread()); EXPECT_CALL(termination_predicate_factory_, create(_, _, _)) .WillOnce(Return(ByMove(createMockTerminationPredicate()))); @@ -105,26 +105,26 @@ TEST_F(ClientWorkerTest, BasicTest) { { InSequence dummy; - EXPECT_CALL(*benchmark_client_, setShouldMeasureLatencies(false)).Times(1); + EXPECT_CALL(*benchmark_client_, setShouldMeasureLatencies(false)); EXPECT_CALL(*benchmark_client_, tryStartRequest(_)) .WillOnce(Invoke(this, &ClientWorkerTest::CheckThreadChanged)); - EXPECT_CALL(*benchmark_client_, setShouldMeasureLatencies(true)).Times(1); - EXPECT_CALL(*sequencer_, start).Times(1); - EXPECT_CALL(*sequencer_, waitForCompletion).Times(1); - EXPECT_CALL(*benchmark_client_, terminate()).Times(1); + EXPECT_CALL(*benchmark_client_, setShouldMeasureLatencies(true)); + EXPECT_CALL(*sequencer_, start); + EXPECT_CALL(*sequencer_, waitForCompletion); + EXPECT_CALL(*benchmark_client_, terminate()); } int worker_number = 12345; auto worker = std::make_unique( *api_, tls_, cluster_manager_ptr_, benchmark_client_factory_, termination_predicate_factory_, sequencer_factory_, request_generator_factory_, store_, worker_number, - time_system_.systemTime(), http_tracer_, ClientWorkerImpl::HardCodedWarmupStyle::ON); + time_system_.monotonicTime(), http_tracer_, ClientWorkerImpl::HardCodedWarmupStyle::ON); worker->start(); worker->waitForCompletion(); - EXPECT_CALL(*benchmark_client_, statistics()).Times(1).WillOnce(Return(createStatisticPtrMap())); - EXPECT_CALL(*sequencer_, statistics()).Times(1).WillOnce(Return(createStatisticPtrMap())); + EXPECT_CALL(*benchmark_client_, statistics()).WillOnce(Return(createStatisticPtrMap())); + EXPECT_CALL(*sequencer_, statistics()).WillOnce(Return(createStatisticPtrMap())); auto statistics = worker->statistics(); EXPECT_EQ(2, statistics.size()); diff --git a/test/common/BUILD b/test/common/BUILD index 4c0547785..54c96ad4d 100644 --- a/test/common/BUILD +++ b/test/common/BUILD @@ -38,3 +38,12 @@ envoy_cc_test( "@com_github_grpc_grpc//:grpc++_test", ], ) + +envoy_cc_test( + name = "signal_handler_test", + srcs = ["signal_handler_test.cc"], + repository = "@envoy", + deps = [ + "//source/common:nighthawk_common_lib", + ], +) diff --git a/test/common/nighthawk_service_client_test.cc b/test/common/nighthawk_service_client_test.cc index b9a385857..f74e7d44f 100644 --- a/test/common/nighthawk_service_client_test.cc +++ b/test/common/nighthawk_service_client_test.cc @@ -33,7 +33,7 @@ TEST(PerformNighthawkBenchmark, UsesSpecifiedCommandLineOptions) { // Configure the mock Nighthawk Service stub to return an inner mock channel when the code under // test requests a channel. Set call expectations on the inner mock channel. EXPECT_CALL(mock_nighthawk_service_stub, ExecutionStreamRaw) - .WillOnce([&request](grpc_impl::ClientContext*) { + .WillOnce([&request](grpc::ClientContext*) { auto* mock_reader_writer = new grpc::testing::MockClientReaderWriter(); // PerformNighthawkBenchmark currently expects Read to return true exactly once. @@ -61,7 +61,7 @@ TEST(PerformNighthawkBenchmark, ReturnsNighthawkResponseSuccessfully) { // Configure the mock Nighthawk Service stub to return an inner mock channel when the code under // test requests a channel. Set call expectations on the inner mock channel. EXPECT_CALL(mock_nighthawk_service_stub, ExecutionStreamRaw) - .WillOnce([&expected_response](grpc_impl::ClientContext*) { + .WillOnce([&expected_response](grpc::ClientContext*) { auto* mock_reader_writer = new grpc::testing::MockClientReaderWriter(); // PerformNighthawkBenchmark currently expects Read to return true exactly once. @@ -88,15 +88,14 @@ TEST(PerformNighthawkBenchmark, ReturnsErrorIfNighthawkServiceDoesNotSendRespons nighthawk::client::MockNighthawkServiceStub mock_nighthawk_service_stub; // Configure the mock Nighthawk Service stub to return an inner mock channel when the code under // test requests a channel. Set call expectations on the inner mock channel. - EXPECT_CALL(mock_nighthawk_service_stub, ExecutionStreamRaw) - .WillOnce([](grpc_impl::ClientContext*) { - auto* mock_reader_writer = - new grpc::testing::MockClientReaderWriter(); - EXPECT_CALL(*mock_reader_writer, Read(_)).WillOnce(Return(false)); - EXPECT_CALL(*mock_reader_writer, Write(_, _)).WillOnce(Return(true)); - EXPECT_CALL(*mock_reader_writer, WritesDone()).WillOnce(Return(true)); - return mock_reader_writer; - }); + EXPECT_CALL(mock_nighthawk_service_stub, ExecutionStreamRaw).WillOnce([](grpc::ClientContext*) { + auto* mock_reader_writer = + new grpc::testing::MockClientReaderWriter(); + EXPECT_CALL(*mock_reader_writer, Read(_)).WillOnce(Return(false)); + EXPECT_CALL(*mock_reader_writer, Write(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*mock_reader_writer, WritesDone()).WillOnce(Return(true)); + return mock_reader_writer; + }); NighthawkServiceClientImpl client; absl::StatusOr response_or = @@ -111,13 +110,12 @@ TEST(PerformNighthawkBenchmark, ReturnsErrorIfNighthawkServiceWriteFails) { nighthawk::client::MockNighthawkServiceStub mock_nighthawk_service_stub; // Configure the mock Nighthawk Service stub to return an inner mock channel when the code under // test requests a channel. Set call expectations on the inner mock channel. - EXPECT_CALL(mock_nighthawk_service_stub, ExecutionStreamRaw) - .WillOnce([](grpc_impl::ClientContext*) { - auto* mock_reader_writer = - new grpc::testing::MockClientReaderWriter(); - EXPECT_CALL(*mock_reader_writer, Write(_, _)).WillOnce(Return(false)); - return mock_reader_writer; - }); + EXPECT_CALL(mock_nighthawk_service_stub, ExecutionStreamRaw).WillOnce([](grpc::ClientContext*) { + auto* mock_reader_writer = + new grpc::testing::MockClientReaderWriter(); + EXPECT_CALL(*mock_reader_writer, Write(_, _)).WillOnce(Return(false)); + return mock_reader_writer; + }); NighthawkServiceClientImpl client; absl::StatusOr response_or = @@ -131,14 +129,13 @@ TEST(PerformNighthawkBenchmark, ReturnsErrorIfNighthawkServiceWritesDoneFails) { nighthawk::client::MockNighthawkServiceStub mock_nighthawk_service_stub; // Configure the mock Nighthawk Service stub to return an inner mock channel when the code under // test requests a channel. Set call expectations on the inner mock channel. - EXPECT_CALL(mock_nighthawk_service_stub, ExecutionStreamRaw) - .WillOnce([](grpc_impl::ClientContext*) { - auto* mock_reader_writer = - new grpc::testing::MockClientReaderWriter(); - EXPECT_CALL(*mock_reader_writer, Write(_, _)).WillOnce(Return(true)); - EXPECT_CALL(*mock_reader_writer, WritesDone()).WillOnce(Return(false)); - return mock_reader_writer; - }); + EXPECT_CALL(mock_nighthawk_service_stub, ExecutionStreamRaw).WillOnce([](grpc::ClientContext*) { + auto* mock_reader_writer = + new grpc::testing::MockClientReaderWriter(); + EXPECT_CALL(*mock_reader_writer, Write(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*mock_reader_writer, WritesDone()).WillOnce(Return(false)); + return mock_reader_writer; + }); NighthawkServiceClientImpl client; absl::StatusOr response_or = @@ -152,19 +149,18 @@ TEST(PerformNighthawkBenchmark, PropagatesErrorIfNighthawkServiceGrpcStreamClose nighthawk::client::MockNighthawkServiceStub mock_nighthawk_service_stub; // Configure the mock Nighthawk Service stub to return an inner mock channel when the code under // test requests a channel. Set call expectations on the inner mock channel. - EXPECT_CALL(mock_nighthawk_service_stub, ExecutionStreamRaw) - .WillOnce([](grpc_impl::ClientContext*) { - auto* mock_reader_writer = - new grpc::testing::MockClientReaderWriter(); - // PerformNighthawkBenchmark currently expects Read to return true exactly once. - EXPECT_CALL(*mock_reader_writer, Read(_)).WillOnce(Return(true)).WillOnce(Return(false)); - EXPECT_CALL(*mock_reader_writer, Write(_, _)).WillOnce(Return(true)); - EXPECT_CALL(*mock_reader_writer, WritesDone()).WillOnce(Return(true)); - EXPECT_CALL(*mock_reader_writer, Finish()) - .WillOnce( - Return(::grpc::Status(::grpc::PERMISSION_DENIED, "Finish failure status message"))); - return mock_reader_writer; - }); + EXPECT_CALL(mock_nighthawk_service_stub, ExecutionStreamRaw).WillOnce([](grpc::ClientContext*) { + auto* mock_reader_writer = + new grpc::testing::MockClientReaderWriter(); + // PerformNighthawkBenchmark currently expects Read to return true exactly once. + EXPECT_CALL(*mock_reader_writer, Read(_)).WillOnce(Return(true)).WillOnce(Return(false)); + EXPECT_CALL(*mock_reader_writer, Write(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*mock_reader_writer, WritesDone()).WillOnce(Return(true)); + EXPECT_CALL(*mock_reader_writer, Finish()) + .WillOnce( + Return(::grpc::Status(::grpc::PERMISSION_DENIED, "Finish failure status message"))); + return mock_reader_writer; + }); NighthawkServiceClientImpl client; absl::StatusOr response_or = diff --git a/test/common/signal_handler_test.cc b/test/common/signal_handler_test.cc new file mode 100644 index 000000000..cabb2251b --- /dev/null +++ b/test/common/signal_handler_test.cc @@ -0,0 +1,35 @@ +#include +#include + +#include "common/signal_handler.h" + +#include "gtest/gtest.h" + +namespace Nighthawk { +namespace { + +TEST(SignalHandlerTest, SignalGetsHandled) { + for (const auto signal : {SIGTERM, SIGINT}) { + bool signal_handled = false; + std::promise signal_all_threads_running; + + SignalHandler signal_handler([&signal_handled, &signal_all_threads_running]() { + signal_handled = true; + signal_all_threads_running.set_value(); + }); + std::raise(signal); + signal_all_threads_running.get_future().wait(); + EXPECT_TRUE(signal_handled); + } +} + +TEST(SignalHandlerTest, DestructDoesNotFireHandler) { + bool signal_handled = false; + { + SignalHandler signal_handler([&signal_handled]() { signal_handled = true; }); + } + EXPECT_FALSE(signal_handled); +} + +} // namespace +} // namespace Nighthawk diff --git a/test/factories_test.cc b/test/factories_test.cc index 1a410496a..b1f9cec12 100644 --- a/test/factories_test.cc +++ b/test/factories_test.cc @@ -11,6 +11,7 @@ #include "test/mocks/client/mock_benchmark_client.h" #include "test/mocks/client/mock_options.h" #include "test/mocks/common/mock_termination_predicate.h" +#include "test/test_common/environment.h" #include "gtest/gtest.h" @@ -35,15 +36,15 @@ class FactoriesTest : public Test { TEST_F(FactoriesTest, CreateBenchmarkClient) { BenchmarkClientFactoryImpl factory(options_); Envoy::Upstream::ClusterManagerPtr cluster_manager; - EXPECT_CALL(options_, connections()).Times(1); - EXPECT_CALL(options_, h2()).Times(1); - EXPECT_CALL(options_, maxPendingRequests()).Times(1); - EXPECT_CALL(options_, maxActiveRequests()).Times(1); - EXPECT_CALL(options_, maxRequestsPerConnection()).Times(1); - EXPECT_CALL(options_, openLoop()).Times(1); - EXPECT_CALL(options_, responseHeaderWithLatencyInput()).Times(1); + EXPECT_CALL(options_, connections()); + EXPECT_CALL(options_, h2()); + EXPECT_CALL(options_, maxPendingRequests()); + EXPECT_CALL(options_, maxActiveRequests()); + EXPECT_CALL(options_, maxRequestsPerConnection()); + EXPECT_CALL(options_, openLoop()); + EXPECT_CALL(options_, responseHeaderWithLatencyInput()); auto cmd = std::make_unique(); - EXPECT_CALL(options_, toCommandLineOptions()).Times(1).WillOnce(Return(ByMove(std::move(cmd)))); + EXPECT_CALL(options_, toCommandLineOptions()).WillOnce(Return(ByMove(std::move(cmd)))); StaticRequestSourceImpl request_generator( std::make_unique()); auto benchmark_client = @@ -52,20 +53,125 @@ TEST_F(FactoriesTest, CreateBenchmarkClient) { EXPECT_NE(nullptr, benchmark_client.get()); } +TEST_F(FactoriesTest, CreateRequestSourcePluginWithWorkingJsonReturnsWorkingRequestSource) { + absl::optional request_source_plugin_config; + std::string request_source_plugin_config_json = + "{" + "name:\"nighthawk.in-line-options-list-request-source-plugin\"," + "typed_config:{" + "\"@type\":\"type.googleapis.com/" + "nighthawk.request_source.InLineOptionsListRequestSourceConfig\"," + "options_list:{" + "options:[{request_method:\"1\",request_headers:[{header:{key:\":path\",value:\"inlinepath\"}" + "}]}]" + "}," + "}" + "}"; + request_source_plugin_config.emplace(envoy::config::core::v3::TypedExtensionConfig()); + Envoy::MessageUtil::loadFromJson(request_source_plugin_config_json, + request_source_plugin_config.value(), + Envoy::ProtobufMessage::getStrictValidationVisitor()); + EXPECT_CALL(options_, requestMethod()); + EXPECT_CALL(options_, requestBodySize()); + EXPECT_CALL(options_, uri()).Times(2).WillRepeatedly(Return("http://foo/")); + EXPECT_CALL(options_, requestSource()); + EXPECT_CALL(options_, requestSourcePluginConfig()) + .Times(2) + .WillRepeatedly(ReturnRef(request_source_plugin_config)); + auto cmd = std::make_unique(); + envoy::config::core::v3::HeaderValueOption* request_headers = + cmd->mutable_request_options()->add_request_headers(); + request_headers->mutable_header()->set_key("foo"); + request_headers->mutable_header()->set_value("bar"); + EXPECT_CALL(options_, toCommandLineOptions()).WillOnce(Return(ByMove(std::move(cmd)))); + RequestSourceFactoryImpl factory(options_, *api_); + Envoy::Upstream::ClusterManagerPtr cluster_manager; + Nighthawk::RequestSourcePtr request_source = factory.create( + cluster_manager, dispatcher_, *stats_store_.createScope("foo."), "requestsource"); + EXPECT_NE(nullptr, request_source.get()); + Nighthawk::RequestGenerator generator = request_source->get(); + Nighthawk::RequestPtr request = generator(); + EXPECT_EQ("inlinepath", request->header()->getPathValue()); +} + +TEST_F(FactoriesTest, CreateRequestSourcePluginWithNonWorkingJsonThrowsError) { + absl::optional request_source_plugin_config; + std::string request_source_plugin_config_json = + "{" + R"(name:"nighthawk.file-based-request-source-plugin",)" + "typed_config:{" + R"("@type":"type.googleapis.com/)" + R"(nighthawk.request_source.FileBasedOptionsListRequestSourceConfig",)" + R"(file_path:")" + + TestEnvironment::runfilesPath("test/request_source/test_data/NotARealFile.yaml") + + "\"," + "}" + "}"; + request_source_plugin_config.emplace(envoy::config::core::v3::TypedExtensionConfig()); + Envoy::MessageUtil::loadFromJson(request_source_plugin_config_json, + request_source_plugin_config.value(), + Envoy::ProtobufMessage::getStrictValidationVisitor()); + EXPECT_CALL(options_, requestMethod()); + EXPECT_CALL(options_, requestBodySize()); + EXPECT_CALL(options_, uri()).Times(2).WillRepeatedly(Return("http://foo/")); + EXPECT_CALL(options_, requestSource()); + EXPECT_CALL(options_, requestSourcePluginConfig()) + .Times(2) + .WillRepeatedly(ReturnRef(request_source_plugin_config)); + auto cmd = std::make_unique(); + envoy::config::core::v3::HeaderValueOption* request_headers = + cmd->mutable_request_options()->add_request_headers(); + request_headers->mutable_header()->set_key("foo"); + request_headers->mutable_header()->set_value("bar"); + EXPECT_CALL(options_, toCommandLineOptions()).WillOnce(Return(ByMove(std::move(cmd)))); + RequestSourceFactoryImpl factory(options_, *api_); + Envoy::Upstream::ClusterManagerPtr cluster_manager; + EXPECT_THROW_WITH_REGEX( + factory.create(cluster_manager, dispatcher_, *stats_store_.createScope("foo."), + "requestsource"), + NighthawkException, + "Request Source plugin loading error should have been caught during input validation"); +} + TEST_F(FactoriesTest, CreateRequestSource) { - EXPECT_CALL(options_, requestMethod()).Times(1); - EXPECT_CALL(options_, requestBodySize()).Times(1); + absl::optional request_source_plugin_config; + EXPECT_CALL(options_, requestMethod()); + EXPECT_CALL(options_, requestBodySize()); + EXPECT_CALL(options_, uri()).Times(2).WillRepeatedly(Return("http://foo/")); + EXPECT_CALL(options_, requestSource()); + EXPECT_CALL(options_, requestSourcePluginConfig()) + .Times(1) + .WillRepeatedly(ReturnRef(request_source_plugin_config)); + auto cmd = std::make_unique(); + envoy::config::core::v3::HeaderValueOption* request_headers = + cmd->mutable_request_options()->add_request_headers(); + request_headers->mutable_header()->set_key("foo"); + request_headers->mutable_header()->set_value("bar"); + EXPECT_CALL(options_, toCommandLineOptions()).WillOnce(Return(ByMove(std::move(cmd)))); + RequestSourceFactoryImpl factory(options_, *api_); + Envoy::Upstream::ClusterManagerPtr cluster_manager; + RequestSourcePtr request_generator = factory.create( + cluster_manager, dispatcher_, *stats_store_.createScope("foo."), "requestsource"); + EXPECT_NE(nullptr, request_generator.get()); +} + +TEST_F(FactoriesTest, CreateRemoteRequestSource) { + absl::optional request_source_plugin_config; + EXPECT_CALL(options_, requestMethod()); + EXPECT_CALL(options_, requestBodySize()); EXPECT_CALL(options_, uri()).Times(2).WillRepeatedly(Return("http://foo/")); - EXPECT_CALL(options_, requestSource()).Times(1); + EXPECT_CALL(options_, requestSource()).WillOnce(Return("http://bar/")); + EXPECT_CALL(options_, requestsPerSecond()).WillOnce(Return(5)); auto cmd = std::make_unique(); - auto request_headers = cmd->mutable_request_options()->add_request_headers(); + envoy::config::core::v3::HeaderValueOption* request_headers = + cmd->mutable_request_options()->add_request_headers(); request_headers->mutable_header()->set_key("foo"); request_headers->mutable_header()->set_value("bar"); - EXPECT_CALL(options_, toCommandLineOptions()).Times(1).WillOnce(Return(ByMove(std::move(cmd)))); - RequestSourceFactoryImpl factory(options_); + EXPECT_CALL(options_, toCommandLineOptions()).WillOnce(Return(ByMove(std::move(cmd)))); + RequestSourceFactoryImpl factory(options_, *api_); Envoy::Upstream::ClusterManagerPtr cluster_manager; - auto request_generator = factory.create(cluster_manager, dispatcher_, - *stats_store_.createScope("foo."), "requestsource"); + RequestSourcePtr request_generator = factory.create( + cluster_manager, dispatcher_, *stats_store_.createScope("foo."), "requestsource"); EXPECT_NE(nullptr, request_generator.get()); } @@ -79,20 +185,20 @@ class SequencerFactoryTest sequencer_idle_strategy) { SequencerFactoryImpl factory(options_); MockBenchmarkClient benchmark_client; - EXPECT_CALL(options_, requestsPerSecond()).Times(1).WillOnce(Return(1)); - EXPECT_CALL(options_, burstSize()).Times(1).WillOnce(Return(2)); + EXPECT_CALL(options_, requestsPerSecond()).WillOnce(Return(1)); + EXPECT_CALL(options_, burstSize()).WillOnce(Return(2)); EXPECT_CALL(options_, sequencerIdleStrategy()) .Times(1) .WillOnce(Return(sequencer_idle_strategy)); EXPECT_CALL(dispatcher_, createTimer_(_)).Times(2); - EXPECT_CALL(options_, jitterUniform()).Times(1).WillOnce(Return(1ns)); + EXPECT_CALL(options_, jitterUniform()).WillOnce(Return(1ns)); Envoy::Event::SimulatedTimeSystem time_system; const SequencerTarget dummy_sequencer_target = [](const CompletionCallback&) -> bool { return true; }; auto sequencer = factory.create(api_->timeSource(), dispatcher_, dummy_sequencer_target, std::make_unique(), stats_store_, - time_system.systemTime() + 10ms); + time_system.monotonicTime() + 10ms); EXPECT_NE(nullptr, sequencer.get()); } }; diff --git a/test/flush_worker_test.cc b/test/flush_worker_test.cc index 42b390265..d9d2be38b 100644 --- a/test/flush_worker_test.cc +++ b/test/flush_worker_test.cc @@ -132,7 +132,7 @@ TEST_F(FlushWorkerTest, WorkerFlushStatsPeriodically) { thread.join(); // Stats flush should happen exactly once as the final flush is done in // FlushWorkerImpl::shutdownThread(). - EXPECT_CALL(*sink_, flush(_)).Times(1); + EXPECT_CALL(*sink_, flush(_)); worker.shutdown(); } @@ -147,7 +147,7 @@ TEST_F(FlushWorkerTest, FinalFlush) { worker.waitForCompletion(); // Stats flush should happen exactly once as the final flush is done in // FlushWorkerImpl::shutdownThread(). - EXPECT_CALL(*sink_, flush(_)).Times(1); + EXPECT_CALL(*sink_, flush(_)); worker.shutdown(); } diff --git a/test/integration/BUILD b/test/integration/BUILD index 00765a29b..c9cd5ad68 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -13,6 +13,7 @@ py_library( name = "integration_test_base", data = [ "configurations/nighthawk_http_origin.yaml", + "configurations/nighthawk_http_origin_envoy_deprecated_v2_api.yaml", "configurations/nighthawk_https_origin.yaml", "configurations/nighthawk_track_timings.yaml", "configurations/sni_origin.yaml", @@ -106,6 +107,12 @@ py_library( deps = [":integration_test_base"], ) +py_library( + name = "test_request_source_plugin_lib", + srcs = ["test_request_source_plugin.py"], + deps = [":integration_test_base"], +) + py_binary( name = "integration_test", srcs = ["integration_test.py"], @@ -122,5 +129,6 @@ py_binary( ":test_integration_zipkin_lib", ":test_output_transform_lib", ":test_remote_execution_lib", + ":test_request_source_plugin_lib", ], ) diff --git a/test/integration/configurations/nighthawk_http_origin.yaml b/test/integration/configurations/nighthawk_http_origin.yaml index 4854523de..3dbf4392a 100644 --- a/test/integration/configurations/nighthawk_http_origin.yaml +++ b/test/integration/configurations/nighthawk_http_origin.yaml @@ -11,10 +11,11 @@ static_resources: port_value: 0 filter_chains: - filters: - - name: envoy.http_connection_manager - config: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager generate_request_id: false - codec_type: auto + codec_type: AUTO stat_prefix: ingress_http route_config: name: local_route @@ -26,10 +27,12 @@ static_resources: - name: time-tracking - name: dynamic-delay - name: test-server - config: + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions response_body_size: 10 - response_headers: + v3_response_headers: - { header: { key: "x-nh", value: "1"}} - - name: envoy.router - config: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router dynamic_stats: false diff --git a/test/integration/configurations/nighthawk_http_origin_envoy_deprecated_v2_api.yaml b/test/integration/configurations/nighthawk_http_origin_envoy_deprecated_v2_api.yaml new file mode 100644 index 000000000..5e07954c7 --- /dev/null +++ b/test/integration/configurations/nighthawk_http_origin_envoy_deprecated_v2_api.yaml @@ -0,0 +1,39 @@ +# This file is intentionally using the v2 api: it is used to test support for that. +admin: + access_log_path: $tmpdir/nighthawk-test-server-admin-access.log + profile_path: $tmpdir/nighthawk-test-server.prof + address: + socket_address: { address: $server_ip, port_value: 0 } +static_resources: + listeners: + - address: + socket_address: + address: $server_ip + port_value: 0 + filter_chains: + - filters: + - name: envoy.http_connection_manager + config: + generate_request_id: false + codec_type: auto + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: service + domains: + - "*" + http_filters: + - name: test-server + config: + response_body_size: 10 + v3_response_headers: + - { header: { key: "x-nh", value: "1"}} + - name: envoy.router + config: + dynamic_stats: false +layered_runtime: + layers: + - name: static_layer + static_layer: + envoy.reloadable_features.enable_deprecated_v2_api: true diff --git a/test/integration/configurations/nighthawk_https_origin.yaml b/test/integration/configurations/nighthawk_https_origin.yaml index 708dbdb67..152f75fe6 100644 --- a/test/integration/configurations/nighthawk_https_origin.yaml +++ b/test/integration/configurations/nighthawk_https_origin.yaml @@ -11,10 +11,11 @@ static_resources: port_value: 0 filter_chains: - filters: - - name: envoy.http_connection_manager - config: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager generate_request_id: false - codec_type: auto + codec_type: AUTO stat_prefix: ingress_http route_config: name: local_route @@ -23,20 +24,26 @@ static_resources: domains: - "*" http_filters: + - name: dynamic-delay - name: test-server - config: + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions response_body_size: 10 - response_headers: + v3_response_headers: - { header: { key: "x-nh", value: "1" } } - - name: envoy.router - config: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router dynamic_stats: false - tls_context: - common_tls_context: - tls_certificates: - - certificate_chain: - inline_string: | - @inject-runfile:nighthawk/external/envoy/test/config/integration/certs/servercert.pem - private_key: - inline_string: | - @inject-runfile:nighthawk/external/envoy/test/config/integration/certs/serverkey.pem + transport_socket: + name: tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: + inline_string: | + @inject-runfile:nighthawk/external/envoy/test/config/integration/certs/servercert.pem + private_key: + inline_string: | + @inject-runfile:nighthawk/external/envoy/test/config/integration/certs/serverkey.pem diff --git a/test/integration/configurations/nighthawk_track_timings.yaml b/test/integration/configurations/nighthawk_track_timings.yaml index 7f4d1b2f5..ab4b37db0 100644 --- a/test/integration/configurations/nighthawk_track_timings.yaml +++ b/test/integration/configurations/nighthawk_track_timings.yaml @@ -14,10 +14,11 @@ static_resources: port_value: 0 filter_chains: - filters: - - name: envoy.http_connection_manager - config: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager generate_request_id: false - codec_type: auto + codec_type: AUTO stat_prefix: ingress_http route_config: name: local_route @@ -28,11 +29,14 @@ static_resources: http_filters: # Here we set up the time-tracking extension to emit request-arrival delta timings in a response header. - name: time-tracking - config: + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions emit_previous_request_delta_in_response_header: x-origin-request-receipt-delta - name: test-server - config: + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions response_body_size: 10 - - name: envoy.router - config: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router dynamic_stats: false diff --git a/test/integration/configurations/sni_origin.yaml b/test/integration/configurations/sni_origin.yaml index 9d49bae35..15dffaf3f 100644 --- a/test/integration/configurations/sni_origin.yaml +++ b/test/integration/configurations/sni_origin.yaml @@ -18,7 +18,7 @@ static_resources: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext common_tls_context: tls_certificates: - certificate_chain: @@ -36,7 +36,7 @@ static_resources: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext common_tls_context: tls_certificates: - certificate_chain: @@ -46,10 +46,11 @@ static_resources: inline_string: | @inject-runfile:nighthawk/external/envoy/test/config/integration/certs/serverkey.pem filters: - - name: envoy.http_connection_manager - config: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager generate_request_id: false - codec_type: auto + codec_type: AUTO stat_prefix: ingress_http route_config: name: local_route @@ -59,18 +60,21 @@ static_resources: - "sni.com" http_filters: - name: test-server - config: + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions response_body_size: 10 - response_headers: + v3_response_headers: - { header: { key: "x-nh", value: "1"}} - - name: envoy.router - config: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router dynamic_stats: false - filters: - - name: envoy.http_connection_manager - config: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager generate_request_id: false - codec_type: auto + codec_type: AUTO stat_prefix: ingress_http route_config: name: local_route @@ -80,10 +84,12 @@ static_resources: - "*" http_filters: - name: test-server - config: + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions response_body_size: 10 - response_headers: + v3_response_headers: - { header: { key: "x-nh", value: "1"}} - - name: envoy.router - config: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router dynamic_stats: false diff --git a/test/integration/integration_test.py b/test/integration/integration_test.py index b715408f4..6c49d974c 100644 --- a/test/integration/integration_test.py +++ b/test/integration/integration_test.py @@ -22,7 +22,11 @@ "-x", path, "-n", - "4" if utility.isSanitizerRun() else "20" # Number of tests to run in parallel + "4" if utility.isSanitizerRun() else "20", # Number of tests to run in parallel + "--log-level", + "INFO", + "--log-cli-level", + "INFO", ], plugins=["xdist"]) exit(r) diff --git a/test/integration/integration_test_fixtures.py b/test/integration/integration_test_fixtures.py index e256d53e4..8e29a1940 100644 --- a/test/integration/integration_test_fixtures.py +++ b/test/integration/integration_test_fixtures.py @@ -54,28 +54,35 @@ class IntegrationTestBase(): work when there is only one test server. This class will be refactored (https://github.com/envoyproxy/nighthawk/issues/258). + + Attributes: + ip_version: IP version that the proxy should use when listening. + server_ip: string containing the server ip that will be used to listen + tag: String. Supply this to get recognizeable output locations. + parameters: Dictionary. Supply this to provide template parameter replacement values. + grpc_service: NighthawkGrpcService instance or None. Set by startNighthawkGrpcService(). + test_server: NighthawkTestServer instance, set during setUp(). + nighthawk_client_path: String, path to the nighthawk_client binary. + request: The pytest `request` test fixture used to determine information + about the currently executing test case. """ - def __init__(self, ip_version, server_config, backend_count=1): + def __init__(self, request, server_config, backend_count=1, bootstrap_version_arg=None): """Initialize the IntegrationTestBase instance. Args: ip_version: a single IP mode that this instance will test: IpVersion.IPV4 or IpVersion.IPV6 + request: The pytest `request` test fixture used to determine information + about the currently executing test case. server_config: path to the server configuration backend_count: number of Nighthawk Test Server backends to run, to allow testing MultiTarget mode - Attributes: - ip_version: IP version that the proxy should use when listening. - server_ip: string containing the server ip that will be used to listen - tag: String. Supply this to get recognizeable output locations. - parameters: Dictionary. Supply this to provide template parameter replacement values. - grpc_service: NighthawkGrpcService instance or None. Set by startNighthawkGrpcService(). - test_server: NighthawkTestServer instance, set during setUp(). - nighthawk_client_path: String, path to the nighthawk_client binary. + bootstrap_version_arg: An optional int, specify a bootstrap cli argument value for the test server binary. If None is specified, no bootstrap cli argment will be passed. """ super(IntegrationTestBase, self).__init__() - assert ip_version != IpVersion.UNKNOWN - self.ip_version = ip_version - self.server_ip = "::/0" if ip_version == IpVersion.IPV6 else "0.0.0.0" + self.request = request + self.ip_version = request.param + assert self.ip_version != IpVersion.UNKNOWN + self.server_ip = "::" if self.ip_version == IpVersion.IPV6 else "0.0.0.0" self.server_ip = os.getenv("TEST_SERVER_EXTERNAL_IP", self.server_ip) self.tag = "" self.parameters = {} @@ -86,10 +93,11 @@ def __init__(self, ip_version, server_config, backend_count=1): self._nighthawk_test_config_path = server_config self._nighthawk_service_path = "nighthawk_service" self._nighthawk_output_transform_path = "nighthawk_output_transform" - self._socket_type = socket.AF_INET6 if ip_version == IpVersion.IPV6 else socket.AF_INET + self._socket_type = socket.AF_INET6 if self.ip_version == IpVersion.IPV6 else socket.AF_INET self._test_servers = [] self._backend_count = backend_count self._test_id = "" + self._bootstrap_version_arg = bootstrap_version_arg # TODO(oschaaf): For the NH test server, add a way to let it determine a port by itself and pull that # out. @@ -123,8 +131,14 @@ def setUp(self): self.tag = "{timestamp}/{test_id}".format(timestamp=_TIMESTAMP, test_id=self._test_id) assert self._tryStartTestServers(), "Test server(s) failed to start" - def tearDown(self): - """Stop the server.""" + def tearDown(self, caplog): + """Stop the server. + + Fails the test if any warnings or errors were logged. + + Args: + caplog: The pytest `caplog` test fixture used to examine logged messages. + """ if self.grpc_service is not None: assert (self.grpc_service.stop() == 0) @@ -134,14 +148,25 @@ def tearDown(self): any_failed = True assert (not any_failed) + warnings_and_errors = [] + for when in ("setup", "call", "teardown"): + for record in caplog.get_records(when): + if record.levelno not in (logging.WARNING, logging.ERROR): + continue + warnings_and_errors.append(record.message) + if warnings_and_errors: + pytest.fail("warnings or errors encountered during testing:\n{}".format(warnings_and_errors)) + def _tryStartTestServers(self): for i in range(self._backend_count): test_server = NighthawkTestServer(self._nighthawk_test_server_path, self._nighthawk_test_config_path, self.server_ip, self.ip_version, + self.request, parameters=self.parameters, - tag=self.tag) + tag=self.tag, + bootstrap_version_arg=self._bootstrap_version_arg) if not test_server.start(): return False self._test_servers.append(test_server) @@ -283,27 +308,45 @@ def startNighthawkGrpcService(self, service_name="traffic-generator-service"): class HttpIntegrationTestBase(IntegrationTestBase): """Base for running plain http tests against the Nighthawk test server. - NOTE: any script that consumes derivations of this, needs to needs also explictly + NOTE: any script that consumes derivations of this, needs to also explicitly import server_config, to avoid errors caused by the server_config not being found by pytest. """ - def __init__(self, ip_version, server_config): + def __init__(self, request, server_config): """See base class.""" - super(HttpIntegrationTestBase, self).__init__(ip_version, server_config) + super(HttpIntegrationTestBase, self).__init__(request, server_config) def getTestServerRootUri(self): """See base class.""" return super(HttpIntegrationTestBase, self).getTestServerRootUri(False) +class HttpIntegrationTestBaseWithEnvoyDeprecatedV2Bootstrap(IntegrationTestBase): + """Base for running plain http tests against the Nighthawk test server. + + NOTE: any script that consumes derivations of this, needs to also explicitly + import server_config, to avoid errors caused by the server_config not being found + by pytest. + """ + + def __init__(self, request, server_config): + """See base class.""" + super(HttpIntegrationTestBaseWithEnvoyDeprecatedV2Bootstrap, + self).__init__(request, server_config, bootstrap_version_arg=2) + + def getTestServerRootUri(self): + """See base class.""" + return super(HttpIntegrationTestBaseWithEnvoyDeprecatedV2Bootstrap, + self).getTestServerRootUri(False) + + class MultiServerHttpIntegrationTestBase(IntegrationTestBase): """Base for running plain http tests against multiple Nighthawk test servers.""" - def __init__(self, ip_version, server_config, backend_count): + def __init__(self, request, server_config, backend_count): """See base class.""" - super(MultiServerHttpIntegrationTestBase, self).__init__(ip_version, server_config, - backend_count) + super(MultiServerHttpIntegrationTestBase, self).__init__(request, server_config, backend_count) def getTestServerRootUri(self): """See base class.""" @@ -317,9 +360,9 @@ def getAllTestServerRootUris(self): class HttpsIntegrationTestBase(IntegrationTestBase): """Base for https tests against the Nighthawk test server.""" - def __init__(self, ip_version, server_config): + def __init__(self, request, server_config): """See base class.""" - super(HttpsIntegrationTestBase, self).__init__(ip_version, server_config) + super(HttpsIntegrationTestBase, self).__init__(request, server_config) def getTestServerRootUri(self): """See base class.""" @@ -329,9 +372,9 @@ def getTestServerRootUri(self): class SniIntegrationTestBase(HttpsIntegrationTestBase): """Base for https/sni tests against the Nighthawk test server.""" - def __init__(self, ip_version, server_config): + def __init__(self, request, server_config): """See base class.""" - super(SniIntegrationTestBase, self).__init__(ip_version, server_config) + super(SniIntegrationTestBase, self).__init__(request, server_config) def getTestServerRootUri(self): """See base class.""" @@ -341,10 +384,9 @@ def getTestServerRootUri(self): class MultiServerHttpsIntegrationTestBase(IntegrationTestBase): """Base for https tests against multiple Nighthawk test servers.""" - def __init__(self, ip_version, server_config, backend_count): + def __init__(self, request, server_config, backend_count): """See base class.""" - super(MultiServerHttpsIntegrationTestBase, self).__init__(ip_version, server_config, - backend_count) + super(MultiServerHttpsIntegrationTestBase, self).__init__(request, server_config, backend_count) def getTestServerRootUri(self): """See base class.""" @@ -366,52 +408,65 @@ def server_config(): @pytest.fixture(params=determineIpVersionsFromEnvironment()) -def http_test_server_fixture(request, server_config): +def http_test_server_fixture(request, server_config, caplog): """Fixture for setting up a test environment with the stock http server configuration. Yields: HttpIntegrationTestBase: A fully set up instance. Tear down will happen automatically. """ - f = HttpIntegrationTestBase(request.param, server_config) + f = HttpIntegrationTestBase(request, server_config) + f.setUp() + yield f + f.tearDown(caplog) + + +@pytest.fixture(params=determineIpVersionsFromEnvironment()) +def http_test_server_fixture_envoy_deprecated_v2_api(request, server_config, caplog): + """Fixture for setting up a test environment with http server configuration that uses v2 configuration. + + Yields: + HttpIntegrationTestBaseWithEnvoyDeprecatedV2Bootstrap: A fully set up instance. Tear down will happen automatically. + """ + f = HttpIntegrationTestBaseWithEnvoyDeprecatedV2Bootstrap(request, server_config) f.setUp() yield f - f.tearDown() + f.tearDown(caplog) @pytest.fixture(params=determineIpVersionsFromEnvironment()) -def https_test_server_fixture(request, server_config): +def https_test_server_fixture(request, server_config, caplog): """Fixture for setting up a test environment with the stock https server configuration. Yields: HttpsIntegrationTestBase: A fully set up instance. Tear down will happen automatically. """ - f = HttpsIntegrationTestBase(request.param, server_config) + f = HttpsIntegrationTestBase(request, server_config) f.setUp() yield f - f.tearDown() + f.tearDown(caplog) @pytest.fixture(params=determineIpVersionsFromEnvironment()) -def multi_http_test_server_fixture(request, server_config): +def multi_http_test_server_fixture(request, server_config, caplog): """Fixture for setting up a test environment with multiple servers, using the stock http server configuration. Yields: MultiServerHttpIntegrationTestBase: A fully set up instance. Tear down will happen automatically. """ - f = MultiServerHttpIntegrationTestBase(request.param, server_config, backend_count=3) + f = MultiServerHttpIntegrationTestBase(request, server_config, backend_count=3) f.setUp() yield f - f.tearDown() + f.tearDown(caplog) @pytest.fixture(params=determineIpVersionsFromEnvironment()) -def multi_https_test_server_fixture(request, server_config): +def multi_https_test_server_fixture(request, server_config, caplog): """Fixture for setting up a test environment with multiple servers, using the stock https server configuration. Yields: MultiServerHttpsIntegrationTestBase: A fully set up instance. Tear down will happen automatically. """ - f = MultiServerHttpsIntegrationTestBase(request.param, server_config, backend_count=3) + f = MultiServerHttpsIntegrationTestBase(request, server_config, backend_count=3) f.setUp() yield f - f.tearDown() + f.tearDown(caplog) diff --git a/test/integration/nighthawk_test_server.py b/test/integration/nighthawk_test_server.py index 6a6deb849..3318afdc2 100644 --- a/test/integration/nighthawk_test_server.py +++ b/test/integration/nighthawk_test_server.py @@ -1,14 +1,16 @@ """Contains the NighthawkTestServer class, which wraps the nighthawk_test_servern binary.""" +import collections import http.client import json import logging import os +import random +import re +import requests import socket import subprocess import sys -import random -import requests import tempfile import threading import time @@ -40,6 +42,65 @@ def _substitute_yaml_values(runfiles_instance, obj, params): return obj +class _TestCaseWarnErrorIgnoreList( + collections.namedtuple("_TestCaseWarnErrorIgnoreList", "test_case_regexp ignore_list")): + """Maps test case names to messages that should be ignored in the test server logs. + + If the name of the currently executing test case matches the test_case_regexp, + any messages logged by the test server as either a WARNING or an ERROR + will be checked against the ignore_list. If the logged messages contain any of + the messages in the ignore list as a substring, they will be ignored. + Any unmatched messages of either a WARNING or an ERROR severity will fail the + test case. + + Attributes: + test_case_regexp: A compiled regular expression as returned by re.compile(), + the regexp that will be used to match test case names. + ignore_list: A tuple of strings, messages to ignore for matching test cases. + """ + + +# A list of _TestCaseWarnErrorIgnoreList instances, message pieces that should +# be ignored even if logged by the test server at a WARNING or an ERROR +# severity. +# +# If multiple test_case_regexp entries match the current test case name, all the +# corresponding ignore lists will be used. +_TEST_SERVER_WARN_ERROR_IGNORE_LIST = frozenset([ + # This test case purposefully uses the deprecated Envoy v2 API which emits + # the following warnings. + _TestCaseWarnErrorIgnoreList( + re.compile('test_nighthawk_test_server_envoy_deprecated_v2_api'), + ( + "Configuration does not parse cleanly as v3. v2 configuration is deprecated", + "Deprecated field: type envoy.api.v2.listener.Filter", + "Deprecated field: type envoy.config.filter.network.http_connection_manager.v2.HttpFilter", + "Using deprecated extension name 'envoy.http_connection_manager'", + "Using deprecated extension name 'envoy.router'", + ), + ), + + # A catch-all that applies to all remaining test cases. + _TestCaseWarnErrorIgnoreList( + re.compile('.*'), + ( + # TODO(#582): Identify these and file issues or add explanation as necessary. + "Unable to use runtime singleton for feature envoy.http.headermap.lazy_map_min_size", + "Using deprecated extension name 'envoy.listener.tls_inspector' for 'envoy.filters.listener.tls_inspector'.", + "there is no configured limit to the number of allowed active connections. Set a limit via the runtime key overload.global_downstream_max_connections", + + # A few of our filters use the same typed configuration, specifically + # 'test-server', 'time-tracking' and 'dynamic-delay'. + # For now this is by design. + "Double registration for type: 'nighthawk.server.ResponseOptions'", + + # Logged for normal termination, not really a warning. + "caught ENVOY_SIGTERM", + ), + ), +]) + + class TestServerBase(object): """Base class for running a server in a separate process. @@ -51,8 +112,16 @@ class TestServerBase(object): tmpdir: String, indicates the location used to store outputs like logs. """ - def __init__(self, server_binary_path, config_template_path, server_ip, ip_version, - server_binary_config_path_arg, parameters, tag): + def __init__(self, + server_binary_path, + config_template_path, + server_ip, + ip_version, + request, + server_binary_config_path_arg, + parameters, + tag, + bootstrap_version_arg=None): """Initialize a TestServerBase instance. Args: @@ -60,9 +129,11 @@ def __init__(self, server_binary_path, config_template_path, server_ip, ip_versi config_template_path (str): specify the path to the test server configuration template. server_ip (str): Specify the ip address the test server should use to listen for traffic. ip_version (IPAddress): Specify the ip version the server should use to listen for traffic. + request: The pytest `request` fixture used to determin information about the currently executed test. server_binary_config_path_arg (str): Specify the name of the CLI argument the test server binary uses to accept a configuration path. parameters (dict): Supply to provide configuration template parameter replacement values. tag (str): Supply to get recognizeable output locations. + bootstrap_version_arg (int, optional): specify a bootstrap cli argument value for the test server binary. """ assert ip_version != IpVersion.UNKNOWN self.ip_version = ip_version @@ -82,7 +153,9 @@ def __init__(self, server_binary_path, config_template_path, server_ip, ip_versi self._parameterized_config_path = "" self._instance_id = str(random.randint(1, 1024 * 1024 * 1024)) self._server_binary_config_path_arg = server_binary_config_path_arg + self._bootstrap_version_arg = bootstrap_version_arg self._prepareForExecution() + self._request = request def _prepareForExecution(self): runfiles_instance = runfiles.Create() @@ -121,11 +194,21 @@ def _serverThreadRunner(self): self._parameterized_config_path, "-l", "debug", "--base-id", self._instance_id, "--admin-address-path", self._admin_address_path, "--concurrency", "1" ] + if self._bootstrap_version_arg is not None: + args = args + ["--bootstrap-version", str(self._bootstrap_version_arg)] + logging.info("Test server popen() args: %s" % str.join(" ", args)) self._server_process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = self._server_process.communicate() - logging.debug(stdout.decode("utf-8")) - logging.debug(stderr.decode("utf-8")) + logging.info("Process stdout: %s", stdout.decode("utf-8")) + logging.info("Process stderr: %s", stderr.decode("utf-8")) + warnings, errors = _extractWarningsAndErrors(stdout.decode() + stderr.decode(), + self._request.node.name, + _TEST_SERVER_WARN_ERROR_IGNORE_LIST) + if warnings: + [logging.warn("Process logged a warning: %s", w) for w in warnings] + if errors: + [logging.error("Process logged an error: %s", e) for e in errors] def fetchJsonFromAdminInterface(self, path): """Fetch and parse json from the admin interface. @@ -183,7 +266,7 @@ def enableCpuProfiler(self): return r.status_code == 200 def _waitUntilServerListening(self): - # we allow 30 seconds for the server to have its listeners up. + # we allow some time for the server to have its listeners up. # (It seems that in sanitizer-enabled runs this can take a little while) timeout = time.time() + 60 while time.time() < timeout: @@ -227,8 +310,10 @@ def __init__(self, config_template_path, server_ip, ip_version, + request, parameters=dict(), - tag=""): + tag="", + bootstrap_version_arg=None): """Initialize a NighthawkTestServer instance. Args: @@ -236,11 +321,20 @@ def __init__(self, config_template_path (String): Path to the nighthawk test server configuration template. server_ip (String): Ip address for the server to use when listening. ip_version (IPVersion): IPVersion enum member indicating the ip version that the server should use when listening. + request: The pytest `request` fixture used to determin information about the currently executed test. parameters (dictionary, optional): Directionary with replacement values for substition purposes in the server configuration template. Defaults to dict(). tag (str, optional): Tags. Supply this to get recognizeable output locations. Defaults to "". + bootstrap_version_arg (String, optional): Specify a cli argument value for --bootstrap-version when running the server. """ - super(NighthawkTestServer, self).__init__(server_binary_path, config_template_path, server_ip, - ip_version, "--config-path", parameters, tag) + super(NighthawkTestServer, self).__init__(server_binary_path, + config_template_path, + server_ip, + ip_version, + request, + "--config-path", + parameters, + tag, + bootstrap_version_arg=bootstrap_version_arg) def getCliVersionString(self): """Get the version string as written to the output by the CLI.""" @@ -253,3 +347,59 @@ def getCliVersionString(self): stdout, stderr = process.communicate() assert process.wait() == 0 return stdout.decode("utf-8").strip() + + +def _matchesAnyIgnoreListEntry(line, test_case_name, ignore_list): + """Determine if the line matches any of the ignore list entries for this test case. + + Args: + line: A string, the logged line. + test_case_name: A string, name of the currently executed test case. + ignore_list: A list of _TestCaseWarnErrorIgnoreList instances, the ignore + lists to match against. + + Returns: + A boolean, True if the logged line matches any of the ignore list entries, + False otherwise. + """ + for test_case_ignore_list in ignore_list: + if not test_case_ignore_list.test_case_regexp.match(test_case_name): + continue + for ignore_message in test_case_ignore_list.ignore_list: + if ignore_message in line: + return True + return False + + +def _extractWarningsAndErrors(process_output, test_case_name, ignore_list): + """Extract warnings and errors from the process_output. + + Args: + process_output: A string, the stdout or stderr after running a process. + test_case_name: A string, the name of the current test case. + ignore_list: A list of _TestCaseWarnErrorIgnoreList instances, the message + pieces to ignore. If a message that was logged either at a WARNING or at + an ERROR severity contains one of these message pieces and should be + ignored for the current test case, it will be excluded from the return + values. + + Returns: + A tuple of two lists of strings, the first list contains the warnings found + in the process_output and the second list contains the errors found in the + process_output. + """ + warnings = [] + errors = [] + for line in process_output.split('\n'): + # Optimization - no need to examine lines that aren't errors or warnings. + if "[warning]" not in line and "[error]" not in line: + continue + + if _matchesAnyIgnoreListEntry(line, test_case_name, ignore_list): + continue + + if "[warning]" in line: + warnings.append(line) + elif "[error]" in line: + errors.append(line) + return warnings, errors diff --git a/test/integration/test_integration_basics.py b/test/integration/test_integration_basics.py index 5639fd99d..ea3bf4e43 100644 --- a/test/integration/test_integration_basics.py +++ b/test/integration/test_integration_basics.py @@ -10,11 +10,10 @@ from threading import Thread from test.integration.common import IpVersion -from test.integration.integration_test_fixtures import (http_test_server_fixture, - https_test_server_fixture, - multi_http_test_server_fixture, - multi_https_test_server_fixture, - server_config) +from test.integration.integration_test_fixtures import ( + http_test_server_fixture, http_test_server_fixture_envoy_deprecated_v2_api, + https_test_server_fixture, https_test_server_fixture, multi_http_test_server_fixture, + multi_https_test_server_fixture, server_config) from test.integration import asserts from test.integration import utility @@ -39,7 +38,7 @@ def test_http_h1(http_test_server_fixture): asserts.assertCounterEqual(counters, "upstream_cx_total", 1) asserts.assertCounterEqual( counters, "upstream_cx_tx_bytes_total", - 1400 if http_test_server_fixture.ip_version == IpVersion.IPV6 else 1450) + 1375 if http_test_server_fixture.ip_version == IpVersion.IPV6 else 1450) asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1) asserts.assertCounterEqual(counters, "upstream_rq_total", 25) asserts.assertCounterEqual(counters, "default.total_match_count", 1) @@ -69,6 +68,48 @@ def test_http_h1(http_test_server_fixture): asserts.assertEqual(len(counters), 12) +@pytest.mark.parametrize('server_config', [ + "nighthawk/test/integration/configurations/nighthawk_http_origin_envoy_deprecated_v2_api.yaml" +]) +def test_nighthawk_test_server_envoy_deprecated_v2_api( + http_test_server_fixture_envoy_deprecated_v2_api): + """Test that the v2 configuration works for the test server.""" + parsed_json, _ = http_test_server_fixture_envoy_deprecated_v2_api.runNighthawkClient([ + http_test_server_fixture_envoy_deprecated_v2_api.getTestServerRootUri(), "--duration", "100", + "--termination-predicate", "benchmark.http_2xx:24" + ]) + + counters = http_test_server_fixture_envoy_deprecated_v2_api.getNighthawkCounterMapFromJson( + parsed_json) + asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25) + + +def test_nighthawk_client_v2_api_explicitly_set(http_test_server_fixture): + """Test that the v2 api works when requested to.""" + parsed_json, _ = http_test_server_fixture.runNighthawkClient([ + http_test_server_fixture.getTestServerRootUri(), "--duration", "100", + "--termination-predicate", "benchmark.pool_connection_failure:0", "--failure-predicate", + "foo:1", "--allow-envoy-deprecated-v2-api", "--transport-socket", + "{name:\"envoy.transport_sockets.tls\",typed_config:{\"@type\":\"type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\",\"common_tls_context\":{}}}" + ]) + + counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) + asserts.assertCounterEqual(counters, "benchmark.pool_connection_failure", 1) + + +# TODO(oschaaf): This ought to work after the Envoy update. +def DISABLED_test_nighthawk_client_v2_api_breaks_by_default(http_test_server_fixture): + """Test that the v2 api breaks us when it's not explicitly requested.""" + _, _ = http_test_server_fixture.runNighthawkClient([ + http_test_server_fixture.getTestServerRootUri(), "--duration", "100", + "--termination-predicate", "benchmark.pool_connection_failure:0", "--failure-predicate", + "foo:1", "--transport-socket", + "{name:\"envoy.transport_sockets.tls\",typed_config:{\"@type\":\"type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\",\"common_tls_context\":{}}}" + ], + expect_failure=True, + as_json=False) + + def _mini_stress_test(fixture, args): # run a test with more rps then we can handle, and a very small client-side queue. # we should observe both lots of successfull requests as well as time spend in blocking mode., @@ -223,7 +264,7 @@ def test_https_h1(https_test_server_fixture): asserts.assertCounterEqual(counters, "upstream_cx_total", 1) asserts.assertCounterEqual( counters, "upstream_cx_tx_bytes_total", - 1400 if https_test_server_fixture.ip_version == IpVersion.IPV6 else 1450) + 1375 if https_test_server_fixture.ip_version == IpVersion.IPV6 else 1450) asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1) asserts.assertCounterEqual(counters, "upstream_rq_total", 25) asserts.assertCounterEqual(counters, "ssl.ciphers.ECDHE-RSA-AES128-GCM-SHA256", 1) @@ -311,7 +352,7 @@ def _do_tls_configuration_test(https_test_server_fixture, cli_parameter, use_h2) else: json_template = "%s%s%s" % ( "{name:\"envoy.transport_sockets.tls\",typed_config:{", - "\"@type\":\"type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\",", + "\"@type\":\"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\",", "common_tls_context:{tls_params:{cipher_suites:[\"-ALL:%s\"]}}}}") for cipher in [ @@ -753,3 +794,26 @@ def test_client_cli_bad_uri(http_test_server_fixture): expect_failure=True, as_json=False) assert "Invalid target URI" in err + + +@pytest.mark.parametrize('server_config', + ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) +def test_drain(https_test_server_fixture): + """Test that the pool drain timeout is effective, and we terminate in a timely fashion. + + Sets up the test server to delay replies 100 seconds. Our execution will only last 3 seconds, so we + expect to observe no replies. Termination should be cut short by the drain timeout, which means + that we should have results in approximately execution duration + drain timeout = 8 seconds. + (the pool drain timeout is hard coded to 5 seconds as of writing this). + """ + t0 = time.time() + parsed_json, _ = https_test_server_fixture.runNighthawkClient([ + https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "3", + "--request-header", "x-nighthawk-test-server-config: {static_delay: \"100s\"}" + ]) + t1 = time.time() + time_delta = t1 - t0 + counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) + assert time_delta < 40 # *lots* of slack to avoid failure in slow CI executions. + asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1) + asserts.assertNotIn("benchmark.http_2xx", counters) diff --git a/test/integration/test_request_source_plugin.py b/test/integration/test_request_source_plugin.py new file mode 100644 index 000000000..7609841c2 --- /dev/null +++ b/test/integration/test_request_source_plugin.py @@ -0,0 +1,59 @@ +"""Tests for the nighthawk_service binary.""" + +import pytest +import os + +from test.integration.integration_test_fixtures import (http_test_server_fixture, server_config) +from test.integration import utility +from test.integration import asserts + + +@pytest.mark.parametrize( + "request_source_config,expected_min,expected_max", + [ + pytest.param(""" + { + name:"nighthawk.in-line-options-list-request-source-plugin", + typed_config:{ + "@type":"type.googleapis.com/nighthawk.request_source.InLineOptionsListRequestSourceConfig", + options_list:{ + options:[ + {request_method:"1",request_body_size:"1",request_headers:[{header:{"key":"x-nighthawk-test-server-config","value":"{response_body_size:13}"}}]}, + {request_method:"1",request_body_size:"2",request_headers:[{header:{"key":"x-nighthawk-test-server-config","value":"{response_body_size:17}"}}]}, + ] + }, + } + }""", + 13, + 17, + id="in-line"), + pytest.param(""" + { + name:"nighthawk.file-based-request-source-plugin", + typed_config:{ + "@type":"type.googleapis.com/nighthawk.request_source.FileBasedOptionsListRequestSourceConfig", + file_path:"%s", + } + }""" % (os.path.dirname(os.path.abspath(os.path.dirname(__file__))) + + "/request_source/test_data/test-config.yaml"), + 13, + 17, + id="file-based"), + ], +) +def test_request_source_plugin_happy_flow_parametrized(http_test_server_fixture, + request_source_config, expected_min, + expected_max): + """Test that the nighthawkClient can run with request-source-plugin option.""" + parsed_json, _ = http_test_server_fixture.runNighthawkClient([ + "--termination-predicate", "benchmark.http_2xx:5", "--rps 10", + "--request-source-plugin-config %s" % request_source_config, + http_test_server_fixture.getTestServerRootUri(), "--request-header", "host: sni.com" + ]) + counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) + global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(parsed_json) + asserts.assertGreaterEqual(counters["benchmark.http_2xx"], 5) + asserts.assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["raw_max"]), + expected_max) + asserts.assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["raw_min"]), + expected_min) diff --git a/test/integration/unit_tests/BUILD b/test/integration/unit_tests/BUILD new file mode 100644 index 000000000..faaefdd23 --- /dev/null +++ b/test/integration/unit_tests/BUILD @@ -0,0 +1,11 @@ +load("@rules_python//python:defs.bzl", "py_test") + +licenses(["notice"]) # Apache 2 + +py_test( + name = "test_nighthawk_test_server", + srcs = ["test_nighthawk_test_server.py"], + deps = [ + "//test/integration:integration_test_base_lean", + ], +) diff --git a/test/integration/unit_tests/test_nighthawk_test_server.py b/test/integration/unit_tests/test_nighthawk_test_server.py new file mode 100644 index 000000000..cd7a25e83 --- /dev/null +++ b/test/integration/unit_tests/test_nighthawk_test_server.py @@ -0,0 +1,139 @@ +"""Contains unit tests for functions in nighthawk_test_server.py.""" + +import pytest +import re + +from test.integration import nighthawk_test_server + + +def test_extractWarningsAndErrors_nothing_on_empty_output(): + """Test with an empty input.""" + warnings, errors = nighthawk_test_server._extractWarningsAndErrors("", "test_case", []) + assert not warnings + assert not errors + + +def test_extractWarningsAndErrors_ignores_info_logs(): + """Test where the process output doesn't contain any warnings or errors.""" + process_output = """ + [2020-12-01 04:41:57.219][126][info][misc] Message. + """ + warnings, errors = nighthawk_test_server._extractWarningsAndErrors(process_output, "test_case", + []) + assert not warnings + assert not errors + + +def test_extractWarningsAndErrors_extracts_a_warning(): + """Test where the process output contains a single warning.""" + process_output = "[2020-12-01 04:41:57.219][126][warning][misc] Message." + warnings, errors = nighthawk_test_server._extractWarningsAndErrors(process_output, "test_case", + []) + assert warnings == ["[2020-12-01 04:41:57.219][126][warning][misc] Message."] + assert not errors + + +def test_extractWarningsAndErrors_extracts_an_error(): + """Test where the process output contains a single error.""" + process_output = "[2020-12-01 04:41:57.219][126][error][misc] Message." + warnings, errors = nighthawk_test_server._extractWarningsAndErrors(process_output, "test_case", + []) + assert not warnings + assert errors == ["[2020-12-01 04:41:57.219][126][error][misc] Message."] + + +def test_extractWarningsAndErrors_extracts_multiple_messages(): + """Test where the process output contains multiple warnings and errors.""" + process_output = """[warning][misc] Warning1. +[error][misc] Error1. +[info][misc] Info1. +[error][runtime] Error2. +[warning][runtime] Warning2. + """ + warnings, errors = nighthawk_test_server._extractWarningsAndErrors(process_output, "test_case", + []) + assert warnings == ["[warning][misc] Warning1.", "[warning][runtime] Warning2."] + assert errors == ["[error][misc] Error1.", "[error][runtime] Error2."] + + +def test_extractWarningsAndErrors_skips_messages_matching_ignore_list_when_test_case_matched_with_a_glob( +): + """Test where the ignore list is used.""" + process_output = """[warning][misc] Warning1 foo. +[error][misc] Error1 bar. +[info][misc] Info1. +[error][runtime] Error2 baz. +[warning][runtime] Warning2 bar. + """ + + ignore_list = [ + nighthawk_test_server._TestCaseWarnErrorIgnoreList(re.compile(".*"), ("foo", "bar")), + ] + warnings, errors = nighthawk_test_server._extractWarningsAndErrors(process_output, "test_case", + ignore_list) + assert not warnings + assert errors == ["[error][runtime] Error2 baz."] + + +def test_extractWarningsAndErrors_skips_messages_matching_ignore_list_when_test_case_matched_exactly( +): + """Test where the ignore list is used.""" + process_output = """[warning][misc] Warning1 foo. +[error][misc] Error1 bar. +[info][misc] Info1. +[error][runtime] Error2 baz. +[warning][runtime] Warning2 bar. + """ + + ignore_list = [ + nighthawk_test_server._TestCaseWarnErrorIgnoreList(re.compile("test_case"), ("foo", "bar")), + ] + warnings, errors = nighthawk_test_server._extractWarningsAndErrors(process_output, "test_case", + ignore_list) + assert not warnings + assert errors == ["[error][runtime] Error2 baz."] + + +def test_extractWarningsAndErrors_does_not_apply_ignore_list_for_non_matching_test_case_name(): + """Test where the ignore list is used.""" + process_output = """[warning][misc] Warning1 foo. +[error][misc] Error1 bar. +[info][misc] Info1. +[error][runtime] Error2 baz. +[warning][runtime] Warning2 bar. + """ + + ignore_list = [ + nighthawk_test_server._TestCaseWarnErrorIgnoreList(re.compile("test_case1"), ("foo",)), + nighthawk_test_server._TestCaseWarnErrorIgnoreList(re.compile("test_case2"), ("bar",)), + ] + warnings, errors = nighthawk_test_server._extractWarningsAndErrors(process_output, "test_case1", + ignore_list) + assert warnings == ["[warning][runtime] Warning2 bar."] + assert errors == [ + "[error][misc] Error1 bar.", + "[error][runtime] Error2 baz.", + ] + + +def test_extractWarningsAndErrors_applies_all_matching_ignore_lists(): + """Test where the ignore list is used.""" + process_output = """[warning][misc] Warning1 foo. +[error][misc] Error1 bar. +[info][misc] Info1. +[error][runtime] Error2 baz. +[warning][runtime] Warning2 bar. + """ + + ignore_list = [ + nighthawk_test_server._TestCaseWarnErrorIgnoreList(re.compile("test_case1"), ("foo",)), + nighthawk_test_server._TestCaseWarnErrorIgnoreList(re.compile(".*"), ("bar",)), + ] + warnings, errors = nighthawk_test_server._extractWarningsAndErrors(process_output, "test_case1", + ignore_list) + assert not warnings + assert errors == ["[error][runtime] Error2 baz."] + + +if __name__ == "__main__": + raise SystemExit(pytest.main([__file__])) diff --git a/test/mocks/adaptive_load/BUILD b/test/mocks/adaptive_load/BUILD index ff68762f6..44402c141 100644 --- a/test/mocks/adaptive_load/BUILD +++ b/test/mocks/adaptive_load/BUILD @@ -8,6 +8,16 @@ licenses(["notice"]) # Apache 2 envoy_package() +envoy_cc_mock( + name = "mock_adaptive_load_controller", + srcs = ["mock_adaptive_load_controller.cc"], + hdrs = ["mock_adaptive_load_controller.h"], + repository = "@envoy", + deps = [ + "//include/nighthawk/adaptive_load:adaptive_load_controller", + ], +) + envoy_cc_mock( name = "mock_metrics_evaluator", srcs = ["mock_metrics_evaluator.cc"], diff --git a/test/mocks/adaptive_load/mock_adaptive_load_controller.cc b/test/mocks/adaptive_load/mock_adaptive_load_controller.cc new file mode 100644 index 000000000..a0f0578dd --- /dev/null +++ b/test/mocks/adaptive_load/mock_adaptive_load_controller.cc @@ -0,0 +1,7 @@ +#include "test/mocks/adaptive_load/mock_adaptive_load_controller.h" + +namespace Nighthawk { + +MockAdaptiveLoadController::MockAdaptiveLoadController() = default; + +} // namespace Nighthawk diff --git a/test/mocks/adaptive_load/mock_adaptive_load_controller.h b/test/mocks/adaptive_load/mock_adaptive_load_controller.h new file mode 100644 index 000000000..0a0be0d28 --- /dev/null +++ b/test/mocks/adaptive_load/mock_adaptive_load_controller.h @@ -0,0 +1,33 @@ +#pragma once + +#include "nighthawk/adaptive_load/adaptive_load_controller.h" + +#include "gmock/gmock.h" + +namespace Nighthawk { + +/** + * A mock AdaptiveLoadController that returns empty values or success from all methods + * by default. + * + * + * Typical usage: + * + * NiceMock mock_controller; + * EXPECT_CALL(mock_controller, PerformAdaptiveLoadSession(_)) + * .WillOnce(Return(AdaptiveLoadSessionOutput())); + */ +class MockAdaptiveLoadController : public AdaptiveLoadController { +public: + /** + * Empty constructor. + */ + MockAdaptiveLoadController(); + + MOCK_METHOD(absl::StatusOr, + PerformAdaptiveLoadSession, + (nighthawk::client::NighthawkService::StubInterface * nighthawk_service_stub, + const nighthawk::adaptive_load::AdaptiveLoadSessionSpec& spec)); +}; + +} // namespace Nighthawk diff --git a/test/mocks/client/mock_benchmark_client.h b/test/mocks/client/mock_benchmark_client.h index c8d2ba9a3..15b1babea 100644 --- a/test/mocks/client/mock_benchmark_client.h +++ b/test/mocks/client/mock_benchmark_client.h @@ -11,13 +11,13 @@ class MockBenchmarkClient : public BenchmarkClient { public: MockBenchmarkClient(); - MOCK_METHOD0(terminate, void()); - MOCK_METHOD1(setShouldMeasureLatencies, void(bool)); - MOCK_CONST_METHOD0(statistics, StatisticPtrMap()); - MOCK_METHOD1(tryStartRequest, bool(Client::CompletionCallback)); - MOCK_CONST_METHOD0(scope, Envoy::Stats::Scope&()); - MOCK_CONST_METHOD0(shouldMeasureLatencies, bool()); - MOCK_CONST_METHOD0(requestHeaders, const Envoy::Http::RequestHeaderMap&()); + MOCK_METHOD(void, terminate, ()); + MOCK_METHOD(void, setShouldMeasureLatencies, (bool)); + MOCK_METHOD(StatisticPtrMap, statistics, (), (const)); + MOCK_METHOD(bool, tryStartRequest, (Client::CompletionCallback)); + MOCK_METHOD(Envoy::Stats::Scope&, scope, (), (const)); + MOCK_METHOD(bool, shouldMeasureLatencies, (), (const)); + MOCK_METHOD(const Envoy::Http::RequestHeaderMap&, requestHeaders, (), (const)); }; } // namespace Client diff --git a/test/mocks/client/mock_options.h b/test/mocks/client/mock_options.h index 258904cd5..a6e85d42c 100644 --- a/test/mocks/client/mock_options.h +++ b/test/mocks/client/mock_options.h @@ -36,6 +36,8 @@ class MockOptions : public Options { MOCK_CONST_METHOD0(sequencerIdleStrategy, nighthawk::client::SequencerIdleStrategy::SequencerIdleStrategyOptions()); MOCK_CONST_METHOD0(requestSource, std::string()); + MOCK_CONST_METHOD0(requestSourcePluginConfig, + absl::optional&()); MOCK_CONST_METHOD0(trace, std::string()); MOCK_CONST_METHOD0( h1ConnectionReuseStrategy, @@ -55,6 +57,8 @@ class MockOptions : public Options { MOCK_CONST_METHOD0(statsSinks, std::vector()); MOCK_CONST_METHOD0(statsFlushInterval, uint32_t()); MOCK_CONST_METHOD0(responseHeaderWithLatencyInput, std::string()); + MOCK_CONST_METHOD0(allowEnvoyDeprecatedV2Api, bool()); + MOCK_CONST_METHOD0(scheduled_start, absl::optional()); }; } // namespace Client diff --git a/test/mocks/common/mock_rate_limiter.h b/test/mocks/common/mock_rate_limiter.h index a07062b47..71e02a40a 100644 --- a/test/mocks/common/mock_rate_limiter.h +++ b/test/mocks/common/mock_rate_limiter.h @@ -10,18 +10,19 @@ class MockRateLimiter : public RateLimiter { public: MockRateLimiter(); - MOCK_METHOD0(tryAcquireOne, bool()); - MOCK_METHOD0(releaseOne, void()); - MOCK_METHOD0(timeSource, Envoy::TimeSource&()); - MOCK_METHOD0(elapsed, std::chrono::nanoseconds()); + MOCK_METHOD(bool, tryAcquireOne, ()); + MOCK_METHOD(void, releaseOne, ()); + MOCK_METHOD(Envoy::TimeSource&, timeSource, ()); + MOCK_METHOD(std::chrono::nanoseconds, elapsed, ()); + MOCK_METHOD(absl::optional, firstAcquisitionTime, (), (const)); }; class MockDiscreteNumericDistributionSampler : public DiscreteNumericDistributionSampler { public: MockDiscreteNumericDistributionSampler(); - MOCK_METHOD0(getValue, uint64_t()); - MOCK_CONST_METHOD0(min, uint64_t()); - MOCK_CONST_METHOD0(max, uint64_t()); + MOCK_METHOD(uint64_t, getValue, ()); + MOCK_METHOD(uint64_t, min, (), (const)); + MOCK_METHOD(uint64_t, max, (), (const)); }; } // namespace Nighthawk diff --git a/test/mocks/common/mock_request_source.h b/test/mocks/common/mock_request_source.h index ab3b7694a..268cc8ebd 100644 --- a/test/mocks/common/mock_request_source.h +++ b/test/mocks/common/mock_request_source.h @@ -9,8 +9,8 @@ namespace Nighthawk { class MockRequestSource : public RequestSource { public: MockRequestSource(); - MOCK_METHOD0(get, RequestGenerator()); - MOCK_METHOD0(initOnThread, void()); + MOCK_METHOD(RequestGenerator, get, ()); + MOCK_METHOD(void, initOnThread, ()); }; } // namespace Nighthawk \ No newline at end of file diff --git a/test/mocks/common/mock_sequencer.h b/test/mocks/common/mock_sequencer.h index 52014362b..dd93205a1 100644 --- a/test/mocks/common/mock_sequencer.h +++ b/test/mocks/common/mock_sequencer.h @@ -1,5 +1,6 @@ #pragma once +#include "nighthawk/common/rate_limiter.h" #include "nighthawk/common/sequencer.h" #include "gmock/gmock.h" @@ -10,12 +11,13 @@ class MockSequencer : public Sequencer { public: MockSequencer(); - MOCK_METHOD0(start, void()); - MOCK_METHOD0(waitForCompletion, void()); - MOCK_CONST_METHOD0(completionsPerSecond, double()); - MOCK_CONST_METHOD0(executionDuration, std::chrono::nanoseconds()); - MOCK_CONST_METHOD0(statistics, StatisticPtrMap()); - MOCK_METHOD0(cancel, void()); + MOCK_METHOD(void, start, ()); + MOCK_METHOD(void, waitForCompletion, ()); + MOCK_METHOD(double, completionsPerSecond, (), (const)); + MOCK_METHOD(std::chrono::nanoseconds, executionDuration, (), (const)); + MOCK_METHOD(StatisticPtrMap, statistics, (), (const)); + MOCK_METHOD(void, cancel, ()); + MOCK_METHOD(RateLimiter&, rate_limiter, (), (const)); }; } // namespace Nighthawk \ No newline at end of file diff --git a/test/mocks/common/mock_sequencer_factory.h b/test/mocks/common/mock_sequencer_factory.h index 96983e24f..63c972f26 100644 --- a/test/mocks/common/mock_sequencer_factory.h +++ b/test/mocks/common/mock_sequencer_factory.h @@ -14,7 +14,7 @@ class MockSequencerFactory : public SequencerFactory { const SequencerTarget& sequencer_target, TerminationPredicatePtr&& termination_predicate, Envoy::Stats::Scope& scope, - const Envoy::SystemTime scheduled_starting_time)); + const Envoy::MonotonicTime scheduled_starting_time)); }; } // namespace Nighthawk \ No newline at end of file diff --git a/test/mocks/common/mock_termination_predicate.h b/test/mocks/common/mock_termination_predicate.h index 91077148e..da5c929ed 100644 --- a/test/mocks/common/mock_termination_predicate.h +++ b/test/mocks/common/mock_termination_predicate.h @@ -9,10 +9,10 @@ namespace Nighthawk { class MockTerminationPredicate : public TerminationPredicate { public: MockTerminationPredicate(); - MOCK_METHOD1(link, TerminationPredicate&(TerminationPredicatePtr&&)); - MOCK_METHOD1(appendToChain, TerminationPredicate&(TerminationPredicatePtr&&)); - MOCK_METHOD0(evaluateChain, TerminationPredicate::Status()); - MOCK_METHOD0(evaluate, TerminationPredicate::Status()); + MOCK_METHOD(TerminationPredicate&, link, (TerminationPredicatePtr && p)); + MOCK_METHOD(TerminationPredicate&, appendToChain, (TerminationPredicatePtr && p)); + MOCK_METHOD(TerminationPredicate::Status, evaluateChain, ()); + MOCK_METHOD(TerminationPredicate::Status, evaluate, ()); }; } // namespace Nighthawk \ No newline at end of file diff --git a/test/mocks/common/mock_termination_predicate_factory.h b/test/mocks/common/mock_termination_predicate_factory.h index 23aed4bf2..e37e8f128 100644 --- a/test/mocks/common/mock_termination_predicate_factory.h +++ b/test/mocks/common/mock_termination_predicate_factory.h @@ -12,7 +12,7 @@ class MockTerminationPredicateFactory : public TerminationPredicateFactory { MOCK_CONST_METHOD3(create, TerminationPredicatePtr(Envoy::TimeSource& time_source, Envoy::Stats::Scope& scope, - const Envoy::SystemTime scheduled_starting_time)); + const Envoy::MonotonicTime scheduled_starting_time)); }; } // namespace Nighthawk \ No newline at end of file diff --git a/test/options_test.cc b/test/options_test.cc index ddbb80595..a1f6c143b 100644 --- a/test/options_test.cc +++ b/test/options_test.cc @@ -3,6 +3,7 @@ #include "client/options_impl.h" #include "test/client/utility.h" +#include "test/test_common/environment.h" #include "gtest/gtest.h" @@ -30,7 +31,6 @@ class OptionsImplTest : public Test { EXPECT_EQ(expected_key, headers[0].header().key()); EXPECT_EQ(expected_value, headers[0].header().value()); } - std::string client_name_; std::string good_test_uri_; std::string no_arg_match_; @@ -118,10 +118,11 @@ TEST_F(OptionsImplTest, AlmostAll) { "--experimental-h2-use-multiple-connections " "--experimental-h1-connection-reuse-strategy lru --label label1 --label label2 {} " "--simple-warmup --stats-sinks {} --stats-sinks {} --stats-flush-interval 10 " - "--latency-response-header-name zz", + "--latency-response-header-name zz --allow-envoy-deprecated-v2-api", client_name_, "{name:\"envoy.transport_sockets.tls\"," - "typed_config:{\"@type\":\"type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\"," + "typed_config:{\"@type\":\"type.googleapis.com/" + "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\"," "common_tls_context:{tls_params:{" "cipher_suites:[\"-ALL:ECDHE-RSA-AES256-GCM-SHA384\"]}}}}", good_test_uri_, sink_json_1, sink_json_2)); @@ -142,18 +143,19 @@ TEST_F(OptionsImplTest, AlmostAll) { const std::vector expected_headers = {"f1:b1", "f2:b2", "f3:b3:b4"}; EXPECT_EQ(expected_headers, options->requestHeaders()); EXPECT_EQ(1234, options->requestBodySize()); - EXPECT_EQ("name: \"envoy.transport_sockets.tls\"\n" - "typed_config {\n" - " [type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext] {\n" - " common_tls_context {\n" - " tls_params {\n" - " cipher_suites: \"-ALL:ECDHE-RSA-AES256-GCM-SHA384\"\n" - " }\n" - " }\n" - " }\n" - "}\n" - "183412668: \"envoy.api.v2.core.TransportSocket\"\n", - options->transportSocket().value().DebugString()); + EXPECT_EQ( + "name: \"envoy.transport_sockets.tls\"\n" + "typed_config {\n" + " [type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext] {\n" + " common_tls_context {\n" + " tls_params {\n" + " cipher_suites: \"-ALL:ECDHE-RSA-AES256-GCM-SHA384\"\n" + " }\n" + " }\n" + " }\n" + "}\n" + "183412668: \"envoy.api.v2.core.TransportSocket\"\n", + options->transportSocket().value().DebugString()); EXPECT_EQ(10, options->maxPendingRequests()); EXPECT_EQ(11, options->maxActiveRequests()); EXPECT_EQ(12, options->maxRequestsPerConnection()); @@ -191,6 +193,7 @@ TEST_F(OptionsImplTest, AlmostAll) { "183412668: \"envoy.config.metrics.v2.StatsSink\"\n", options->statsSinks()[1].DebugString()); EXPECT_EQ("zz", options->responseHeaderWithLatencyInput()); + EXPECT_TRUE(options->allowEnvoyDeprecatedV2Api()); // Check that our conversion to CommandLineOptionsPtr makes sense. CommandLineOptionsPtr cmd = options->toCommandLineOptions(); @@ -249,7 +252,9 @@ TEST_F(OptionsImplTest, AlmostAll) { EXPECT_TRUE(util(cmd->stats_sinks(0), options->statsSinks()[0])); EXPECT_TRUE(util(cmd->stats_sinks(1), options->statsSinks()[1])); EXPECT_EQ(cmd->latency_response_header_name().value(), options->responseHeaderWithLatencyInput()); - + ASSERT_TRUE(cmd->has_allow_envoy_deprecated_v2_api()); + EXPECT_EQ(cmd->allow_envoy_deprecated_v2_api().value(), options->allowEnvoyDeprecatedV2Api()); + // TODO(#433) Here and below, replace comparisons once we choose a proto diff. OptionsImpl options_from_proto(*cmd); std::string s1 = Envoy::MessageUtil::getYamlStringFromMessage( *(options_from_proto.toCommandLineOptions()), true, true); @@ -272,10 +277,122 @@ TEST_F(OptionsImplTest, RequestSource) { // Check that our conversion to CommandLineOptionsPtr makes sense. CommandLineOptionsPtr cmd = options->toCommandLineOptions(); EXPECT_EQ(cmd->request_source().uri(), request_source); + // TODO(#433) OptionsImpl options_from_proto(*cmd); EXPECT_TRUE(util(*(options_from_proto.toCommandLineOptions()), *cmd)); } +class RequestSourcePluginTestFixture : public OptionsImplTest, + public WithParamInterface {}; +TEST_P(RequestSourcePluginTestFixture, CreatesOptionsImplWithRequestSourceConfig) { + Envoy::MessageUtil util; + const std::string request_source_config = GetParam(); + std::unique_ptr options = TestUtility::createOptionsImpl( + fmt::format("{} --request-source-plugin-config {} {}", client_name_, request_source_config, + good_test_uri_)); + + CommandLineOptionsPtr command = options->toCommandLineOptions(); + EXPECT_TRUE( + util(command->request_source_plugin_config(), options->requestSourcePluginConfig().value())); + + // The predicates are defined as proto maps, and these seem to re-serialize into a different + // order. Hence we trim the maps to contain a single entry so they don't thwart our textual + // comparison below. + EXPECT_EQ(1, command->mutable_failure_predicates()->erase("benchmark.http_4xx")); + EXPECT_EQ(1, command->mutable_failure_predicates()->erase("benchmark.http_5xx")); + EXPECT_EQ(1, command->mutable_failure_predicates()->erase("requestsource.upstream_rq_5xx")); + + // TODO(#433) + // Now we construct a new options from the proto we created above. This should result in an + // OptionsImpl instance equivalent to options. We test that by converting both to yaml strings, + // expecting them to be equal. This should provide helpful output when the test fails by showing + // the unexpected (yaml) diff. + OptionsImpl options_from_proto(*command); + std::string yaml_for_options_proto = Envoy::MessageUtil::getYamlStringFromMessage( + *(options_from_proto.toCommandLineOptions()), true, true); + std::string yaml_for_command = Envoy::MessageUtil::getYamlStringFromMessage(*command, true, true); + EXPECT_EQ(yaml_for_options_proto, yaml_for_command); + // Additional comparison to avoid edge cases missed. + EXPECT_TRUE(util(*(options_from_proto.toCommandLineOptions()), *command)); +} +std::vector RequestSourcePluginJsons() { + std::string file_request_source_plugin_json = + "{" + R"(name:"nighthawk.file-based-request-source-plugin",)" + "typed_config:{" + R"("@type":"type.googleapis.com/)" + R"(nighthawk.request_source.FileBasedOptionsListRequestSourceConfig",)" + R"(file_path:")" + + TestEnvironment::runfilesPath("test/request_source/test_data/test-config.yaml") + + "\"," + "}" + "}"; + std::string in_line_request_source_plugin_json = + "{" + R"(name:"nighthawk.in-line-options-list-request-source-plugin",)" + "typed_config:{" + R"("@type":"type.googleapis.com/)" + R"(nighthawk.request_source.InLineOptionsListRequestSourceConfig",)" + "options_list:{" + R"(options:[{request_method:"1",request_headers:[{header:{key:"key",value:"value"}}]}])" + "}," + "}" + "}"; + std::string stub_request_source_plugin_json = + "{" + R"(name:"nighthawk.stub-request-source-plugin",)" + "typed_config:{" + R"("@type":"type.googleapis.com/nighthawk.request_source.StubPluginConfig",)" + R"(test_value:"3",)" + "}" + "}"; + return std::vector{ + file_request_source_plugin_json, + in_line_request_source_plugin_json, + stub_request_source_plugin_json, + }; +} +INSTANTIATE_TEST_SUITE_P(HappyPathRequestSourceConfigJsonSuccessfullyTranslatesIntoOptions, + RequestSourcePluginTestFixture, + ::testing::ValuesIn(RequestSourcePluginJsons())); + +// This test covers --RequestSourcePlugin, which can't be tested at the same time as --RequestSource +// and some other options. This is the test for the inlineoptionslistplugin. +TEST_F(OptionsImplTest, InLineOptionsListRequestSourcePluginIsMutuallyExclusiveWithRequestSource) { + const std::string request_source = "127.9.9.4:32323"; + const std::string request_source_config = + "{" + "name:\"nighthawk.in-line-options-list-request-source-plugin\"," + "typed_config:{" + "\"@type\":\"type.googleapis.com/" + "nighthawk.request_source.InLineOptionsListRequestSourceConfig\"," + "options_list:{" + "options:[{request_method:\"1\",request_headers:[{header:{key:\"key\",value:\"value\"}}]}]" + "}," + "}" + "}"; + EXPECT_THROW_WITH_REGEX( + TestUtility::createOptionsImpl( + fmt::format("{} --request-source-plugin-config {} --request-source {} {}", client_name_, + request_source_config, request_source, good_test_uri_)), + MalformedArgvException, + "--request-source and --request_source_plugin_config cannot both be set."); +} + +TEST_F(OptionsImplTest, BadRequestSourcePluginSpecification) { + // Bad JSON + EXPECT_THROW_WITH_REGEX( + TestUtility::createOptionsImpl(fmt::format("{} --request-source-plugin-config {} {}", + client_name_, "{broken_json:", good_test_uri_)), + MalformedArgvException, "Unable to parse JSON as proto"); + // Correct JSON, but contents not according to spec. + EXPECT_THROW_WITH_REGEX(TestUtility::createOptionsImpl( + fmt::format("{} --request-source-plugin-config {} {}", client_name_, + "{misspelled_field:{}}", good_test_uri_)), + MalformedArgvException, + "envoy.config.core.v3.TypedExtensionConfig reason INVALID_ARGUMENT"); +} + // We test --no-duration here and not in All above because it is exclusive to --duration. TEST_F(OptionsImplTest, NoDuration) { Envoy::MessageUtil util; @@ -284,6 +401,7 @@ TEST_F(OptionsImplTest, NoDuration) { EXPECT_TRUE(options->noDuration()); // Check that our conversion to CommandLineOptionsPtr makes sense. CommandLineOptionsPtr cmd = options->toCommandLineOptions(); + // TODO(#433) OptionsImpl options_from_proto(*cmd); EXPECT_TRUE(util(*(options_from_proto.toCommandLineOptions()), *cmd)); } @@ -324,7 +442,7 @@ TEST_F(OptionsImplTest, TlsContext) { EXPECT_EQ(1, cmd->mutable_failure_predicates()->erase("benchmark.http_4xx")); EXPECT_EQ(1, cmd->mutable_failure_predicates()->erase("benchmark.http_5xx")); EXPECT_EQ(1, cmd->mutable_failure_predicates()->erase("requestsource.upstream_rq_5xx")); - + // TODO(#433) OptionsImpl options_from_proto(*cmd); std::string s1 = Envoy::MessageUtil::getYamlStringFromMessage( *(options_from_proto.toCommandLineOptions()), true, true); @@ -386,7 +504,7 @@ TEST_F(OptionsImplTest, MultiTarget) { EXPECT_EQ(1, cmd->mutable_failure_predicates()->erase("benchmark.http_4xx")); EXPECT_EQ(1, cmd->mutable_failure_predicates()->erase("benchmark.http_5xx")); EXPECT_EQ(1, cmd->mutable_failure_predicates()->erase("requestsource.upstream_rq_5xx")); - + // TODO(#433) OptionsImpl options_from_proto(*cmd); std::string s1 = Envoy::MessageUtil::getYamlStringFromMessage( *(options_from_proto.toCommandLineOptions()), true, true); @@ -479,6 +597,22 @@ TEST_F(OptionsImplTest, PrefetchConnectionsFlag) { MalformedArgvException, "Couldn't find match for argument"); } +TEST_F(OptionsImplTest, AllowEnvoyDeprecatedV2ApiFlag) { + EXPECT_FALSE(TestUtility::createOptionsImpl(fmt::format("{} {}", client_name_, good_test_uri_)) + ->allowEnvoyDeprecatedV2Api()); + EXPECT_TRUE(TestUtility::createOptionsImpl(fmt::format("{} --allow-envoy-deprecated-v2-api {}", + client_name_, good_test_uri_)) + ->allowEnvoyDeprecatedV2Api()); + EXPECT_THROW_WITH_REGEX( + TestUtility::createOptionsImpl( + fmt::format("{} --allow-envoy-deprecated-v2-api 0 {}", client_name_, good_test_uri_)), + MalformedArgvException, "Couldn't find match for argument"); + EXPECT_THROW_WITH_REGEX( + TestUtility::createOptionsImpl( + fmt::format("{} --allow-envoy-deprecated-v2-api true {}", client_name_, good_test_uri_)), + MalformedArgvException, "Couldn't find match for argument"); +} + // Test --concurrency, which is a bit special. It's an int option, which also accepts 'auto' as // a value. We need to implement some stuff ourselves to get this to work, hence we don't run it // through the OptionsImplIntTest. diff --git a/test/output_formatter_test.cc b/test/output_formatter_test.cc index 8e23185b7..9b334fcc7 100644 --- a/test/output_formatter_test.cc +++ b/test/output_formatter_test.cc @@ -85,9 +85,9 @@ class OutputCollectorTest : public Test { void setupCollector() { collector_ = std::make_unique(time_system_, options_); - collector_->addResult("worker_0", statistics_, counters_, 1s); - collector_->addResult("worker_1", statistics_, counters_, 1s); - collector_->addResult("global", statistics_, counters_, 1s); + collector_->addResult("worker_0", statistics_, counters_, 1s, time_system_.systemTime()); + collector_->addResult("worker_1", statistics_, counters_, 1s, absl::nullopt); + collector_->addResult("global", statistics_, counters_, 1s, time_system_.systemTime()); } nighthawk::client::CommandLineOptions command_line_options_; diff --git a/test/output_transform_main_test.cc b/test/output_transform_main_test.cc index 43df89261..2e5c618c3 100644 --- a/test/output_transform_main_test.cc +++ b/test/output_transform_main_test.cc @@ -64,7 +64,7 @@ TEST_F(OutputTransformMainTest, HappyFlowForAllOutputFormats) { output.add_results()->set_name("global"); } output.mutable_options()->mutable_uri()->set_value("http://127.0.0.1/"); - stream_ << Envoy::MessageUtil::getJsonStringFromMessage(output, true, true); + stream_ << Envoy::MessageUtil::getJsonStringFromMessageOrDie(output, true, true); OutputTransformMain main(argv.size(), argv.data(), stream_); EXPECT_EQ(main.run(), 0); } diff --git a/test/process_test.cc b/test/process_test.cc index e0bae9a71..4296ef770 100644 --- a/test/process_test.cc +++ b/test/process_test.cc @@ -1,3 +1,4 @@ +#include #include #include @@ -6,7 +7,9 @@ #include "external/envoy/test/test_common/environment.h" #include "external/envoy/test/test_common/network_utility.h" #include "external/envoy/test/test_common/registry.h" +#include "external/envoy/test/test_common/simulated_time_system.h" #include "external/envoy/test/test_common/utility.h" +#include "external/envoy_api/envoy/config/bootstrap/v3/bootstrap.pb.h" #include "common/uri_impl.h" @@ -178,6 +181,117 @@ TEST_P(ProcessTest, NoFlushWhenCancelExecutionBeforeLoadTestBegin) { EXPECT_EQ(numFlushes, 0); } +TEST(RuntimeConfiguration, allowEnvoyDeprecatedV2Api) { + envoy::config::bootstrap::v3::Bootstrap bootstrap; + EXPECT_EQ(bootstrap.DebugString(), ""); + ProcessImpl::allowEnvoyDeprecatedV2Api(bootstrap); + std::cerr << bootstrap.DebugString() << std::endl; + EXPECT_EQ(bootstrap.DebugString(), R"EOF(layered_runtime { + layers { + name: "admin layer" + admin_layer { + } + } + layers { + name: "static_layer" + static_layer { + fields { + key: "envoy.reloadable_features.allow_prefetch" + value { + string_value: "true" + } + } + fields { + key: "envoy.reloadable_features.enable_deprecated_v2_api" + value { + string_value: "true" + } + } + } + } +} +)EOF"); +} + +/** + * Fixture for executing the Nighthawk process with simulated time. + */ +class ProcessTestWithSimTime : public Envoy::Event::TestUsingSimulatedTime, + public TestWithParam { +public: + ProcessTestWithSimTime() + : options_(TestUtility::createOptionsImpl( + fmt::format("foo --duration 1 -v error --failure-predicate foo:0 --rps 10 https://{}/", + Envoy::Network::Test::getLoopbackAddressUrlString(GetParam())))){}; + +protected: + void run(std::function verify_callback) { + auto run_thread = std::thread([this, &verify_callback] { + ProcessPtr process = std::make_unique(*options_, simTime()); + OutputCollectorImpl collector(simTime(), *options_); + const bool result = process->run(collector); + process->shutdown(); + verify_callback(result, collector.toProto()); + }); + + // We introduce real-world sleeps to give the executing ProcessImpl + // an opportunity to observe passage of simulated time. We increase simulated + // time in three steps, to give it an opportunity to start at the wrong time + // in case there is an error in the scheduling logic it implements. + // Note that these sleeps may seem excessively long, but sanitizer runs may need that. + sleep(1); + // Move time to 1 second before the scheduled execution time. + simTime().setSystemTime(options_->scheduled_start().value() - 1s); + sleep(1); + // Move time right up to the scheduled execution time. + simTime().setSystemTime(options_->scheduled_start().value()); + sleep(1); + // Move time past the scheduled execution time and execution duration. + simTime().setSystemTime(options_->scheduled_start().value() + 2s); + // Wait for execution to wrap up. + run_thread.join(); + } + + void setScheduleOnOptions(std::chrono::nanoseconds ns_since_epoch) { + CommandLineOptionsPtr command_line = options_->toCommandLineOptions(); + *(command_line->mutable_scheduled_start()) = + Envoy::Protobuf::util::TimeUtil::NanosecondsToTimestamp(ns_since_epoch.count()); + options_ = std::make_unique(*command_line); + } + + OptionsPtr options_; +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, ProcessTestWithSimTime, + ValuesIn(Envoy::TestEnvironment::getIpVersionsForTest()), + Envoy::TestUtility::ipTestParamsToString); + +// Verify that scheduling execution ahead of time works, and that the execution start timestamp +// associated to the worker result correctly reflects the scheduled time. This should be spot on +// because we use simulated time. +TEST_P(ProcessTestWithSimTime, ScheduleAheadWorks) { + for (const auto& relative_schedule : std::vector{30s, 1h}) { + setScheduleOnOptions( + std::chrono::nanoseconds(simTime().systemTime().time_since_epoch() + relative_schedule)); + run([this](bool success, const nighthawk::client::Output& output) { + EXPECT_TRUE(success); + ASSERT_EQ(output.results_size(), 1); + EXPECT_EQ(Envoy::ProtobufUtil::TimeUtil::TimestampToNanoseconds( + output.results()[0].execution_start()), + options_->scheduled_start().value().time_since_epoch().count()); + }); + } +} + +// Verify that scheduling an execution in the past yields an error. +TEST_P(ProcessTestWithSimTime, ScheduleInThePastFails) { + setScheduleOnOptions(std::chrono::nanoseconds(simTime().systemTime().time_since_epoch() - 1s)); + run([](bool success, const nighthawk::client::Output& output) { + EXPECT_FALSE(success); + EXPECT_EQ(output.results_size(), 0); + }); +} + } // namespace } // namespace Client } // namespace Nighthawk diff --git a/test/rate_limiter_test.cc b/test/rate_limiter_test.cc index 754bbd3bd..98f253365 100644 --- a/test/rate_limiter_test.cc +++ b/test/rate_limiter_test.cc @@ -65,7 +65,7 @@ TEST_F(RateLimiterTest, BurstingRateLimiterTest) { rate_limiter->releaseOne(); EXPECT_TRUE(rate_limiter->tryAcquireOne()); EXPECT_TRUE(rate_limiter->tryAcquireOne()); - EXPECT_CALL(unsafe_mock_rate_limiter, tryAcquireOne).Times(1).WillOnce(Return(false)); + EXPECT_CALL(unsafe_mock_rate_limiter, tryAcquireOne).WillOnce(Return(false)); EXPECT_FALSE(rate_limiter->tryAcquireOne()); } @@ -76,7 +76,8 @@ TEST_F(RateLimiterTest, ScheduledStartingRateLimiterTest) { // scheduled delay. This should be business as usual from a functional perspective, but internally // this rate limiter specializes on this case to log a warning message, and we want to cover that. for (const bool starting_late : std::vector{false, true}) { - const Envoy::SystemTime scheduled_starting_time = time_system.systemTime() + schedule_delay; + const Envoy::MonotonicTime scheduled_starting_time = + time_system.monotonicTime() + schedule_delay; std::unique_ptr mock_rate_limiter = std::make_unique(); MockRateLimiter& unsafe_mock_rate_limiter = *mock_rate_limiter; InSequence s; @@ -95,7 +96,7 @@ TEST_F(RateLimiterTest, ScheduledStartingRateLimiterTest) { } // We should expect zero releases until it is time to start. - while (time_system.systemTime() < scheduled_starting_time) { + while (time_system.monotonicTime() < scheduled_starting_time) { EXPECT_FALSE(rate_limiter->tryAcquireOne()); time_system.advanceTimeWait(1ms); } @@ -108,8 +109,8 @@ TEST_F(RateLimiterTest, ScheduledStartingRateLimiterTest) { TEST_F(RateLimiterTest, ScheduledStartingRateLimiterTestBadArgs) { Envoy::Event::SimulatedTimeSystem time_system; // Verify we enforce future-only scheduling. - for (const auto timing : - std::vector{time_system.systemTime(), time_system.systemTime() - 10ms}) { + for (const auto& timing : std::vector{time_system.monotonicTime(), + time_system.monotonicTime() - 10ms}) { std::unique_ptr mock_rate_limiter = std::make_unique(); MockRateLimiter& unsafe_mock_rate_limiter = *mock_rate_limiter; EXPECT_CALL(unsafe_mock_rate_limiter, timeSource) @@ -259,7 +260,7 @@ TEST_F(DistributionSamplingRateLimiterTest, ReleaseOneFunctionsWhenAcquired) { EXPECT_CALL(mock_inner_rate_limiter_, tryAcquireOne).WillOnce(Return(true)); EXPECT_CALL(mock_discrete_numeric_distribution_sampler_, getValue).WillOnce(Return(0)); EXPECT_TRUE(rate_limiter_->tryAcquireOne()); - EXPECT_CALL(mock_inner_rate_limiter_, releaseOne).Times(1); + EXPECT_CALL(mock_inner_rate_limiter_, releaseOne); rate_limiter_->releaseOne(); } @@ -420,7 +421,7 @@ TEST_F(RateLimiterTest, GraduallyOpeningRateLimiterFilterInvalidArgumentTest) { // Pass in a badly configured distribution sampler. auto bad_distribution_sampler = std::make_unique(); - EXPECT_CALL(*bad_distribution_sampler, min).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*bad_distribution_sampler, min).WillOnce(Return(0)); EXPECT_THROW( GraduallyOpeningRateLimiterFilter gorl(1s, std::move(bad_distribution_sampler), std::make_unique>()); @@ -428,8 +429,8 @@ TEST_F(RateLimiterTest, GraduallyOpeningRateLimiterFilterInvalidArgumentTest) { bad_distribution_sampler = std::make_unique(); // Correct min, but now introduce a bad max. - EXPECT_CALL(*bad_distribution_sampler, min).Times(1).WillOnce(Return(1)); - EXPECT_CALL(*bad_distribution_sampler, max).Times(1).WillOnce(Return(99)); + EXPECT_CALL(*bad_distribution_sampler, min).WillOnce(Return(1)); + EXPECT_CALL(*bad_distribution_sampler, max).WillOnce(Return(99)); EXPECT_THROW( GraduallyOpeningRateLimiterFilter gorl(1s, std::move(bad_distribution_sampler), std::make_unique>()); diff --git a/test/request_source/BUILD b/test/request_source/BUILD new file mode 100644 index 000000000..9b1e6bf18 --- /dev/null +++ b/test/request_source/BUILD @@ -0,0 +1,48 @@ +load( + "@envoy//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_cc_test_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test_library( + name = "stub_plugin_impl", + srcs = [ + "stub_plugin_impl.cc", + ], + hdrs = [ + "stub_plugin_impl.h", + ], + repository = "@envoy", + deps = [ + "//include/nighthawk/request_source:request_source_plugin_config_factory_lib", + "//source/common:nighthawk_common_lib", + "//source/common:request_impl_lib", + "//source/common:request_source_impl_lib", + "@envoy//source/common/protobuf:message_validator_lib_with_external_headers", + "@envoy//source/common/protobuf:protobuf_with_external_headers", + "@envoy//source/common/protobuf:utility_lib_with_external_headers", + "@envoy//source/exe:platform_header_lib_with_external_headers", + "@envoy//source/exe:platform_impl_lib", + ], +) + +envoy_cc_test( + name = "request_source_plugin_test", + srcs = ["request_source_plugin_test.cc"], + data = [ + "test_data/test-config.yaml", + ], + repository = "@envoy", + deps = [ + "//source/request_source:request_options_list_plugin_impl", + "//test/request_source:stub_plugin_impl", + "//test/test_common:environment_lib", + "@envoy//source/common/config:utility_lib_with_external_headers", + "@envoy//test/mocks/api:api_mocks", + ], +) diff --git a/test/request_source/request_source_plugin_test.cc b/test/request_source/request_source_plugin_test.cc new file mode 100644 index 000000000..04140a92a --- /dev/null +++ b/test/request_source/request_source_plugin_test.cc @@ -0,0 +1,341 @@ +#include "envoy/common/exception.h" + +#include "external/envoy/source/common/config/utility.h" +#include "external/envoy/test/mocks/api/mocks.h" +#include "external/envoy/test/mocks/stats/mocks.h" +#include "external/envoy/test/test_common/file_system_for_test.h" +#include "external/envoy/test/test_common/utility.h" + +#include "request_source/request_options_list_plugin_impl.h" + +#include "test/request_source/stub_plugin_impl.h" +#include "test/test_common/environment.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Nighthawk { + +namespace { +using nighthawk::request_source::FileBasedOptionsListRequestSourceConfig; +using nighthawk::request_source::InLineOptionsListRequestSourceConfig; +using nighthawk::request_source::StubPluginConfig; +using ::testing::NiceMock; +using ::testing::Test; +nighthawk::request_source::FileBasedOptionsListRequestSourceConfig +MakeFileBasedPluginConfigWithTestYaml(absl::string_view request_file) { + nighthawk::request_source::FileBasedOptionsListRequestSourceConfig config; + config.mutable_file_path()->assign(request_file); + config.mutable_max_file_size()->set_value(4000); + return config; +} +nighthawk::request_source::InLineOptionsListRequestSourceConfig +MakeInLinePluginConfig(nighthawk::client::RequestOptionsList options_list, int num_requests) { + nighthawk::request_source::InLineOptionsListRequestSourceConfig config; + *config.mutable_options_list() = std::move(options_list); + config.set_num_requests(num_requests); + return config; +} + +class StubRequestSourcePluginTest : public Test { +public: + StubRequestSourcePluginTest() : api_(Envoy::Api::createApiForTest(stats_store_)) {} + Envoy::Stats::MockIsolatedStatsStore stats_store_; + Envoy::Api::ApiPtr api_; +}; + +class FileBasedRequestSourcePluginTest : public Test { +public: + FileBasedRequestSourcePluginTest() : api_(Envoy::Api::createApiForTest(stats_store_)) {} + Envoy::Stats::MockIsolatedStatsStore stats_store_; + Envoy::Api::ApiPtr api_; +}; + +class InLineRequestSourcePluginTest : public Test { +public: + InLineRequestSourcePluginTest() : api_(Envoy::Api::createApiForTest(stats_store_)) {} + Envoy::Stats::MockIsolatedStatsStore stats_store_; + Envoy::Api::ApiPtr api_; +}; +TEST_F(StubRequestSourcePluginTest, CreateEmptyConfigProtoCreatesCorrectType) { + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.stub-request-source-plugin"); + const Envoy::ProtobufTypes::MessagePtr empty_config = config_factory.createEmptyConfigProto(); + const nighthawk::request_source::StubPluginConfig expected_config; + EXPECT_EQ(empty_config->DebugString(), expected_config.DebugString()); + EXPECT_TRUE(Envoy::MessageUtil()(*empty_config, expected_config)); +} + +TEST_F(StubRequestSourcePluginTest, FactoryRegistrationUsesCorrectPluginName) { + nighthawk::request_source::StubPluginConfig config; + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.stub-request-source-plugin"); + EXPECT_EQ(config_factory.name(), "nighthawk.stub-request-source-plugin"); +} + +TEST_F(StubRequestSourcePluginTest, CreateRequestSourcePluginCreatesCorrectPluginType) { + nighthawk::request_source::StubPluginConfig config; + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.stub-request-source-plugin"); + auto header = Envoy::Http::RequestHeaderMapImpl::create(); + RequestSourcePtr plugin = + config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + plugin->initOnThread(); + EXPECT_NE(dynamic_cast(plugin.get()), nullptr); +} +TEST_F(StubRequestSourcePluginTest, CreateRequestSourcePluginCreatesWorkingPlugin) { + nighthawk::request_source::StubPluginConfig config; + double test_value = 2; + config.mutable_test_value()->set_value(test_value); + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.stub-request-source-plugin"); + auto template_header = Envoy::Http::RequestHeaderMapImpl::create(); + RequestSourcePtr plugin = + config_factory.createRequestSourcePlugin(config_any, *api_, std::move(template_header)); + plugin->initOnThread(); + Nighthawk::RequestGenerator generator = plugin->get(); + Nighthawk::RequestPtr request = generator(); + Nighthawk::HeaderMapPtr header = request->header(); + ASSERT_EQ(header->get(Envoy::Http::LowerCaseString("test_value")).size(), 1); + EXPECT_EQ(header->get(Envoy::Http::LowerCaseString("test_value"))[0]->value().getStringView(), + absl::string_view(std::to_string(test_value))); +} +TEST_F(FileBasedRequestSourcePluginTest, CreateEmptyConfigProtoCreatesCorrectType) { + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.file-based-request-source-plugin"); + const Envoy::ProtobufTypes::MessagePtr empty_config = config_factory.createEmptyConfigProto(); + const nighthawk::request_source::FileBasedOptionsListRequestSourceConfig expected_config; + EXPECT_EQ(empty_config->DebugString(), expected_config.DebugString()); + EXPECT_TRUE(Envoy::MessageUtil()(*empty_config, expected_config)); +} + +TEST_F(FileBasedRequestSourcePluginTest, FactoryRegistrationUsesCorrectPluginName) { + nighthawk::request_source::FileBasedOptionsListRequestSourceConfig config; + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.file-based-request-source-plugin"); + EXPECT_EQ(config_factory.name(), "nighthawk.file-based-request-source-plugin"); +} + +TEST_F(FileBasedRequestSourcePluginTest, CreateRequestSourcePluginCreatesCorrectPluginType) { + nighthawk::request_source::FileBasedOptionsListRequestSourceConfig config = + MakeFileBasedPluginConfigWithTestYaml( + TestEnvironment::runfilesPath("test/request_source/test_data/test-config.yaml")); + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.file-based-request-source-plugin"); + auto header = Envoy::Http::RequestHeaderMapImpl::create(); + RequestSourcePtr plugin = + config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + plugin->initOnThread(); + EXPECT_NE(dynamic_cast(plugin.get()), nullptr); +} + +TEST_F(FileBasedRequestSourcePluginTest, + CreateRequestSourcePluginGetsWorkingRequestGeneratorThatEndsAtNumRequest) { + nighthawk::request_source::FileBasedOptionsListRequestSourceConfig config = + MakeFileBasedPluginConfigWithTestYaml( + TestEnvironment::runfilesPath("test/request_source/test_data/test-config.yaml")); + config.set_num_requests(2); + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.file-based-request-source-plugin"); + auto header = Envoy::Http::RequestHeaderMapImpl::create(); + RequestSourcePtr file_based_request_source = + config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + file_based_request_source->initOnThread(); + Nighthawk::RequestGenerator generator = file_based_request_source->get(); + Nighthawk::RequestPtr request1 = generator(); + Nighthawk::RequestPtr request2 = generator(); + Nighthawk::RequestPtr request3 = generator(); + ASSERT_NE(request1, nullptr); + ASSERT_NE(request2, nullptr); + + Nighthawk::HeaderMapPtr header1 = request1->header(); + Nighthawk::HeaderMapPtr header2 = request2->header(); + EXPECT_EQ(header1->getPathValue(), "/a"); + EXPECT_EQ(header2->getPathValue(), "/b"); + EXPECT_EQ(request3, nullptr); +} + +TEST_F(FileBasedRequestSourcePluginTest, CreateRequestSourcePluginWithTooLargeAFileThrowsAnError) { + nighthawk::request_source::FileBasedOptionsListRequestSourceConfig config = + MakeFileBasedPluginConfigWithTestYaml( + TestEnvironment::runfilesPath("test/request_source/test_data/test-config.yaml")); + const uint32_t max_file_size = 10; + config.set_num_requests(2); + config.mutable_max_file_size()->set_value(max_file_size); + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.file-based-request-source-plugin"); + auto header = Envoy::Http::RequestHeaderMapImpl::create(); + EXPECT_THROW_WITH_REGEX( + config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)), + NighthawkException, "file size must be less than max_file_size"); +} + +TEST_F(FileBasedRequestSourcePluginTest, + CreateRequestSourcePluginWithMoreNumRequestsThanInFileGetsRequestGeneratorThatLoops) { + nighthawk::request_source::FileBasedOptionsListRequestSourceConfig config = + MakeFileBasedPluginConfigWithTestYaml( + TestEnvironment::runfilesPath("test/request_source/test_data/test-config.yaml")); + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.file-based-request-source-plugin"); + auto header = Envoy::Http::RequestHeaderMapImpl::create(); + RequestSourcePtr file_based_request_source = + config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + file_based_request_source->initOnThread(); + Nighthawk::RequestGenerator generator = file_based_request_source->get(); + Nighthawk::RequestPtr request1 = generator(); + Nighthawk::RequestPtr request2 = generator(); + Nighthawk::RequestPtr request3 = generator(); + ASSERT_NE(request1, nullptr); + ASSERT_NE(request2, nullptr); + ASSERT_NE(request3, nullptr); + + Nighthawk::HeaderMapPtr header1 = request1->header(); + Nighthawk::HeaderMapPtr header2 = request2->header(); + Nighthawk::HeaderMapPtr header3 = request3->header(); + EXPECT_EQ(header1->getPathValue(), "/a"); + EXPECT_EQ(header2->getPathValue(), "/b"); + EXPECT_EQ(header3->getPathValue(), "/a"); +} + +TEST_F(InLineRequestSourcePluginTest, CreateEmptyConfigProtoCreatesCorrectType) { + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.in-line-options-list-request-source-plugin"); + const Envoy::ProtobufTypes::MessagePtr empty_config = config_factory.createEmptyConfigProto(); + const nighthawk::request_source::InLineOptionsListRequestSourceConfig expected_config; + EXPECT_EQ(empty_config->DebugString(), expected_config.DebugString()); + EXPECT_TRUE(Envoy::MessageUtil()(*empty_config, expected_config)); +} + +TEST_F(InLineRequestSourcePluginTest, FactoryRegistrationUsesCorrectPluginName) { + nighthawk::request_source::InLineOptionsListRequestSourceConfig config; + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.in-line-options-list-request-source-plugin"); + EXPECT_EQ(config_factory.name(), "nighthawk.in-line-options-list-request-source-plugin"); +} + +TEST_F(InLineRequestSourcePluginTest, CreateRequestSourcePluginCreatesCorrectPluginType) { + Envoy::MessageUtil util; + nighthawk::client::RequestOptionsList options_list; + util.loadFromFile(/*file to load*/ TestEnvironment::runfilesPath( + "test/request_source/test_data/test-config.yaml"), + /*out parameter*/ options_list, + /*validation visitor*/ Envoy::ProtobufMessage::getStrictValidationVisitor(), + /*Api*/ *api_, + /*use api boosting*/ true); + nighthawk::request_source::InLineOptionsListRequestSourceConfig config = + MakeInLinePluginConfig(options_list, /*num_requests*/ 2); + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.in-line-options-list-request-source-plugin"); + auto header = Envoy::Http::RequestHeaderMapImpl::create(); + RequestSourcePtr plugin = + config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + plugin->initOnThread(); + EXPECT_NE(dynamic_cast(plugin.get()), nullptr); +} + +TEST_F(InLineRequestSourcePluginTest, + CreateRequestSourcePluginGetsWorkingRequestGeneratorThatEndsAtNumRequest) { + Envoy::MessageUtil util; + nighthawk::client::RequestOptionsList options_list; + util.loadFromFile(/*file to load*/ TestEnvironment::runfilesPath( + "test/request_source/test_data/test-config.yaml"), + /*out parameter*/ options_list, + /*validation visitor*/ Envoy::ProtobufMessage::getStrictValidationVisitor(), + /*Api*/ *api_, + /*use api boosting*/ true); + nighthawk::request_source::InLineOptionsListRequestSourceConfig config = + MakeInLinePluginConfig(options_list, /*num_requests*/ 2); + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.in-line-options-list-request-source-plugin"); + auto header = Envoy::Http::RequestHeaderMapImpl::create(); + RequestSourcePtr plugin = + config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + plugin->initOnThread(); + Nighthawk::RequestGenerator generator = plugin->get(); + Nighthawk::RequestPtr request1 = generator(); + Nighthawk::RequestPtr request2 = generator(); + Nighthawk::RequestPtr request3 = generator(); + ASSERT_NE(request1, nullptr); + ASSERT_NE(request2, nullptr); + + Nighthawk::HeaderMapPtr header1 = request1->header(); + Nighthawk::HeaderMapPtr header2 = request2->header(); + EXPECT_EQ(header1->getPathValue(), "/a"); + EXPECT_EQ(header2->getPathValue(), "/b"); + EXPECT_EQ(request3, nullptr); +} + +TEST_F(InLineRequestSourcePluginTest, + CreateRequestSourcePluginWithMoreNumRequestsThanInListGetsRequestGeneratorThatLoops) { + Envoy::MessageUtil util; + nighthawk::client::RequestOptionsList options_list; + util.loadFromFile(/*file to load*/ TestEnvironment::runfilesPath( + "test/request_source/test_data/test-config.yaml"), + /*out parameter*/ options_list, + /*validation visitor*/ Envoy::ProtobufMessage::getStrictValidationVisitor(), + /*Api*/ *api_, + /*use api boosting*/ true); + nighthawk::request_source::InLineOptionsListRequestSourceConfig config = + MakeInLinePluginConfig(options_list, /*num_requests*/ 4); + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.in-line-options-list-request-source-plugin"); + auto header = Envoy::Http::RequestHeaderMapImpl::create(); + RequestSourcePtr plugin = + config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + plugin->initOnThread(); + Nighthawk::RequestGenerator generator = plugin->get(); + Nighthawk::RequestPtr request1 = generator(); + Nighthawk::RequestPtr request2 = generator(); + Nighthawk::RequestPtr request3 = generator(); + ASSERT_NE(request1, nullptr); + ASSERT_NE(request2, nullptr); + ASSERT_NE(request3, nullptr); + + Nighthawk::HeaderMapPtr header1 = request1->header(); + Nighthawk::HeaderMapPtr header2 = request2->header(); + Nighthawk::HeaderMapPtr header3 = request3->header(); + EXPECT_EQ(header1->getPathValue(), "/a"); + EXPECT_EQ(header2->getPathValue(), "/b"); + EXPECT_EQ(header3->getPathValue(), "/a"); +} +} // namespace +} // namespace Nighthawk diff --git a/test/request_source/stub_plugin_impl.cc b/test/request_source/stub_plugin_impl.cc new file mode 100644 index 000000000..7ca882263 --- /dev/null +++ b/test/request_source/stub_plugin_impl.cc @@ -0,0 +1,47 @@ +#include "test/request_source/stub_plugin_impl.h" + +#include "external/envoy/source/common/protobuf/message_validator_impl.h" +#include "external/envoy/source/common/protobuf/utility.h" +#include "external/envoy/source/exe/platform_impl.h" + +#include "api/client/options.pb.h" + +#include "common/request_impl.h" +#include "common/request_source_impl.h" + +namespace Nighthawk { + +std::string StubRequestSourcePluginConfigFactory::name() const { + return "nighthawk.stub-request-source-plugin"; +} + +Envoy::ProtobufTypes::MessagePtr StubRequestSourcePluginConfigFactory::createEmptyConfigProto() { + return std::make_unique(); +} + +RequestSourcePtr StubRequestSourcePluginConfigFactory::createRequestSourcePlugin( + const Envoy::Protobuf::Message& message, Envoy::Api::Api&, Envoy::Http::RequestHeaderMapPtr) { + const auto& any = dynamic_cast(message); + nighthawk::request_source::StubPluginConfig config; + Envoy::MessageUtil::unpackTo(any, config); + return std::make_unique(config); +} + +REGISTER_FACTORY(StubRequestSourcePluginConfigFactory, RequestSourcePluginConfigFactory); + +StubRequestSource::StubRequestSource(const nighthawk::request_source::StubPluginConfig& config) + : test_value_{config.has_test_value() ? config.test_value().value() : 0} {} +RequestGenerator StubRequestSource::get() { + + RequestGenerator request_generator = [this]() { + Envoy::Http::RequestHeaderMapPtr header = Envoy::Http::RequestHeaderMapImpl::create(); + header->setCopy(Envoy::Http::LowerCaseString("test_value"), std::to_string(test_value_)); + auto returned_request_impl = std::make_unique(std::move(header)); + return returned_request_impl; + }; + return request_generator; +} + +void StubRequestSource::initOnThread() {} + +} // namespace Nighthawk \ No newline at end of file diff --git a/test/request_source/stub_plugin_impl.h b/test/request_source/stub_plugin_impl.h new file mode 100644 index 000000000..b45e64c9e --- /dev/null +++ b/test/request_source/stub_plugin_impl.h @@ -0,0 +1,56 @@ +// Test implementations of RequestSourceConfigFactory and RequestSource that perform minimum +// functionality for testing purposes. +#pragma once + +#include "envoy/registry/registry.h" + +#include "nighthawk/request_source/request_source_plugin_config_factory.h" + +#include "api/client/options.pb.h" +#include "api/request_source/request_source_plugin.pb.h" + +#include "common/uri_impl.h" + +namespace Nighthawk { + +// Stub Request Source implementation for comparison. +class StubRequestSource : public RequestSource { +public: + StubRequestSource(const nighthawk::request_source::StubPluginConfig& config); + // The generator function will return a header whose only value is the test_value taken from the + // config. The function is threadsafe. + RequestGenerator get() override; + + // default implementation + void initOnThread() override; + +private: + const double test_value_; +}; + +// Factory that creates a StubRequestSource from a StubRequestSourcePluginConfig proto. +// Registered as an Envoy plugin. +// Stub implementation of RequestSourceConfigFactory which produces a RequestSource. +// RequestSources are used to get RequestGenerators which generate requests for the benchmark +// client. All plugins configuration are specified in the request_source_plugin.proto This class is +// thread-safe, but it doesn't do anything. Usage: assume you are passed an appropriate Any type +// object called config, an Api object called api, and a default header called header. auto& +// config_factory = +// Envoy::Config::Utility::getAndCheckFactoryByName( +// "nighthawk.stub-request-source-plugin"); +// RequestSourcePtr plugin = +// config_factory.createRequestSourcePlugin(config, std::move(api), std::move(header)); + +class StubRequestSourcePluginConfigFactory : public virtual RequestSourcePluginConfigFactory { +public: + std::string name() const override; + Envoy::ProtobufTypes::MessagePtr createEmptyConfigProto() override; + // This implementation is thread safe, but the RequestSource it generates doesn't do much. + RequestSourcePtr createRequestSourcePlugin(const Envoy::Protobuf::Message& message, + Envoy::Api::Api& api, + Envoy::Http::RequestHeaderMapPtr header) override; +}; + +// This factory will be activated through RequestSourceFactory in factories.h +DECLARE_FACTORY(StubRequestSourcePluginConfigFactory); +} // namespace Nighthawk \ No newline at end of file diff --git a/test/request_source/test_data/test-config.yaml b/test/request_source/test_data/test-config.yaml new file mode 100644 index 000000000..b78bcba64 --- /dev/null +++ b/test/request_source/test_data/test-config.yaml @@ -0,0 +1,11 @@ +options: + - request_method: 1 + request_body_size: 10 + request_headers: + - { header: { key: ":path", value: "/a" } } + - { header: { key: "x-nighthawk-test-server-config", value: "{response_body_size:13}" } } + - request_method: 1 + request_body_size: 10 + request_headers: + - { header: { key: ":path", value: "/b" } } + - { header: { key: "x-nighthawk-test-server-config", value: "{response_body_size:17}" } } \ No newline at end of file diff --git a/test/request_stream_grpc_client_test.cc b/test/request_stream_grpc_client_test.cc index 91661ea52..b78b32f32 100644 --- a/test/request_stream_grpc_client_test.cc +++ b/test/request_stream_grpc_client_test.cc @@ -1,5 +1,10 @@ +#include "envoy/api/v2/core/base.pb.h" +#include "envoy/config/core/v3/base.pb.h" + #include "external/envoy/test/test_common/utility.h" +#include "api/request_source/service.pb.h" + #include "common/request_impl.h" #include "common/request_stream_grpc_client_impl.h" @@ -8,6 +13,9 @@ using namespace testing; namespace Nighthawk { +namespace { + +using ::nighthawk::request_source::RequestSpecifier; // The grpc client itself is tested via the python based integration tests. // It is convenient to test message translation here. @@ -18,7 +26,9 @@ class ProtoRequestHelperTest : public Test { // We test for equality. If we observe mismatch, we use EXPECT_EQ which is guaranteed // to fail -- but will provide much more helpful output. if (!Envoy::TestUtility::headerMapEqualIgnoreOrder(expected_header_, *request->header())) { - EXPECT_EQ(expected_header_, *request->header()); + EXPECT_EQ(expected_header_, *request->header()) << "expected headers:\n" + << expected_header_ << "\nactual headers:\n" + << *request->header() << "\n"; }; } @@ -42,14 +52,34 @@ TEST_F(ProtoRequestHelperTest, ExplicitFields) { translateExpectingEqual(); } -// Test the generic header api we offer in the proto api. -TEST_F(ProtoRequestHelperTest, GenericHeaderFields) { - auto* request_specifier = response_.mutable_request_specifier(); - auto* headers = request_specifier->mutable_headers(); - auto* header_1 = headers->add_headers(); +// Test the generic header api we offer in the proto api using Envoy API v2 +// primitives. +TEST_F(ProtoRequestHelperTest, GenericHeaderFieldsUsingDeprecatedEnvoyV2Api) { + RequestSpecifier* request_specifier = response_.mutable_request_specifier(); + envoy::api::v2::core::HeaderMap* headers = request_specifier->mutable_headers(); + envoy::api::v2::core::HeaderValue* header_1 = headers->add_headers(); + header_1->set_key("header1"); + header_1->set_value("value1"); + envoy::api::v2::core::HeaderValue* header_2 = headers->add_headers(); + header_2->set_key("header2"); + header_2->set_value("value2"); + // We re-add the same header, but do not expect that to show up in the translation because we + // always replace. + headers->add_headers()->MergeFrom(*header_2); + expected_header_ = + Envoy::Http::TestRequestHeaderMapImpl{{"header1", "value1"}, {"header2", "value2"}}; + translateExpectingEqual(); +} + +// Test the generic header api we offer in the proto api using Envoy API v3 +// primitives. +TEST_F(ProtoRequestHelperTest, GenericHeaderFieldsUsingEnvoyV3Api) { + RequestSpecifier* request_specifier = response_.mutable_request_specifier(); + envoy::config::core::v3::HeaderMap* headers = request_specifier->mutable_v3_headers(); + envoy::config::core::v3::HeaderValue* header_1 = headers->add_headers(); header_1->set_key("header1"); header_1->set_value("value1"); - auto* header_2 = headers->add_headers(); + envoy::config::core::v3::HeaderValue* header_2 = headers->add_headers(); header_2->set_key("header2"); header_2->set_value("value2"); // We re-add the same header, but do not expect that to show up in the translation because we @@ -74,4 +104,5 @@ TEST_F(ProtoRequestHelperTest, AmbiguousHost) { translateExpectingEqual(); } +} // namespace } // namespace Nighthawk diff --git a/test/sequencer_test.cc b/test/sequencer_test.cc index 82ad3f69e..b87f1730f 100644 --- a/test/sequencer_test.cc +++ b/test/sequencer_test.cc @@ -34,7 +34,7 @@ class FakeSequencerTarget { class MockSequencerTarget : public FakeSequencerTarget { public: - MOCK_METHOD1(callback, bool(OperationCallback)); + MOCK_METHOD(bool, callback, (OperationCallback)); }; class SequencerTestBase : public testing::Test { @@ -201,7 +201,7 @@ TEST_F(SequencerTestWithTimerEmulation, RateLimiterSaturatedTargetInteraction) { EXPECT_CALL(*target(), callback(_)).Times(2).WillOnce(Return(true)).WillOnce(Return(false)); // The sequencer should call RateLimiter::releaseOne() when the target returns false. - EXPECT_CALL(rate_limiter_unsafe_ref_, releaseOne()).Times(1); + EXPECT_CALL(rate_limiter_unsafe_ref_, releaseOne()); expectDispatcherRun(); EXPECT_CALL(platform_util_, sleep(_)).Times(AtLeast(1)); diff --git a/test/server/BUILD b/test/server/BUILD index 68d5bab08..20b13e589 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -65,3 +65,16 @@ envoy_cc_test( "@envoy//test/test_common:simulated_time_system_lib", ], ) + +envoy_cc_test( + name = "configuration_test", + srcs = ["configuration_test.cc"], + repository = "@envoy", + deps = [ + "//api/server:response_options_proto_cc_proto", + "//source/server:configuration_lib", + "@envoy//test/test_common:utility_lib", + "@envoy_api//envoy/api/v2/core:pkg_cc_proto", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) diff --git a/test/server/configuration_test.cc b/test/server/configuration_test.cc new file mode 100644 index 000000000..f0c1d9458 --- /dev/null +++ b/test/server/configuration_test.cc @@ -0,0 +1,235 @@ +#include "envoy/api/v2/core/base.pb.h" +#include "envoy/config/core/v3/base.pb.h" + +#include "external/envoy/test/test_common/utility.h" + +#include "api/server/response_options.pb.validate.h" + +#include "server/configuration.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Nighthawk { +namespace Server { +namespace Configuration { +namespace { + +using ::Envoy::Http::LowerCaseString; +using ::Envoy::Http::TestResponseHeaderMapImpl; + +TEST(UpgradeDeprecatedEnvoyV2HeaderValueOptionToV3Test, UpgradesEmptyHeaderValue) { + envoy::api::v2::core::HeaderValueOption v2_header_value_option; + envoy::config::core::v3::HeaderValueOption v3_header_value_option = + upgradeDeprecatedEnvoyV2HeaderValueOptionToV3(v2_header_value_option); + + EXPECT_FALSE(v3_header_value_option.has_append()); + EXPECT_FALSE(v3_header_value_option.has_header()); +} + +TEST(UpgradeDeprecatedEnvoyV2HeaderValueOptionToV3Test, UpgradesHeaderValueWithHeaderAndAppendSet) { + envoy::api::v2::core::HeaderValueOption v2_header_value_option; + v2_header_value_option.mutable_append()->set_value(true); + v2_header_value_option.mutable_header()->set_key("key"); + v2_header_value_option.mutable_header()->set_value("value"); + + envoy::config::core::v3::HeaderValueOption v3_header_value_option = + upgradeDeprecatedEnvoyV2HeaderValueOptionToV3(v2_header_value_option); + + EXPECT_TRUE(v3_header_value_option.append().value()); + EXPECT_EQ(v3_header_value_option.header().key(), "key"); + EXPECT_EQ(v3_header_value_option.header().value(), "value"); +} + +TEST(UpgradeDeprecatedEnvoyV2HeaderValueOptionToV3Test, UpgradesHeaderValueWithHeaderOnly) { + envoy::api::v2::core::HeaderValueOption v2_header_value_option; + v2_header_value_option.mutable_header()->set_key("key"); + v2_header_value_option.mutable_header()->set_value("value"); + + envoy::config::core::v3::HeaderValueOption v3_header_value_option = + upgradeDeprecatedEnvoyV2HeaderValueOptionToV3(v2_header_value_option); + + EXPECT_FALSE(v3_header_value_option.has_append()); + EXPECT_EQ(v3_header_value_option.header().key(), "key"); + EXPECT_EQ(v3_header_value_option.header().value(), "value"); +} + +TEST(UpgradeDeprecatedEnvoyV2HeaderValueOptionToV3Test, UpgradesHeaderValueWithAppendOnly) { + envoy::api::v2::core::HeaderValueOption v2_header_value_option; + v2_header_value_option.mutable_append()->set_value(true); + + envoy::config::core::v3::HeaderValueOption v3_header_value_option = + upgradeDeprecatedEnvoyV2HeaderValueOptionToV3(v2_header_value_option); + + EXPECT_TRUE(v3_header_value_option.append().value()); + EXPECT_FALSE(v3_header_value_option.has_header()); +} + +// Specifies the Envoy API version to use in the test configuration. +enum EnvoyApiVersion { + EnvoyApiV2, + EnvoyApiV3, +}; + +// Specifies if headers with duplicate key should be appended or replace the +// previous header. +enum HeaderAddMode { + ReplaceOnDuplicateKey, + AppendOnDuplicateKey, +}; + +// Creates a test configuration with three headers, two of which have the same +// key. The following headers are added: +// +// key1: header1_value +// key2: header2_value +// key1: header3_value +// +// @param api_version determines the version of the Envoy API used in the +// created configuration. +// @param add_mode specifies how the header with the duplicate key is added. +// @return a configuration for the test. +nighthawk::server::ResponseOptions createTestConfiguration(EnvoyApiVersion api_version, + HeaderAddMode add_mode) { + nighthawk::server::ResponseOptions configuration; + + if (api_version == EnvoyApiV2) { + envoy::api::v2::core::HeaderValueOption* header1 = configuration.add_response_headers(); + header1->mutable_header()->set_key("key1"); + header1->mutable_header()->set_value("header1_value"); + + envoy::api::v2::core::HeaderValueOption* header2 = configuration.add_response_headers(); + header2->mutable_header()->set_key("key2"); + header2->mutable_header()->set_value("header2_value"); + + envoy::api::v2::core::HeaderValueOption* header3 = configuration.add_response_headers(); + header3->mutable_header()->set_key("key1"); + header3->mutable_header()->set_value("header3_value"); + if (add_mode == AppendOnDuplicateKey) { + header3->mutable_append()->set_value("true"); + } + } else if (api_version == EnvoyApiV3) { + envoy::config::core::v3::HeaderValueOption* header1 = configuration.add_v3_response_headers(); + header1->mutable_header()->set_key("key1"); + header1->mutable_header()->set_value("header1_value"); + + envoy::config::core::v3::HeaderValueOption* header2 = configuration.add_v3_response_headers(); + header2->mutable_header()->set_key("key2"); + header2->mutable_header()->set_value("header2_value"); + + envoy::config::core::v3::HeaderValueOption* header3 = configuration.add_v3_response_headers(); + header3->mutable_header()->set_key("key1"); + header3->mutable_header()->set_value("header3_value"); + if (add_mode == AppendOnDuplicateKey) { + header3->mutable_append()->set_value("true"); + } + } + return configuration; +} + +// Creates the expected header map for the specified add mode. +// +// @param add_mode specifies how the header with the duplicate key is added. +// @return a header map populated with the expected headers. +TestResponseHeaderMapImpl createExpectedHeaderMap(HeaderAddMode add_mode) { + TestResponseHeaderMapImpl expected_header_map; + if (add_mode == ReplaceOnDuplicateKey) { + expected_header_map.addCopy(LowerCaseString("key2"), "header2_value"); + expected_header_map.addCopy(LowerCaseString("key1"), "header3_value"); + } else if (add_mode == AppendOnDuplicateKey) { + expected_header_map.addCopy(LowerCaseString("key1"), "header1_value"); + expected_header_map.addCopy(LowerCaseString("key2"), "header2_value"); + expected_header_map.addCopy(LowerCaseString("key1"), "header3_value"); + } + return expected_header_map; +} + +TEST(ApplyConfigToResponseHeaders, ReplacesHeadersFromEnvoyApiV2Config) { + HeaderAddMode add_mode = ReplaceOnDuplicateKey; + nighthawk::server::ResponseOptions configuration = createTestConfiguration(EnvoyApiV2, add_mode); + + TestResponseHeaderMapImpl header_map; + applyConfigToResponseHeaders(header_map, configuration); + TestResponseHeaderMapImpl expected_header_map = createExpectedHeaderMap(add_mode); + + EXPECT_EQ(header_map, expected_header_map) << "got header_map:\n" + << header_map << "\nexpected_header_map:\n" + << expected_header_map; +} + +TEST(ApplyConfigToResponseHeaders, AppendsHeadersFromEnvoyApiV2Config) { + HeaderAddMode add_mode = AppendOnDuplicateKey; + nighthawk::server::ResponseOptions configuration = createTestConfiguration(EnvoyApiV2, add_mode); + + TestResponseHeaderMapImpl header_map; + applyConfigToResponseHeaders(header_map, configuration); + TestResponseHeaderMapImpl expected_header_map = createExpectedHeaderMap(add_mode); + + EXPECT_EQ(header_map, expected_header_map) << "got header_map:\n" + << header_map << "\nexpected_header_map:\n" + << expected_header_map; +} + +TEST(ApplyConfigToResponseHeaders, ReplacesHeadersFromEnvoyApiV3Config) { + HeaderAddMode add_mode = ReplaceOnDuplicateKey; + nighthawk::server::ResponseOptions configuration = createTestConfiguration(EnvoyApiV3, add_mode); + + TestResponseHeaderMapImpl header_map; + applyConfigToResponseHeaders(header_map, configuration); + TestResponseHeaderMapImpl expected_header_map = createExpectedHeaderMap(add_mode); + + EXPECT_EQ(header_map, expected_header_map) << "got header_map:\n" + << header_map << "\nexpected_header_map:\n" + << expected_header_map; +} + +TEST(ApplyConfigToResponseHeaders, AppendsHeadersFromEnvoyApiV3Config) { + HeaderAddMode add_mode = AppendOnDuplicateKey; + nighthawk::server::ResponseOptions configuration = createTestConfiguration(EnvoyApiV3, add_mode); + + TestResponseHeaderMapImpl header_map; + applyConfigToResponseHeaders(header_map, configuration); + TestResponseHeaderMapImpl expected_header_map = createExpectedHeaderMap(add_mode); + + EXPECT_EQ(header_map, expected_header_map) << "got header_map:\n" + << header_map << "\nexpected_header_map:\n" + << expected_header_map; +} + +TEST(ApplyConfigToResponseHeaders, ThrowsOnInvalidConfiguration) { + nighthawk::server::ResponseOptions configuration; + configuration.add_response_headers(); + configuration.add_v3_response_headers(); + + TestResponseHeaderMapImpl header_map; + EXPECT_THROW(applyConfigToResponseHeaders(header_map, configuration), Envoy::EnvoyException); +} + +TEST(ValidateResponseOptions, DoesNotThrowOnEmptyConfiguration) { + nighthawk::server::ResponseOptions configuration; + EXPECT_NO_THROW(validateResponseOptions(configuration)); +} + +TEST(ValidateResponseOptions, DoesNotThrowWhenOnlyEnvoyApiV2ResponseHeadersAreSet) { + nighthawk::server::ResponseOptions configuration; + configuration.add_response_headers(); + EXPECT_NO_THROW(validateResponseOptions(configuration)); +} + +TEST(ValidateResponseOptions, DoesNotThrowWhenOnlyEnvoyApiV3ResponseHeadersAreSet) { + nighthawk::server::ResponseOptions configuration; + configuration.add_v3_response_headers(); + EXPECT_NO_THROW(validateResponseOptions(configuration)); +} + +TEST(ValidateResponseOptions, ThrowsWhenBothEnvoyApiV2AndV3ResponseHeadersAreSet) { + nighthawk::server::ResponseOptions configuration; + configuration.add_response_headers(); + configuration.add_v3_response_headers(); + EXPECT_THROW(validateResponseOptions(configuration), Envoy::EnvoyException); +} + +} // namespace +} // namespace Configuration +} // namespace Server +} // namespace Nighthawk diff --git a/test/server/http_dynamic_delay_filter_integration_test.cc b/test/server/http_dynamic_delay_filter_integration_test.cc index 3840a11ec..b69049bee 100644 --- a/test/server/http_dynamic_delay_filter_integration_test.cc +++ b/test/server/http_dynamic_delay_filter_integration_test.cc @@ -10,6 +10,9 @@ #include "gtest/gtest.h" namespace Nighthawk { +namespace { + +using ::testing::HasSubstr; const Envoy::Http::LowerCaseString kDelayHeaderString("x-envoy-fault-delay-request"); @@ -38,6 +41,22 @@ class HttpDynamicDelayIntegrationTest INSTANTIATE_TEST_SUITE_P(IpVersions, HttpDynamicDelayIntegrationTest, testing::ValuesIn(Envoy::TestEnvironment::getIpVersionsForTest())); +TEST_P(HttpDynamicDelayIntegrationTest, + DiesWhenBothEnvoyApiV2AndV3ResponseHeadersAreSetInConfiguration) { + const std::string invalid_configuration = R"EOF( + name: dynamic-delay + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions + response_headers: + - { header: { key: "key1", value: "value1"} } + v3_response_headers: + - { header: { key: "key1", value: "value1"} } + )EOF"; + + ASSERT_DEATH(initializeFilterConfiguration(invalid_configuration), + HasSubstr("cannot specify both response_headers and v3_response_headers")); +} + // Verify expectations with an empty dynamic-delay configuration. TEST_P(HttpDynamicDelayIntegrationTest, NoStaticConfiguration) { initializeFilterConfiguration(R"( @@ -48,19 +67,21 @@ name: dynamic-delay // Don't send any config request header ... getResponse(ResponseOrigin::UPSTREAM); // ... we shouldn't observe any delay being requested via the upstream request headers. - EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString), nullptr); + EXPECT_TRUE(upstream_request_->headers().get(kDelayHeaderString).empty()); // Send a config request header with an empty / default configuration .... setRequestLevelConfiguration("{}"); getResponse(ResponseOrigin::UPSTREAM); // ... we shouldn't observe any delay being requested via the upstream request headers. - EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString), nullptr); + EXPECT_TRUE(upstream_request_->headers().get(kDelayHeaderString).empty()); // Send a config request header requesting a 1.6s delay... setRequestLevelConfiguration("{static_delay: \"1.6s\"}"); getResponse(ResponseOrigin::UPSTREAM); // ...we should observe a delay of 1.6s in the upstream request. - EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)->value().getStringView(), "1600"); + ASSERT_EQ(upstream_request_->headers().get(kDelayHeaderString).size(), 1); + EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)[0]->value().getStringView(), + "1600"); } // Verify expectations with static/file-based static_delay configuration. @@ -75,13 +96,17 @@ name: dynamic-delay // Without any request-level configuration, we expect the statically configured static delay to // apply. getResponse(ResponseOrigin::UPSTREAM); - EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)->value().getStringView(), "1330"); + ASSERT_EQ(upstream_request_->headers().get(kDelayHeaderString).size(), 1); + EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)[0]->value().getStringView(), + "1330"); // With an empty request-level configuration, we expect the statically configured static delay to // apply. setRequestLevelConfiguration("{}"); getResponse(ResponseOrigin::UPSTREAM); - EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)->value().getStringView(), "1330"); + ASSERT_EQ(upstream_request_->headers().get(kDelayHeaderString).size(), 1); + EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)[0]->value().getStringView(), + "1330"); // Overriding the statically configured static delay via request-level configuration should be // reflected in the output. @@ -92,7 +117,7 @@ name: dynamic-delay // However, the seconds part is set to '0', which equates to the default of the underlying int // type, and the fact that we are using proto3, which doesn't merge default values. // Hence the following expectation will fail, as it yields 1200 instead of the expected 200. - // EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)->value().getStringView(), + // EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)[0]->value().getStringView(), // "200"); // Overriding the statically configured static delay via request-level configuration should be @@ -100,7 +125,9 @@ name: dynamic-delay setRequestLevelConfiguration("{static_delay: \"2.2s\"}"); getResponse(ResponseOrigin::UPSTREAM); // 2.2 seconds -> 2200 ms. - EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)->value().getStringView(), "2200"); + ASSERT_EQ(upstream_request_->headers().get(kDelayHeaderString).size(), 1); + EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)[0]->value().getStringView(), + "2200"); } // Verify expectations with static/file-based concurrency_based_linear_delay configuration. @@ -116,7 +143,8 @@ name: dynamic-delay getResponse(ResponseOrigin::UPSTREAM); // Based on the algorithm of concurrency_based_linear_delay, for the first request we expect to // observe the configured minimal_delay + concurrency_delay_factor = 0.06s -> 60ms. - EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)->value().getStringView(), "60"); + ASSERT_EQ(upstream_request_->headers().get(kDelayHeaderString).size(), 1); + EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)[0]->value().getStringView(), "60"); } class ComputeTest : public testing::Test { @@ -148,4 +176,5 @@ TEST_F(ComputeTest, ComputeConcurrencyBasedLinearDelayMs) { EXPECT_EQ(compute(4, 1, 500000, 1, 500000), 5003); } +} // namespace } // namespace Nighthawk diff --git a/test/server/http_filter_base_test.cc b/test/server/http_filter_base_test.cc index 212bd59b5..adf90896f 100644 --- a/test/server/http_filter_base_test.cc +++ b/test/server/http_filter_base_test.cc @@ -90,5 +90,25 @@ TEST_P(HttpFilterBaseIntegrationTest, EmptyRequestLevelConfigurationShouldFail) EXPECT_THAT(response->body(), HasSubstr(kBadConfigErrorSentinel)); } +TEST_P(HttpFilterBaseIntegrationTest, MultipleValidConfigurationHeadersFails) { + // Make sure we fail when two valid configuration headers are send. + setRequestLevelConfiguration("{}"); + appendRequestLevelConfiguration("{}"); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); + ASSERT_TRUE(response->complete()); + EXPECT_THAT(response->body(), + HasSubstr("Received multiple configuration headers in the request")); +} + +TEST_P(HttpFilterBaseIntegrationTest, SingleValidPlusEmptyConfigurationHeadersFails) { + // Make sure we fail when both a valid configuration plus an empty configuration header is send. + setRequestLevelConfiguration("{}"); + appendRequestLevelConfiguration(""); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); + ASSERT_TRUE(response->complete()); + EXPECT_THAT(response->body(), + HasSubstr("Received multiple configuration headers in the request")); +} + } // namespace } // namespace Nighthawk \ No newline at end of file diff --git a/test/server/http_filter_integration_test_base.cc b/test/server/http_filter_integration_test_base.cc index d204133ad..009ae2928 100644 --- a/test/server/http_filter_integration_test_base.cc +++ b/test/server/http_filter_integration_test_base.cc @@ -21,6 +21,12 @@ void HttpFilterIntegrationTestBase::setRequestLevelConfiguration( setRequestHeader(Server::TestServer::HeaderNames::get().TestServerConfig, request_level_config); } +void HttpFilterIntegrationTestBase::appendRequestLevelConfiguration( + absl::string_view request_level_config) { + appendRequestHeader(Server::TestServer::HeaderNames::get().TestServerConfig, + request_level_config); +} + void HttpFilterIntegrationTestBase::switchToPostWithEntityBody() { setRequestHeader(Envoy::Http::Headers::get().Method, Envoy::Http::Headers::get().MethodValues.Post); @@ -31,6 +37,11 @@ void HttpFilterIntegrationTestBase::setRequestHeader( request_headers_.setCopy(header_name, header_value); } +void HttpFilterIntegrationTestBase::appendRequestHeader( + const Envoy::Http::LowerCaseString& header_name, absl::string_view header_value) { + request_headers_.addCopy(header_name, header_value); +} + Envoy::IntegrationStreamDecoderPtr HttpFilterIntegrationTestBase::getResponse(ResponseOrigin expected_origin) { cleanupUpstreamAndDownstream(); diff --git a/test/server/http_filter_integration_test_base.h b/test/server/http_filter_integration_test_base.h index 8027753ec..53b5cc79e 100644 --- a/test/server/http_filter_integration_test_base.h +++ b/test/server/http_filter_integration_test_base.h @@ -52,6 +52,16 @@ class HttpFilterIntegrationTestBase : public Envoy::HttpIntegrationTest { */ void setRequestLevelConfiguration(absl::string_view request_level_config); + /** + * Make getResponse add request-level configuration. Test server extensions read that + * configuration and merge it with their static configuration to determine a final effective + * configuration. See TestServerConfig in well_known_headers.h for the up to date header name. + * + * @param request_level_config Configuration to be delivered by request-header in future calls to + * getResponse(). For example: "{response_body_size:1024}". + */ + void appendRequestLevelConfiguration(absl::string_view request_level_config); + /** * Switch getResponse() to use the POST request method with an entity body. * Doing so will make tests hit a different code paths in extensions. @@ -67,6 +77,15 @@ class HttpFilterIntegrationTestBase : public Envoy::HttpIntegrationTest { void setRequestHeader(const Envoy::Http::LowerCaseString& header_name, absl::string_view header_value); + /** + * Appends a request header value. + * + * @param header_name Name of the request header to set. + * @param header_value Value to set for the request header. + */ + void appendRequestHeader(const Envoy::Http::LowerCaseString& header_name, + absl::string_view header_value); + /** * Fetch a response, according to the options specified by the class methods. By default, * simulates a GET request with minimal headers. diff --git a/test/server/http_test_server_filter_integration_test.cc b/test/server/http_test_server_filter_integration_test.cc index 702b77c3c..3e246e64a 100644 --- a/test/server/http_test_server_filter_integration_test.cc +++ b/test/server/http_test_server_filter_integration_test.cc @@ -1,100 +1,47 @@ -#include "envoy/upstream/cluster_manager.h" -#include "envoy/upstream/upstream.h" - -#include "external/envoy/test/common/upstream/utility.h" -#include "external/envoy/test/integration/http_integration.h" - #include "api/server/response_options.pb.h" #include "api/server/response_options.pb.validate.h" #include "server/configuration.h" #include "server/http_test_server_filter.h" -#include "server/well_known_headers.h" + +#include "test/server/http_filter_integration_test_base.h" #include "gtest/gtest.h" namespace Nighthawk { +namespace { using namespace testing; -constexpr absl::string_view kBadJson = "bad_json"; -class HttpTestServerIntegrationTestBase : public Envoy::HttpIntegrationTest, - public TestWithParam { -public: - HttpTestServerIntegrationTestBase() - : HttpIntegrationTest(Envoy::Http::CodecClient::Type::HTTP1, GetParam(), realTime()) {} - - // TODO(oschaaf): Modify Envoy's Envoy::IntegrationUtil::makeSingleRequest() to allow for a way to - // manipulate the request headers before they get send. Then we can eliminate these copies. - Envoy::BufferingStreamDecoderPtr makeSingleRequest( - uint32_t port, absl::string_view method, absl::string_view url, absl::string_view body, - Envoy::Http::CodecClient::Type type, Envoy::Network::Address::IpVersion ip_version, - absl::string_view host, absl::string_view content_type, - const std::function& request_header_delegate) { - auto addr = Envoy::Network::Utility::resolveUrl(fmt::format( - "tcp://{}:{}", Envoy::Network::Test::getLoopbackAddressUrlString(ip_version), port)); - return makeSingleRequest(addr, method, url, body, type, host, content_type, - request_header_delegate); - } +using ::testing::HasSubstr; - Envoy::BufferingStreamDecoderPtr makeSingleRequest( - const Envoy::Network::Address::InstanceConstSharedPtr& addr, absl::string_view method, - absl::string_view url, absl::string_view body, Envoy::Http::CodecClient::Type type, - absl::string_view host, absl::string_view content_type, - const std::function& request_header_delegate) { - Envoy::Api::ApiPtr api = Envoy::Api::createApiForTest(); - Envoy::Event::DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); - std::shared_ptr cluster{ - new NiceMock()}; - Envoy::Upstream::HostDescriptionConstSharedPtr host_description{ - Envoy::Upstream::makeTestHostDescription(cluster, "tcp://127.0.0.1:80")}; - Envoy::Http::CodecClientProd client( - type, - dispatcher->createClientConnection(addr, Envoy::Network::Address::InstanceConstSharedPtr(), - Envoy::Network::Test::createRawBufferSocket(), nullptr), - host_description, *dispatcher); - Envoy::BufferingStreamDecoderPtr response( - new Envoy::BufferingStreamDecoder([&client, &dispatcher]() -> void { - client.close(); - dispatcher->exit(); - })); - Envoy::Http::RequestEncoder& encoder = client.newStream(*response); - encoder.getStream().addCallbacks(*response); - - auto headers = Envoy::Http::RequestHeaderMapImpl::create(); - headers->setMethod(method); - headers->setPath(url); - headers->setHost(host); - headers->setScheme(Envoy::Http::Headers::get().SchemeValues.Http); - if (!content_type.empty()) { - headers->setContentType(content_type); - } - request_header_delegate(*headers); - encoder.encodeHeaders(*headers, body.empty()); - if (!body.empty()) { - Envoy::Buffer::OwnedImpl body_buffer(body); - encoder.encodeData(body_buffer, true); - } +constexpr absl::string_view kDefaultProto = R"EOF( +name: test-server +typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions + response_body_size: 10 + response_headers: + - { header: { key: "x-supplied-by", value: "nighthawk-test-server"} } +)EOF"; - dispatcher->run(Envoy::Event::Dispatcher::RunType::Block); - return response; - } +constexpr absl::string_view kNoConfigProto = R"EOF( +name: test-server +)EOF"; + +class HttpTestServerIntegrationTest : public HttpFilterIntegrationTestBase, + public TestWithParam { +public: + HttpTestServerIntegrationTest() : HttpFilterIntegrationTestBase(GetParam()) {} void testWithResponseSize(int response_body_size, bool expect_header = true) { - Envoy::BufferingStreamDecoderPtr response = makeSingleRequest( - lookupPort("http"), "GET", "/", "", downstream_protocol_, version_, "foo.com", "", - [response_body_size](Envoy::Http::RequestHeaderMapImpl& request_headers) { - const std::string header_config = - fmt::format("{{response_body_size:{}}}", response_body_size); - request_headers.addCopy( - Nighthawk::Server::TestServer::HeaderNames::get().TestServerConfig, header_config); - }); + setRequestLevelConfiguration(fmt::format("{{response_body_size:{}}}", response_body_size)); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().Status()->value().getStringView()); if (expect_header) { auto inserted_header = response->headers().get(Envoy::Http::LowerCaseString("x-supplied-by")); - ASSERT_NE(nullptr, inserted_header); - EXPECT_EQ("nighthawk-test-server", inserted_header->value().getStringView()); + ASSERT_EQ(1, inserted_header.size()); + EXPECT_EQ("nighthawk-test-server", inserted_header[0]->value().getStringView()); } if (response_body_size == 0) { EXPECT_EQ(nullptr, response->headers().ContentType()); @@ -105,55 +52,26 @@ class HttpTestServerIntegrationTestBase : public Envoy::HttpIntegrationTest, } void testBadResponseSize(int response_body_size) { - Envoy::BufferingStreamDecoderPtr response = makeSingleRequest( - lookupPort("http"), "GET", "/", "", downstream_protocol_, version_, "foo.com", "", - [response_body_size](Envoy::Http::RequestHeaderMapImpl& request_headers) { - const std::string header_config = - fmt::format("{{response_body_size:{}}}", response_body_size); - request_headers.addCopy( - Nighthawk::Server::TestServer::HeaderNames::get().TestServerConfig, header_config); - }); + setRequestLevelConfiguration(fmt::format("{{response_body_size:{}}}", response_body_size)); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); ASSERT_TRUE(response->complete()); EXPECT_EQ("500", response->headers().Status()->value().getStringView()); } }; -class HttpTestServerIntegrationTest : public HttpTestServerIntegrationTestBase { -public: - void SetUp() override { initialize(); } - - void initialize() override { - config_helper_.addFilter(R"EOF( -name: test-server -typed_config: - "@type": type.googleapis.com/nighthawk.server.ResponseOptions - response_body_size: 10 - response_headers: - - { header: { key: "x-supplied-by", value: "nighthawk-test-server"} } -)EOF"); - HttpTestServerIntegrationTestBase::initialize(); - } - - void TearDown() override { - cleanupUpstreamAndDownstream(); - test_server_.reset(); - fake_upstreams_.clear(); - } -}; - INSTANTIATE_TEST_SUITE_P(IpVersions, HttpTestServerIntegrationTest, ValuesIn(Envoy::TestEnvironment::getIpVersionsForTest())); TEST_P(HttpTestServerIntegrationTest, TestNoHeaderConfig) { - Envoy::BufferingStreamDecoderPtr response = - makeSingleRequest(lookupPort("http"), "GET", "/", "", downstream_protocol_, version_, - "foo.com", "", [](Envoy::Http::RequestHeaderMapImpl&) {}); + initializeFilterConfiguration(kDefaultProto); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(std::string(10, 'a'), response->body()); } TEST_P(HttpTestServerIntegrationTest, TestBasics) { + initializeFilterConfiguration(kDefaultProto); testWithResponseSize(1); testWithResponseSize(10); testWithResponseSize(100); @@ -161,49 +79,100 @@ TEST_P(HttpTestServerIntegrationTest, TestBasics) { testWithResponseSize(10000); } -TEST_P(HttpTestServerIntegrationTest, TestNegative) { testBadResponseSize(-1); } +TEST_P(HttpTestServerIntegrationTest, TestNegative) { + initializeFilterConfiguration(kDefaultProto); + testBadResponseSize(-1); +} // TODO(oschaaf): We can't currently override with a default value ('0') in this case. -TEST_P(HttpTestServerIntegrationTest, DISABLED_TestZeroLengthRequest) { testWithResponseSize(0); } +TEST_P(HttpTestServerIntegrationTest, DISABLED_TestZeroLengthRequest) { + initializeFilterConfiguration(kDefaultProto); + testWithResponseSize(0); +} TEST_P(HttpTestServerIntegrationTest, TestMaxBoundaryLengthRequest) { + initializeFilterConfiguration(kDefaultProto); const int max = 1024 * 1024 * 4; testWithResponseSize(max); } TEST_P(HttpTestServerIntegrationTest, TestTooLarge) { + initializeFilterConfiguration(kDefaultProto); const int max = 1024 * 1024 * 4; testBadResponseSize(max + 1); } -TEST_P(HttpTestServerIntegrationTest, TestHeaderConfig) { - Envoy::BufferingStreamDecoderPtr response = makeSingleRequest( - lookupPort("http"), "GET", "/", "", downstream_protocol_, version_, "foo.com", "", - [](Envoy::Http::RequestHeaderMapImpl& request_headers) { - const std::string header_config = - R"({response_headers: [ { header: { key: "foo", value: "bar2"}, append: true } ]})"; - request_headers.addCopy(Nighthawk::Server::TestServer::HeaderNames::get().TestServerConfig, - header_config); - }); +TEST_P(HttpTestServerIntegrationTest, TestHeaderConfigUsingEnvoyApiV2) { + initializeFilterConfiguration(kDefaultProto); + setRequestLevelConfiguration( + R"({response_headers: [ { header: { key: "foo", value: "bar2"}, append: true } ]})"); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + ASSERT_EQ(1, response->headers().get(Envoy::Http::LowerCaseString("foo")).size()); + EXPECT_EQ( + "bar2", + response->headers().get(Envoy::Http::LowerCaseString("foo"))[0]->value().getStringView()); + EXPECT_EQ(std::string(10, 'a'), response->body()); +} + +TEST_P(HttpTestServerIntegrationTest, TestHeaderConfigUsingEnvoyApiV3) { + const std::string v3_configuration = R"EOF( + name: test-server + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions + response_body_size: 10 + v3_response_headers: + - { header: { key: "foo", value: "bar2"}, append: true } + )EOF"; + + initializeFilterConfiguration(v3_configuration); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - EXPECT_EQ("bar2", - response->headers().get(Envoy::Http::LowerCaseString("foo"))->value().getStringView()); + ASSERT_EQ(1, response->headers().get(Envoy::Http::LowerCaseString("foo")).size()); + EXPECT_EQ( + "bar2", + response->headers().get(Envoy::Http::LowerCaseString("foo"))[0]->value().getStringView()); EXPECT_EQ(std::string(10, 'a'), response->body()); } +TEST_P(HttpTestServerIntegrationTest, + DiesWhenRequestLevelConfigurationResultsInBothEnvoyApiV2AndV3ResponseHeadersSet) { + initializeFilterConfiguration(kDefaultProto); + setRequestLevelConfiguration( + R"({v3_response_headers: [ { header: { key: "foo", value: "bar2"}, append: true } ]})"); + + ASSERT_DEATH(getResponse(ResponseOrigin::EXTENSION), + HasSubstr("cannot specify both response_headers and v3_response_headers")); +} + +TEST_P(HttpTestServerIntegrationTest, + DiesWhenBothEnvoyApiV2AndV3ResponseHeadersAreSetInConfiguration) { + const std::string invalid_configuration = R"EOF( + name: test-server + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions + response_headers: + - { header: { key: "key1", value: "value1"} } + v3_response_headers: + - { header: { key: "key1", value: "value1"} } + )EOF"; + + ASSERT_DEATH(initializeFilterConfiguration(invalid_configuration), + HasSubstr("cannot specify both response_headers and v3_response_headers")); +} + TEST_P(HttpTestServerIntegrationTest, TestEchoHeaders) { + initializeFilterConfiguration(kDefaultProto); + setRequestLevelConfiguration("{echo_request_headers: true}"); + setRequestHeader(Envoy::Http::LowerCaseString("gray"), "pidgeon"); + setRequestHeader(Envoy::Http::LowerCaseString("red"), "fox"); + setRequestHeader(Envoy::Http::LowerCaseString(":authority"), "foo.com"); + setRequestHeader(Envoy::Http::LowerCaseString(":path"), "/somepath"); for (auto unique_header : {"one", "two", "three"}) { - Envoy::BufferingStreamDecoderPtr response = makeSingleRequest( - lookupPort("http"), "GET", "/somepath", "", downstream_protocol_, version_, "foo.com", "", - [unique_header](Envoy::Http::RequestHeaderMapImpl& request_headers) { - request_headers.addCopy(Envoy::Http::LowerCaseString("gray"), "pidgeon"); - request_headers.addCopy(Envoy::Http::LowerCaseString("red"), "fox"); - request_headers.addCopy(Envoy::Http::LowerCaseString("unique_header"), unique_header); - request_headers.addCopy( - Nighthawk::Server::TestServer::HeaderNames::get().TestServerConfig, - "{echo_request_headers: true}"); - }); + setRequestHeader(Envoy::Http::LowerCaseString("unique_header"), unique_header); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_THAT(response->body(), HasSubstr(R"(':authority', 'foo.com')")); @@ -215,37 +184,16 @@ TEST_P(HttpTestServerIntegrationTest, TestEchoHeaders) { } } -class HttpTestServerIntegrationNoConfigTest : public HttpTestServerIntegrationTestBase { -public: - void SetUp() override { initialize(); } - - void TearDown() override { - cleanupUpstreamAndDownstream(); - test_server_.reset(); - fake_upstreams_.clear(); - } - - void initialize() override { - config_helper_.addFilter(R"EOF( -name: test-server -)EOF"); - HttpTestServerIntegrationTestBase::initialize(); - } -}; - -INSTANTIATE_TEST_SUITE_P(IpVersions, HttpTestServerIntegrationNoConfigTest, - ValuesIn(Envoy::TestEnvironment::getIpVersionsForTest())); - -TEST_P(HttpTestServerIntegrationNoConfigTest, TestNoHeaderConfig) { - Envoy::BufferingStreamDecoderPtr response = - makeSingleRequest(lookupPort("http"), "GET", "/", "", downstream_protocol_, version_, - "foo.com", "", [](Envoy::Http::RequestHeaderMapImpl&) {}); +TEST_P(HttpTestServerIntegrationTest, NoNoStaticConfigHeaderConfig) { + initializeFilterConfiguration(kNoConfigProto); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ("", response->body()); } -TEST_P(HttpTestServerIntegrationNoConfigTest, TestBasics) { +TEST_P(HttpTestServerIntegrationTest, TestNoStaticConfigBasics) { + initializeFilterConfiguration(kNoConfigProto); testWithResponseSize(1, false); testWithResponseSize(10, false); testWithResponseSize(100, false); @@ -253,56 +201,45 @@ TEST_P(HttpTestServerIntegrationNoConfigTest, TestBasics) { testWithResponseSize(10000, false); } -TEST_P(HttpTestServerIntegrationNoConfigTest, TestNegative) { testBadResponseSize(-1); } +TEST_P(HttpTestServerIntegrationTest, TestNoStaticConfigNegative) { + initializeFilterConfiguration(kNoConfigProto); + testBadResponseSize(-1); +} -TEST_P(HttpTestServerIntegrationNoConfigTest, TestZeroLengthRequest) { +TEST_P(HttpTestServerIntegrationTest, TestNoStaticConfigZeroLengthRequest) { + initializeFilterConfiguration(kNoConfigProto); testWithResponseSize(0, false); } -TEST_P(HttpTestServerIntegrationNoConfigTest, TestMaxBoundaryLengthRequest) { +TEST_P(HttpTestServerIntegrationTest, TestNoStaticConfigMaxBoundaryLengthRequest) { + initializeFilterConfiguration(kNoConfigProto); const int max = 1024 * 1024 * 4; testWithResponseSize(max, false); } -TEST_P(HttpTestServerIntegrationNoConfigTest, TestTooLarge) { +TEST_P(HttpTestServerIntegrationTest, TestNoStaticConfigTooLarge) { + initializeFilterConfiguration(kNoConfigProto); const int max = 1024 * 1024 * 4; testBadResponseSize(max + 1); } -TEST_P(HttpTestServerIntegrationNoConfigTest, TestHeaderConfig) { - Envoy::BufferingStreamDecoderPtr response = makeSingleRequest( - lookupPort("http"), "GET", "/", "", downstream_protocol_, version_, "foo.com", "", - [](Envoy::Http::RequestHeaderMapImpl& request_headers) { - const std::string header_config = - R"({response_headers: [ { header: { key: "foo", value: "bar2"}, append: true } ]})"; - request_headers.addCopy(Nighthawk::Server::TestServer::HeaderNames::get().TestServerConfig, - header_config); - }); +TEST_P(HttpTestServerIntegrationTest, TestNoStaticConfigHeaderConfig) { + initializeFilterConfiguration(kNoConfigProto); + setRequestLevelConfiguration( + R"({response_headers: [ { header: { key: "foo", value: "bar2"}, append: true } ]})"); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); + ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - EXPECT_EQ("bar2", - response->headers().get(Envoy::Http::LowerCaseString("foo"))->value().getStringView()); + ASSERT_EQ(1, response->headers().get(Envoy::Http::LowerCaseString("foo")).size()); + EXPECT_EQ( + "bar2", + response->headers().get(Envoy::Http::LowerCaseString("foo"))[0]->value().getStringView()); EXPECT_EQ("", response->body()); } -TEST_P(HttpTestServerIntegrationNoConfigTest, BadTestHeaderConfig) { - Envoy::BufferingStreamDecoderPtr response = makeSingleRequest( - lookupPort("http"), "GET", "/", "", downstream_protocol_, version_, "foo.com", "", - [](Envoy::Http::RequestHeaderMapImpl& request_headers) { - request_headers.addCopy(Nighthawk::Server::TestServer::HeaderNames::get().TestServerConfig, - kBadJson); - }); - ASSERT_TRUE(response->complete()); - EXPECT_EQ("500", response->headers().Status()->value().getStringView()); - EXPECT_EQ("test-server didn't understand the request: Error merging json config: Unable to parse " - "JSON as proto (INVALID_ARGUMENT:Unexpected token.\nbad_json\n^): bad_json", - response->body()); -} - -class HttpTestServerDecoderFilterTest : public Test {}; - // Here we test config-level merging as well as its application at the response-header level. -TEST_F(HttpTestServerDecoderFilterTest, HeaderMerge) { +TEST(HttpTestServerDecoderFilterTest, HeaderMerge) { nighthawk::server::ResponseOptions initial_options; auto response_header = initial_options.add_response_headers(); response_header->mutable_header()->set_key("foo"); @@ -312,9 +249,11 @@ TEST_F(HttpTestServerDecoderFilterTest, HeaderMerge) { Server::HttpTestServerDecoderFilterConfigSharedPtr config = std::make_shared(initial_options); Server::HttpTestServerDecoderFilter f(config); - std::string error_message; - nighthawk::server::ResponseOptions options = config->server_config(); + absl::StatusOr options_or = + config->getEffectiveConfiguration(); + ASSERT_TRUE(options_or.ok()); + nighthawk::server::ResponseOptions options = *options_or.value(); EXPECT_EQ(1, options.response_headers_size()); EXPECT_EQ("foo", options.response_headers(0).header().key()); @@ -326,6 +265,7 @@ TEST_F(HttpTestServerDecoderFilterTest, HeaderMerge) { EXPECT_TRUE(Envoy::TestUtility::headerMapEqualIgnoreOrder( header_map, Envoy::Http::TestResponseHeaderMapImpl{{":status", "200"}, {"foo", "bar1"}})); + std::string error_message; EXPECT_TRUE(Server::Configuration::mergeJsonConfig( R"({response_headers: [ { header: { key: "foo", value: "bar2"}, append: false } ]})", options, error_message)); @@ -355,11 +295,12 @@ TEST_F(HttpTestServerDecoderFilterTest, HeaderMerge) { header_map, Envoy::Http::TestResponseHeaderMapImpl{ {":status", "200"}, {"foo", "bar2"}, {"foo2", "bar3"}})); - EXPECT_FALSE(Server::Configuration::mergeJsonConfig(kBadJson, options, error_message)); - EXPECT_EQ("Error merging json config: Unable to parse JSON as proto (INVALID_ARGUMENT:Unexpected " - "token.\nbad_json\n^): bad_json", - error_message); + EXPECT_FALSE(Server::Configuration::mergeJsonConfig("bad_json", options, error_message)); + EXPECT_THAT(error_message, + testing::HasSubstr("Error merging json config: Unable to parse JSON as proto " + "(INVALID_ARGUMENT:Unexpected token.\nbad_json\n^): bad_json")); EXPECT_EQ(3, options.response_headers_size()); } +} // namespace } // namespace Nighthawk diff --git a/test/server/http_time_tracking_filter_integration_test.cc b/test/server/http_time_tracking_filter_integration_test.cc index 5f1348c56..9858fbf4f 100644 --- a/test/server/http_time_tracking_filter_integration_test.cc +++ b/test/server/http_time_tracking_filter_integration_test.cc @@ -17,6 +17,8 @@ namespace { using namespace std::chrono_literals; +using ::testing::HasSubstr; + const std::string kLatencyResponseHeaderName = "x-prd"; const std::string kDefaultProtoFragment = fmt::format( "emit_previous_request_delta_in_response_header: \"{}\"", kLatencyResponseHeaderName); @@ -37,6 +39,22 @@ class HttpTimeTrackingIntegrationTest INSTANTIATE_TEST_SUITE_P(IpVersions, HttpTimeTrackingIntegrationTest, testing::ValuesIn(Envoy::TestEnvironment::getIpVersionsForTest())); +TEST_P(HttpTimeTrackingIntegrationTest, + DiesWhenBothEnvoyApiV2AndV3ResponseHeadersAreSetInConfiguration) { + const std::string invalid_configuration = R"EOF( + name: time-tracking + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions + response_headers: + - { header: { key: "key1", value: "value1"} } + v3_response_headers: + - { header: { key: "key1", value: "value1"} } + )EOF"; + + ASSERT_DEATH(initializeFilterConfiguration(invalid_configuration), + HasSubstr("cannot specify both response_headers and v3_response_headers")); +} + // Verify expectations with static/file-based time-tracking configuration. TEST_P(HttpTimeTrackingIntegrationTest, ReturnsPositiveLatencyForStaticConfiguration) { initializeFilterConfiguration(fmt::format(kProtoConfigTemplate, kDefaultProtoFragment)); @@ -44,16 +62,15 @@ TEST_P(HttpTimeTrackingIntegrationTest, ReturnsPositiveLatencyForStaticConfigura // As the first request doesn't have a prior one, we should not observe a delta. Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::UPSTREAM); int64_t latency; - const Envoy::Http::HeaderEntry* latency_header_1 = - response->headers().get(Envoy::Http::LowerCaseString(kLatencyResponseHeaderName)); - EXPECT_EQ(latency_header_1, nullptr); + EXPECT_EQ( + response->headers().get(Envoy::Http::LowerCaseString(kLatencyResponseHeaderName)).size(), 0); // On the second request we should observe a delta. response = getResponse(ResponseOrigin::UPSTREAM); - const Envoy::Http::HeaderEntry* latency_header_2 = + const Envoy::Http::HeaderMap::GetResult& latency_header = response->headers().get(Envoy::Http::LowerCaseString(kLatencyResponseHeaderName)); - ASSERT_NE(latency_header_2, nullptr); - EXPECT_TRUE(absl::SimpleAtoi(latency_header_2->value().getStringView(), &latency)); + ASSERT_EQ(latency_header.size(), 1); + EXPECT_TRUE(absl::SimpleAtoi(latency_header[0]->value().getStringView(), &latency)); EXPECT_GT(latency, 0); } @@ -63,18 +80,18 @@ TEST_P(HttpTimeTrackingIntegrationTest, ReturnsPositiveLatencyForPerRequestConfi // As the first request doesn't have a prior one, we should not observe a delta. setRequestLevelConfiguration("{}"); Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::UPSTREAM); - EXPECT_EQ(response->headers().get(Envoy::Http::LowerCaseString(kLatencyResponseHeaderName)), - nullptr); + EXPECT_TRUE( + response->headers().get(Envoy::Http::LowerCaseString(kLatencyResponseHeaderName)).empty()); // With request level configuration indicating that the timing header should be emitted, // we should be able to observe it. setRequestLevelConfiguration(fmt::format("{{{}}}", kDefaultProtoFragment)); response = getResponse(ResponseOrigin::UPSTREAM); - const Envoy::Http::HeaderEntry* latency_header = + const Envoy::Http::HeaderMap::GetResult& latency_header = response->headers().get(Envoy::Http::LowerCaseString(kLatencyResponseHeaderName)); - ASSERT_NE(latency_header, nullptr); + ASSERT_EQ(latency_header.size(), 1); int64_t latency; - EXPECT_TRUE(absl::SimpleAtoi(latency_header->value().getStringView(), &latency)); + EXPECT_TRUE(absl::SimpleAtoi(latency_header[0]->value().getStringView(), &latency)); // TODO(oschaaf): figure out if we can use simtime here, and verify actual timing matches // what we'd expect using that. EXPECT_GT(latency, 0); diff --git a/test/statistic_test.cc b/test/statistic_test.cc index 596f04983..2378969ec 100644 --- a/test/statistic_test.cc +++ b/test/statistic_test.cc @@ -209,6 +209,42 @@ TYPED_TEST(TypedStatisticTest, ProtoOutputEmptyStats) { EXPECT_EQ(proto.pstdev().nanos(), 0); } +TYPED_TEST(TypedStatisticTest, NativeRoundtrip) { + TypeParam a; + + a.setId("bar"); + a.addValue(6543456); + a.addValue(342335); + a.addValue(543); + + const absl::StatusOr> status_or_stream = a.serializeNative(); + if (status_or_stream.ok()) { + // If the histogram states it implements native serialization/deserialization, put it through + // a round trip test. + TypeParam b; + absl::Status status = b.deserializeNative(*status_or_stream.value()); + EXPECT_TRUE(status.ok()); + EXPECT_EQ(3, b.count()); + EXPECT_EQ(a.count(), b.count()); + EXPECT_EQ(a.mean(), b.mean()); + EXPECT_EQ(a.pstdev(), b.pstdev()); + } else { + EXPECT_EQ(status_or_stream.status().code(), absl::StatusCode::kUnimplemented); + } +} + +TYPED_TEST(TypedStatisticTest, AttemptsToDeserializeBogusBehaveWell) { + // Deserializing corrupted data should either result in the statistic reporting + // it didn't implement deserialization, or having it report an internal failure. + const std::vector expected_status_list{absl::StatusCode::kInternal, + absl::StatusCode::kUnimplemented}; + TypeParam a; + std::istringstream bogus_input(std::string("BOGUS")); + const absl::Status status = a.deserializeNative(bogus_input); + EXPECT_FALSE(status.ok()); + EXPECT_THAT(expected_status_list, Contains(status.code())); +} + TYPED_TEST(TypedStatisticTest, StringOutput) { TypeParam a; @@ -307,9 +343,9 @@ TEST(StatisticTest, HdrStatisticPercentilesProto) { util.loadFromJson(Envoy::Filesystem::fileSystemForTest().fileReadToEnd( TestEnvironment::runfilesPath("test/test_data/hdr_proto_json.gold")), parsed_json_proto, Envoy::ProtobufMessage::getStrictValidationVisitor()); - const std::string json = util.getJsonStringFromMessage( + const std::string json = util.getJsonStringFromMessageOrDie( statistic.toProto(Statistic::SerializationDomain::DURATION), true, true); - const std::string golden_json = util.getJsonStringFromMessage(parsed_json_proto, true, true); + const std::string golden_json = util.getJsonStringFromMessageOrDie(parsed_json_proto, true, true); EXPECT_THAT(statistic.toProto(Statistic::SerializationDomain::DURATION), Envoy::ProtoEq(parsed_json_proto)) << json << "\n" @@ -329,9 +365,9 @@ TEST(StatisticTest, CircllhistStatisticPercentilesProto) { util.loadFromJson(Envoy::Filesystem::fileSystemForTest().fileReadToEnd( TestEnvironment::runfilesPath("test/test_data/circllhist_proto_json.gold")), parsed_json_proto, Envoy::ProtobufMessage::getStrictValidationVisitor()); - const std::string json = util.getJsonStringFromMessage( + const std::string json = util.getJsonStringFromMessageOrDie( statistic.toProto(Statistic::SerializationDomain::DURATION), true, true); - const std::string golden_json = util.getJsonStringFromMessage(parsed_json_proto, true, true); + const std::string golden_json = util.getJsonStringFromMessageOrDie(parsed_json_proto, true, true); EXPECT_THAT(statistic.toProto(Statistic::SerializationDomain::DURATION), Envoy::ProtoEq(parsed_json_proto)) << json << "\n" diff --git a/test/stream_decoder_test.cc b/test/stream_decoder_test.cc index 8c426bbd8..8614f4f32 100644 --- a/test/stream_decoder_test.cc +++ b/test/stream_decoder_test.cc @@ -26,7 +26,8 @@ class StreamDecoderTest : public Test, public StreamDecoderCompletionCallback { : api_(Envoy::Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), request_headers_(std::make_shared( - std::initializer_list>({{":method", "GET"}}))), + std::initializer_list>( + {{":method", "GET"}, {":path", "/foo"}}))), http_tracer_(std::make_unique()), test_header_(std::make_unique( std::initializer_list>({{":status", "200"}}))), @@ -117,7 +118,8 @@ TEST_F(StreamDecoderTest, LatencyIsNotMeasured) { NiceMock stream_info; EXPECT_CALL(stream_encoder, encodeHeaders(Envoy::HeaderMapEqualRef(request_headers_.get()), true)); - decoder->onPoolReady(stream_encoder, ptr, stream_info); + decoder->onPoolReady(stream_encoder, ptr, stream_info, + {} /*absl::optional protocol*/); decoder->decodeHeaders(std::move(test_header_), true); EXPECT_EQ(0, connect_statistic_.count()); EXPECT_EQ(0, latency_statistic_.count()); @@ -134,9 +136,9 @@ TEST_F(StreamDecoderTest, LatencyIsMeasured) { const Envoy::Tracing::Decision) -> Envoy::Tracing::Span* { EXPECT_EQ(Envoy::Tracing::OperationName::Egress, config.operationName()); auto* span = new Envoy::Tracing::MockSpan(); - EXPECT_CALL(*span, injectContext(_)).Times(1); + EXPECT_CALL(*span, injectContext(_)); EXPECT_CALL(*span, setTag(_, _)).Times(12); - EXPECT_CALL(*span, finishSpan()).Times(1); + EXPECT_CALL(*span, finishSpan()); return span; })); @@ -152,7 +154,8 @@ TEST_F(StreamDecoderTest, LatencyIsMeasured) { Envoy::Upstream::HostDescriptionConstSharedPtr ptr; NiceMock stream_info; EXPECT_CALL(stream_encoder, encodeHeaders(_, true)); - decoder->onPoolReady(stream_encoder, ptr, stream_info); + decoder->onPoolReady(stream_encoder, ptr, stream_info, + {} /*absl::optional protocol*/); EXPECT_EQ(1, connect_statistic_.count()); decoder->decodeHeaders(std::move(test_header_), false); EXPECT_EQ(0, stream_decoder_export_latency_callbacks_); @@ -211,6 +214,9 @@ TEST_F(StreamDecoderTest, StreamResetReasonToResponseFlag) { ASSERT_EQ(StreamDecoder::streamResetReasonToResponseFlag( Envoy::Http::StreamResetReason::RemoteRefusedStreamReset), Envoy::StreamInfo::ResponseFlag::UpstreamRemoteReset); + ASSERT_EQ( + StreamDecoder::streamResetReasonToResponseFlag(Envoy::Http::StreamResetReason::ConnectError), + Envoy::StreamInfo::ResponseFlag::UpstreamRemoteReset); } // This test parameterization structure carries the response header name that ought to be treated @@ -245,5 +251,22 @@ TEST_P(LatencyTrackingViaResponseHeaderTest, LatencyTrackingViaResponseHeader) { EXPECT_EQ(origin_latency_statistic_.count(), expected_count); } +// Test that a single response carrying multiple valid latency response headers does not +// get tracked. This will also yield a burst of warnings, which we unfortunately cannot +// easily verify here. +TEST_F(StreamDecoderTest, LatencyTrackingWithMultipleResponseHeadersFails) { + const std::string kLatencyTrackingResponseHeader = "latency-in-response-header"; + auto decoder = new StreamDecoder( + *dispatcher_, time_system_, *this, [](bool, bool) {}, connect_statistic_, latency_statistic_, + response_header_size_statistic_, response_body_size_statistic_, origin_latency_statistic_, + request_headers_, false, 0, random_generator_, http_tracer_, kLatencyTrackingResponseHeader); + Envoy::Http::ResponseHeaderMapPtr headers{ + new Envoy::Http::TestResponseHeaderMapImpl{{":status", "200"}, + {kLatencyTrackingResponseHeader, "1"}, + {kLatencyTrackingResponseHeader, "2"}}}; + decoder->decodeHeaders(std::move(headers), true); + EXPECT_EQ(origin_latency_statistic_.count(), 0); +} + } // namespace Client } // namespace Nighthawk diff --git a/test/termination_predicate_test.cc b/test/termination_predicate_test.cc index 387883152..4ab0bab53 100644 --- a/test/termination_predicate_test.cc +++ b/test/termination_predicate_test.cc @@ -25,7 +25,7 @@ class TerminationPredicateTest : public Test { TEST_F(TerminationPredicateTest, DurationTerminationPredicateImplTest) { const auto duration = 100us; - DurationTerminationPredicateImpl pred(time_system, duration, time_system.systemTime()); + DurationTerminationPredicateImpl pred(time_system, duration, time_system.monotonicTime()); EXPECT_EQ(pred.evaluate(), TerminationPredicate::Status::PROCEED); // move to the edge. time_system.advanceTimeWait(duration); diff --git a/test/test_data/output_formatter.json.gold b/test/test_data/output_formatter.json.gold index 1048bc9fd..d1f142906 100644 --- a/test/test_data/output_formatter.json.gold +++ b/test/test_data/output_formatter.json.gold @@ -190,7 +190,8 @@ "value": "1" } ], - "execution_duration": "1s" + "execution_duration": "1s", + "execution_start": "2009-02-13T23:31:31.567Z" }, { "name": "worker_1", @@ -558,7 +559,8 @@ "value": "1" } ], - "execution_duration": "1s" + "execution_duration": "1s", + "execution_start": "2009-02-13T23:31:31.567Z" } ], "version": { diff --git a/test/test_data/output_formatter.yaml.gold b/test/test_data/output_formatter.yaml.gold index bc1b9b750..6a4255c66 100644 --- a/test/test_data/output_formatter.yaml.gold +++ b/test/test_data/output_formatter.yaml.gold @@ -126,6 +126,7 @@ results: - name: foo value: 1 execution_duration: 1s + execution_start: 2009-02-13T23:31:31.567Z - name: worker_1 statistics: - count: 3 @@ -358,6 +359,7 @@ results: - name: foo value: 1 execution_duration: 1s + execution_start: 2009-02-13T23:31:31.567Z version: version: major_number: @version_major@ diff --git a/test/worker_test.cc b/test/worker_test.cc index c30a8b323..b0fb2a280 100644 --- a/test/worker_test.cc +++ b/test/worker_test.cc @@ -45,8 +45,8 @@ class WorkerTest : public Test { TEST_F(WorkerTest, WorkerExecutesOnThread) { InSequence in_sequence; - EXPECT_CALL(tls_, registerThread(_, false)).Times(1); - EXPECT_CALL(tls_, allocateSlot()).Times(1); + EXPECT_CALL(tls_, registerThread(_, false)); + EXPECT_CALL(tls_, allocateSlot()); TestWorker worker(*api_, tls_); NiceMock dispatcher; @@ -57,7 +57,7 @@ TEST_F(WorkerTest, WorkerExecutesOnThread) { worker.start(); worker.waitForCompletion(); - EXPECT_CALL(tls_, shutdownThread()).Times(1); + EXPECT_CALL(tls_, shutdownThread()); ASSERT_TRUE(worker.ran_); worker.shutdown(); } diff --git a/tools/check_format.sh b/tools/check_format.sh index 1389fd4d5..847b28ddd 100755 --- a/tools/check_format.sh +++ b/tools/check_format.sh @@ -8,11 +8,11 @@ TO_CHECK="${2:-$PWD}" bazel run @envoy//tools:code_format/check_format.py -- \ --skip_envoy_build_rule_check --namespace_check Nighthawk \ --build_fixer_check_excluded_paths=$(realpath ".") \ - --include_dir_order envoy,nighthawk,external/source/envoy,external,api,common,source,exe,server,client,grpcpp,test_common,test \ + --include_dir_order envoy,nighthawk,external/source/envoy,external,api,common,source,exe,server,client,grpcpp,request_source,test_common,test \ $1 $TO_CHECK # The include checker doesn't support per-file checking, so we only # run it when a full check is requested. if [ $PWD == $TO_CHECK ]; then bazel run //tools:check_envoy_includes.py -fi +fi \ No newline at end of file diff --git a/tools/format_python_tools.sh b/tools/format_python_tools.sh index 2523042d5..e1657df41 100755 --- a/tools/format_python_tools.sh +++ b/tools/format_python_tools.sh @@ -26,11 +26,12 @@ EXCLUDE="--exclude=benchmarks/tmp/*,.cache/*,*/venv/*,tools/format_python_tools. # E124 Closing bracket does not match visual indentation # E125 Continuation line with same indent as next logical line # E126 Continuation line over-indented for hanging indent +# W504 line break after binary operator # We ignore false positives because of what look like pytest peculiarities # F401 Module imported but unused # F811 Redefinition of unused name from line n -flake8 . ${EXCLUDE} --ignore=E114,E111,E501,F401,F811,E124,E125,E126,D --count --show-source --statistics +flake8 . ${EXCLUDE} --ignore=E114,E111,E501,F401,F811,E124,E125,E126,W504,D --count --show-source --statistics # D = Doc comment related checks (We check both p257 AND google conventions). flake8 . ${EXCLUDE} --docstring-convention pep257 --select=D --count --show-source --statistics flake8 . ${EXCLUDE} --docstring-convention google --select=D --count --show-source --statistics