diff --git a/.bazelrc b/.bazelrc index f80346d54..9139ec35b 100644 --- a/.bazelrc +++ b/.bazelrc @@ -28,6 +28,7 @@ build --enable_platform_specific_config # Enable position independent code, this option is not supported on Windows and default on on macOS. build:linux --copt=-fPIC +build:linux --cxxopt=-std=c++17 # We already have absl in the build, define absl=1 to tell googletest to use absl for backtrace. build --define absl=1 @@ -71,6 +72,7 @@ build:clang-asan --config=asan build:clang-asan --linkopt -fuse-ld=lld # macOS ASAN/UBSAN +build:macos --cxxopt=-std=c++17 build:macos-asan --config=asan # Workaround, see https://github.com/bazelbuild/bazel/issues/6932 build:macos-asan --copt -Wno-macro-redefined @@ -87,6 +89,8 @@ build:clang-tsan --define ENVOY_CONFIG_TSAN=1 build:clang-tsan --copt -fsanitize=thread build:clang-tsan --linkopt -fsanitize=thread build:clang-tsan --linkopt -fuse-ld=lld +build:clang-tsan --build_tag_filters=-no_san,-no_tsan +build:clang-tsan --test_tag_filters=-no_san,-no_tsan # Needed due to https://github.com/libevent/libevent/issues/777 build:clang-tsan --copt -DEVENT__DISABLE_DEBUG_MODE @@ -134,23 +138,24 @@ build:coverage --strategy=CoverageReport=sandboxed,local build:coverage --experimental_use_llvm_covmap build:coverage --collect_code_coverage build:coverage --test_tag_filters=-nocoverage -build:coverage --instrumentation_filter="//source(?!/common/chromium_url|/extensions/quic_listeners/quiche/platform)[/:],//include[/:]" -coverage:test-coverage --test_arg="--log-path /dev/null" +build:coverage --instrumentation_filter="//source(?!/extensions/quic_listeners/quiche/platform)[/:],//include[/:]" coverage:test-coverage --test_arg="-l trace" -coverage:fuzz-coverage --config=asan-fuzzer +coverage:fuzz-coverage --config=plain-fuzzer coverage:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh # Remote execution: https://docs.bazel.build/versions/master/remote-execution.html -build:rbe-toolchain 
--host_platform=@envoy_build_tools//toolchains:rbe_ubuntu_clang_platform -build:rbe-toolchain --platforms=@envoy_build_tools//toolchains:rbe_ubuntu_clang_platform build:rbe-toolchain --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1 build:rbe-toolchain-clang --config=rbe-toolchain +build:rbe-toolchain-clang --platforms=@rbe_ubuntu_clang//config:platform +build:rbe-toolchain-clang --host_platform=@rbe_ubuntu_clang//config:platform build:rbe-toolchain-clang --crosstool_top=@rbe_ubuntu_clang//cc:toolchain build:rbe-toolchain-clang --extra_toolchains=@rbe_ubuntu_clang//config:cc-toolchain build:rbe-toolchain-clang --action_env=CC=clang --action_env=CXX=clang++ --action_env=PATH=/usr/sbin:/usr/bin:/sbin:/bin:/opt/llvm/bin build:rbe-toolchain-clang-libc++ --config=rbe-toolchain +build:rbe-toolchain-clang-libc++ --platforms=@rbe_ubuntu_clang_libcxx//config:platform +build:rbe-toolchain-clang-libc++ --host_platform=@rbe_ubuntu_clang_libcxx//config:platform build:rbe-toolchain-clang-libc++ --crosstool_top=@rbe_ubuntu_clang_libcxx//cc:toolchain build:rbe-toolchain-clang-libc++ --extra_toolchains=@rbe_ubuntu_clang_libcxx//config:cc-toolchain build:rbe-toolchain-clang-libc++ --action_env=CC=clang --action_env=CXX=clang++ --action_env=PATH=/usr/sbin:/usr/bin:/sbin:/bin:/opt/llvm/bin @@ -162,7 +167,13 @@ build:rbe-toolchain-msan --linkopt=-L/opt/libcxx_msan/lib build:rbe-toolchain-msan --linkopt=-Wl,-rpath,/opt/libcxx_msan/lib build:rbe-toolchain-msan --config=clang-msan +build:rbe-toolchain-tsan --linkopt=-L/opt/libcxx_tsan/lib +build:rbe-toolchain-tsan --linkopt=-Wl,-rpath,/opt/libcxx_tsan/lib +build:rbe-toolchain-tsan --config=clang-tsan + build:rbe-toolchain-gcc --config=rbe-toolchain +build:rbe-toolchain-gcc --platforms=@rbe_ubuntu_gcc//config:platform +build:rbe-toolchain-gcc --host_platform=@rbe_ubuntu_gcc//config:platform build:rbe-toolchain-gcc --crosstool_top=@rbe_ubuntu_gcc//cc:toolchain build:rbe-toolchain-gcc --extra_toolchains=@rbe_ubuntu_gcc//config:cc-toolchain @@ 
-206,7 +217,7 @@ build:remote-msvc-cl --config=rbe-toolchain-msvc-cl # Docker sandbox # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:f21773ab398a879f976936f72c78c9dd3718ca1e +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:736b8db2e1f0b55edb50719d2f8ddf383f46030b build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker @@ -228,6 +239,10 @@ build:docker-msan --config=docker-sandbox build:docker-msan --config=rbe-toolchain-clang-libc++ build:docker-msan --config=rbe-toolchain-msan +build:docker-tsan --config=docker-sandbox +build:docker-tsan --config=rbe-toolchain-clang-libc++ +build:docker-tsan --config=rbe-toolchain-tsan + # CI configurations build:remote-ci --remote_cache=grpcs://remotebuildexecution.googleapis.com build:remote-ci --remote_executor=grpcs://remotebuildexecution.googleapis.com @@ -266,6 +281,7 @@ build:windows --define manual_stamp=manual_stamp build:windows --copt="-DCARES_STATICLIB" build:windows --copt="-DNGHTTP2_STATICLIB" build:windows --copt="-DCURL_STATICLIB" +build:windows --cxxopt="/std:c++17" # Required to work around build defects on Windows MSVC cl # Unguarded gcc pragmas in quiche are not recognized by MSVC @@ -295,4 +311,4 @@ build:windows --features=static_link_msvcrt build:windows --dynamic_mode=off try-import %workspace%/clang.bazelrc -try-import %workspace%/user.bazelrc \ No newline at end of file +try-import %workspace%/user.bazelrc diff --git a/.bazelversion b/.bazelversion index fd2a01863..bea438e9a 100644 --- a/.bazelversion +++ b/.bazelversion @@ -1 +1 @@ -3.1.0 +3.3.1 diff --git a/.circleci/config.yml b/.circleci/config.yml index f24261777..59262e25e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,6 +1,6 @@ references: - envoy-build-image: 
&envoy-build-image # July 1st, 2020 - envoyproxy/envoy-build-ubuntu:f21773ab398a879f976936f72c78c9dd3718ca1e + envoy-build-image: &envoy-build-image # July 17th, 2020 + envoyproxy/envoy-build-ubuntu:736b8db2e1f0b55edb50719d2f8ddf383f46030b version: 2 jobs: build: @@ -24,13 +24,13 @@ jobs: steps: - checkout - run: ci/do_ci.sh clang_tidy - test_with_valgrind: + test_gcc: docker: - image: *envoy-build-image resource_class: xlarge steps: - checkout - - run: ci/do_ci.sh test_with_valgrind + - run: ci/do_ci.sh test_gcc coverage: docker: - image: *envoy-build-image @@ -57,7 +57,9 @@ jobs: resource_class: xlarge steps: - checkout - - run: ci/do_ci.sh asan + - run: + command: ci/do_ci.sh asan + no_output_timeout: 30m tsan: docker: - image: *envoy-build-image @@ -88,8 +90,8 @@ workflows: jobs: - build - test + - test_gcc - clang_tidy - # - test_with_valgrind - coverage - asan - tsan diff --git a/.clang-tidy b/.clang-tidy index 02fb4a4af..f945af6a1 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -1,4 +1,4 @@ -Checks: 'clang-diagnostic-*,clang-analyzer-*,abseil-*,bugprone-*,modernize-*,performance-*,readability-redundant-*,readability-braces-around-statements,-abseil-no-internal-dependencies,-modernize-use-trailing-return-type,-modernize-avoid-bind' +Checks: 'clang-diagnostic-*,clang-analyzer-*,abseil-*,bugprone-*,modernize-*,performance-*,readability-redundant-*,readability-braces-around-statements,-abseil-no-internal-dependencies,-modernize-use-trailing-return-type,-modernize-avoid-bind,-modernize-use-nodiscard,-modernize-concat-nested-namespaces' WarningsAsErrors: '*' diff --git a/.gitignore b/.gitignore index a099a0c5f..2970d2e8a 100644 --- a/.gitignore +++ b/.gitignore @@ -17,4 +17,4 @@ tools/pyformat test/coverage/BUILD *.bak default.profraw -tmp-* \ No newline at end of file +tmp-* diff --git a/BUILD b/BUILD index 7b12a5738..7c0820a4b 100644 --- a/BUILD +++ b/BUILD @@ -30,6 +30,7 @@ envoy_cc_binary( name = "nighthawk_test_server", repository = "@envoy", deps = [ + 
"//source/server:http_dynamic_delay_filter_config", "//source/server:http_test_server_filter_config", "@envoy//source/exe:envoy_main_entry_lib", ], diff --git a/api/server/response_options.proto b/api/server/response_options.proto index 69b3bd7d6..0644d3d0e 100644 --- a/api/server/response_options.proto +++ b/api/server/response_options.proto @@ -5,8 +5,19 @@ package nighthawk.server; import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; import "envoy/api/v2/core/base.proto"; +import "google/protobuf/duration.proto"; -// Options that control the test server response. +message ConcurrencyBasedLinearDelay { + // Minimal delay to add to replies. + google.protobuf.Duration minimal_delay = 1 [(validate.rules).duration.gte.nanos = 0]; + // Factor to use when adding latency as concurrency increases. + google.protobuf.Duration concurrency_delay_factor = 2 [(validate.rules).duration.gte.nanos = 0]; +} + +// Options that control the test server response. Can be provided via request +// headers as well as via static file-based configuration. In case both are +// provided, a merge will happen, in which case the header-provided +// configuration will override. message ResponseOptions { // List of additional response headers. repeated envoy.api.v2.core.HeaderValueOption response_headers = 1; @@ -14,4 +25,11 @@ message ResponseOptions { uint32 response_body_size = 2 [(validate.rules).uint32 = {lte: 4194304}]; // If true, then echo request headers in the response body. bool echo_request_headers = 3; + + oneof oneof_delay_options { + // Static delay duration. + google.protobuf.Duration static_delay = 4 [(validate.rules).duration.gte.nanos = 0]; + // Concurrency based linear delay configuration. 
+ ConcurrencyBasedLinearDelay concurrency_based_linear_delay = 5; + } } diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index c3dc11867..7071412de 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,10 +1,10 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "f6b86a58b264b46a57d71a9b3b0989b2969df408" # July 2nd, 2020 -ENVOY_SHA = "5c802266f0cdc5193b6e0247a0f5f20f39f6cc36b688b194e60a853148ba438a" +ENVOY_COMMIT = "13da8f415c372e7fde3b0105f58f57348e84acb5" # July 17th, 2020 +ENVOY_SHA = "525307fb73125c8c6e4ce4761c9f09b781cd7b7602a8711afe07be45ad55de52" -HDR_HISTOGRAM_C_VERSION = "0.9.13" # Feb 22nd, 2020 -HDR_HISTOGRAM_C_SHA = "2bd4a4631b64f2f8cf968ef49dd03ff3c51b487c3c98a01217ae4cf4a35b8310" +HDR_HISTOGRAM_C_VERSION = "0.11.0" # July 14th, 2020 +HDR_HISTOGRAM_C_SHA = "c00696b3d81776675aa2bc62d3642e31bd8a48cc9619c9bd7d4a78762896e353" def nighthawk_dependencies(): http_archive( diff --git a/benchmarks/BUILD b/benchmarks/BUILD index 9b6ed35b1..179c3455f 100644 --- a/benchmarks/BUILD +++ b/benchmarks/BUILD @@ -7,6 +7,7 @@ py_binary( srcs = [ "benchmarks.py", ], + srcs_version = "PY2AND3", deps = [ ":benchmarks_envoy_proxy_lib", "//test/integration:integration_test_base_lean", @@ -20,6 +21,7 @@ py_test( "test/test_discovery.py", ], main = "benchmarks.py", + srcs_version = "PY2AND3", deps = [ ":benchmarks_envoy_proxy_lib", "//test/integration:integration_test_base", @@ -35,4 +37,5 @@ py_library( "configurations/envoy_proxy.yaml", "test/templates/simple_plot.html", ], + srcs_version = "PY2AND3", ) diff --git a/benchmarks/envoy_proxy.py b/benchmarks/envoy_proxy.py index 9e7fd1909..06ce8bc08 100644 --- a/benchmarks/envoy_proxy.py +++ b/benchmarks/envoy_proxy.py @@ -39,13 +39,12 @@ def __init__(self, config_template_path, server_ip, ip_version, parameters=dict( tag: String. Supply this to get recognizeable output locations (optional). """ # If no explicit envoy path is passed, we'll use nighthawk_test_server. 
- super(EnvoyProxyServer, self).__init__( - os.getenv("ENVOY_PATH", "nighthawk_test_server"), - config_template_path, - server_ip, - ip_version, - parameters=parameters, - tag=tag) + super(EnvoyProxyServer, self).__init__(os.getenv("ENVOY_PATH", "nighthawk_test_server"), + config_template_path, + server_ip, + ip_version, + parameters=parameters, + tag=tag) self.docker_image = os.getenv("ENVOY_DOCKER_IMAGE_TO_TEST", "") @@ -83,15 +82,14 @@ def setUp(self): # TODO(oschaaf): how should this interact with multiple backends? self.parameters["proxy_ip"] = self.test_server.server_ip self.parameters["server_port"] = self.test_server.server_port - proxy_server = EnvoyProxyServer( - self._proxy_config, - self.server_ip, - self.ip_version, - parameters=self.parameters, - tag=self.tag) + proxy_server = EnvoyProxyServer(self._proxy_config, + self.server_ip, + self.ip_version, + parameters=self.parameters, + tag=self.tag) assert (proxy_server.start()) - logging.info("envoy proxy listening at {ip}:{port}".format( - ip=proxy_server.server_ip, port=proxy_server.server_port)) + logging.info("envoy proxy listening at {ip}:{port}".format(ip=proxy_server.server_ip, + port=proxy_server.server_port)) self.proxy_server = proxy_server def tearDown(self): diff --git a/benchmarks/test/test_discovery.py b/benchmarks/test/test_discovery.py index 09f5e1636..bd5eb21d9 100644 --- a/benchmarks/test/test_discovery.py +++ b/benchmarks/test/test_discovery.py @@ -11,7 +11,7 @@ import os from test.integration.integration_test_fixtures import (http_test_server_fixture, https_test_server_fixture) -from test.integration import utility +from test.integration import asserts from envoy_proxy import (inject_envoy_http_proxy_fixture, proxy_config) from rules_python.python.runfiles import runfiles from shutil import copyfile @@ -49,11 +49,11 @@ def _run_benchmark(fixture, connection_counter = "upstream_cx_http1_total" # Some arbitrary sanity checks - utility.assertCounterGreaterEqual(counters, 
"benchmark.http_2xx", + asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", (concurrency * rps * duration) * 0.99) - utility.assertGreater(counters["upstream_cx_rx_bytes_total"], response_count * response_size) - utility.assertGreater(counters["upstream_cx_tx_bytes_total"], request_count * request_body_size) - utility.assertCounterEqual(counters, connection_counter, concurrency * max_connections) + asserts.assertGreater(counters["upstream_cx_rx_bytes_total"], response_count * response_size) + asserts.assertGreater(counters["upstream_cx_tx_bytes_total"], request_count * request_body_size) + asserts.assertCounterEqual(counters, connection_counter, concurrency * max_connections) # Could potentially set thresholds on acceptable latency here. @@ -76,9 +76,8 @@ def _run_benchmark(fixture, with open(os.path.join(fixture.test_server.tmpdir, "proxy_version.txt"), "w") as f: f.write(fixture.proxy_server.getCliVersionString()) r = runfiles.Create() - copyfile( - r.Rlocation("nighthawk/benchmarks/test/templates/simple_plot.html"), - os.path.join(fixture.test_server.tmpdir, "simple_plot.html")) + copyfile(r.Rlocation("nighthawk/benchmarks/test/templates/simple_plot.html"), + os.path.join(fixture.test_server.tmpdir, "simple_plot.html")) # Test via injected Envoy diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 39c618052..92658d437 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -7,31 +7,25 @@ set -u export BUILDIFIER_BIN="${BUILDIFIER_BIN:=/usr/local/bin/buildifier}" export BUILDOZER_BIN="${BUILDOZER_BIN:=/usr/local/bin/buildozer}" export NUM_CPUS=${NUM_CPUS:=$(grep -c ^processor /proc/cpuinfo)} -export CIRCLECI=${CIRCLECI:="")} +export CIRCLECI=${CIRCLECI:=""} export BAZEL_EXTRA_TEST_OPTIONS=${BAZEL_EXTRA_TEST_OPTIONS:=""} export BAZEL_OPTIONS=${BAZEL_OPTIONS:=""} export BAZEL_BUILD_EXTRA_OPTIONS=${BAZEL_BUILD_EXTRA_OPTIONS:=""} export SRCDIR=${SRCDIR:="${PWD}"} +export CLANG_FORMAT=clang-format function do_build () { - bazel build $BAZEL_BUILD_OPTIONS 
--verbose_failures=true //:nighthawk + bazel build $BAZEL_BUILD_OPTIONS //:nighthawk tools/update_cli_readme_documentation.sh --mode check } function do_opt_build () { - bazel build $BAZEL_BUILD_OPTIONS -c opt --verbose_failures=true //:nighthawk + bazel build $BAZEL_BUILD_OPTIONS -c opt //:nighthawk } function do_test() { - bazel test $BAZEL_BUILD_OPTIONS $BAZEL_TEST_OPTIONS \ - --test_output=all \ - //test/... -} - -function do_test_with_valgrind() { - apt-get update && apt-get install valgrind && \ - bazel build $BAZEL_BUILD_OPTIONS -c dbg //test/... && \ - nighthawk/tools/valgrind-tests.sh + bazel build $BAZEL_BUILD_OPTIONS //test/... + bazel test $BAZEL_TEST_OPTIONS --test_output=all //test/... } function do_clang_tidy() { @@ -41,15 +35,17 @@ function do_clang_tidy() { function do_coverage() { export TEST_TARGETS="//test/..." echo "bazel coverage build with tests ${TEST_TARGETS}" - - # Reduce the amount of memory Bazel tries to use to prevent it from launching too many subprocesses. - # This should prevent the system from running out of memory and killing tasks. See discussion on - # https://github.com/envoyproxy/envoy/pull/5611. - [ -z "$CIRCLECI" ] || export BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --local_ram_resources=12288" test/run_nighthawk_bazel_coverage.sh ${TEST_TARGETS} exit 0 } +function setup_gcc_toolchain() { + export CC=gcc + export CXX=g++ + export BAZEL_COMPILER=gcc + echo "$CC/$CXX toolchain configured" +} + function setup_clang_toolchain() { export PATH=/opt/llvm/bin:$PATH export CC=clang @@ -93,7 +89,10 @@ function do_tsan() { echo "bazel TSAN debug build with tests" echo "Building and testing envoy tests..." cd "${SRCDIR}" - [ -z "$CIRCLECI" ] || export BAZEL_BUILD_OPTIONS="${BAZEL_TEST_OPTIONS} --local_ram_resources=12288" + run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-tsan -- //source/exe/... && \ + run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-tsan -- //source/server/... 
&& \ + run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-tsan -- //test/mocks/... && \ + run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-tsan -- //test/... && \ run_bazel test ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-tsan //test/... } @@ -150,21 +149,6 @@ function do_fix_format() { ./tools/format_python_tools.sh fix } -if [ -n "$CIRCLECI" ]; then - if [[ -f "${HOME:-/root}/.gitconfig" ]]; then - mv "${HOME:-/root}/.gitconfig" "${HOME:-/root}/.gitconfig_save" - echo 1 - fi - # We constrain parallelism in CI to avoid running out of memory. - NUM_CPUS=8 - if [[ "$1" == "asan" ]]; then - NUM_CPUS=5 - fi - if [[ "$1" == "coverage" ]]; then - NUM_CPUS=3 - fi -fi - if grep 'docker\|lxc' /proc/1/cgroup; then # Create a fake home. Python site libs tries to do getpwuid(3) if we don't and the CI # Docker image gets confused as it has no passwd entry when running non-root @@ -190,65 +174,91 @@ fi export BAZEL_EXTRA_TEST_OPTIONS="--test_env=ENVOY_IP_TEST_VERSIONS=v4only ${BAZEL_EXTRA_TEST_OPTIONS}" export BAZEL_BUILD_OPTIONS=" \ --verbose_failures ${BAZEL_OPTIONS} --action_env=HOME --action_env=PYTHONUSERBASE \ ---jobs=${NUM_CPUS} --show_task_finish --experimental_generate_json_trace_profile ${BAZEL_BUILD_EXTRA_OPTIONS}" +--experimental_local_memory_estimate \ +--show_task_finish --experimental_generate_json_trace_profile ${BAZEL_BUILD_EXTRA_OPTIONS}" + +if [ -n "$CIRCLECI" ]; then + if [[ -f "${HOME:-/root}/.gitconfig" ]]; then + mv "${HOME:-/root}/.gitconfig" "${HOME:-/root}/.gitconfig_save" + echo 1 + fi + + # Asan has huge memory requirements in its link steps. + # As of the new coverage methodology introduced in Envoy, that has grown memory requirements too. + # Hence we heavily reduce parallellism, to avoid being OOM killed. 
+ if [[ "$1" == "coverage" ]]; then + NUM_CPUS=4 + elif [[ "$1" == "asan" ]]; then + NUM_CPUS=3 + else + NUM_CPUS=8 + fi +fi +echo "Running with ${NUM_CPUS} cpus" +BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --jobs=${NUM_CPUS}" + export BAZEL_TEST_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=HOME --test_env=PYTHONUSERBASE \ --test_env=UBSAN_OPTIONS=print_stacktrace=1 \ --cache_test_results=no --test_output=all ${BAZEL_EXTRA_TEST_OPTIONS}" -setup_clang_toolchain -export CLANG_FORMAT=clang-format - case "$1" in build) + setup_clang_toolchain do_build exit 0 ;; test) + setup_clang_toolchain do_test exit 0 ;; - test_with_valgrind) - do_test_with_valgrind + test_gcc) + setup_gcc_toolchain + do_test exit 0 ;; clang_tidy) - if [ -n "$CIRCLECI" ]; then - # Decrease parallelism to avoid running out of memory - NUM_CPUS=7 - fi + setup_clang_toolchain do_clang_tidy exit 0 ;; coverage) + setup_clang_toolchain do_coverage exit 0 ;; asan) + setup_clang_toolchain do_asan exit 0 ;; tsan) + setup_clang_toolchain do_tsan exit 0 ;; docker) + setup_clang_toolchain do_docker exit 0 ;; check_format) + setup_clang_toolchain do_check_format exit 0 ;; fix_format) + setup_clang_toolchain do_fix_format exit 0 ;; benchmark_with_own_binaries) + setup_clang_toolchain do_benchmark_with_own_binaries exit 0 ;; *) - echo "must be one of [build,test,clang_tidy,test_with_valgrind,coverage,asan,tsan,benchmark_with_own_binaries,docker,check_format,fix_format]" + echo "must be one of [build,test,clang_tidy,coverage,asan,tsan,benchmark_with_own_binaries,docker,check_format,fix_format,test_gcc]" exit 1 ;; esac diff --git a/requirements.txt b/requirements.txt index 4ce0db090..9823650f6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,3 +3,5 @@ pytest pytest-dependency pytest-xdist pyyaml +zipp +importlib_metadata \ No newline at end of file diff --git a/source/client/BUILD b/source/client/BUILD index e61a60beb..19c7ecda1 100644 --- a/source/client/BUILD +++ b/source/client/BUILD @@ -46,6 +46,7 @@ 
envoy_cc_library( "//include/nighthawk/common:base_includes", "//source/common:request_source_impl_lib", "//source/common:nighthawk_common_lib", + "@envoy//source/common/common:random_generator_lib_with_external_headers", "@envoy//source/common/access_log:access_log_manager_lib_with_external_headers", "@envoy//source/common/api:api_lib_with_external_headers", "@envoy//source/common/common:cleanup_lib_with_external_headers", diff --git a/source/client/benchmark_client_impl.h b/source/client/benchmark_client_impl.h index a40e90a19..e835f58f3 100644 --- a/source/client/benchmark_client_impl.h +++ b/source/client/benchmark_client_impl.h @@ -15,6 +15,7 @@ #include "nighthawk/common/statistic.h" #include "external/envoy/source/common/common/logger.h" +#include "external/envoy/source/common/common/random_generator.h" #include "external/envoy/source/common/http/http1/conn_pool.h" #include "external/envoy/source/common/http/http2/conn_pool.h" #include "external/envoy/source/common/runtime/runtime_impl.h" @@ -129,7 +130,7 @@ class BenchmarkClientHttpImpl : public BenchmarkClient, uint32_t max_active_requests_{UINT32_MAX}; uint32_t max_requests_per_connection_{UINT32_MAX}; Envoy::Event::TimerPtr timer_; - Envoy::Runtime::RandomGeneratorImpl generator_; + Envoy::Random::RandomGeneratorImpl generator_; uint64_t requests_completed_{}; uint64_t requests_initiated_{}; bool measure_latencies_{}; diff --git a/source/client/process_impl.h b/source/client/process_impl.h index 11d265fb8..ebb41990d 100644 --- a/source/client/process_impl.h +++ b/source/client/process_impl.h @@ -16,6 +16,7 @@ #include "external/envoy/source/common/access_log/access_log_manager_impl.h" #include "external/envoy/source/common/common/logger.h" +#include "external/envoy/source/common/common/random_generator.h" #include "external/envoy/source/common/event/real_time_system.h" #include "external/envoy/source/common/grpc/context_impl.h" #include "external/envoy/source/common/http/context_impl.h" @@ -133,7 +134,7 
@@ class ProcessImpl : public Process, public Envoy::Logger::Loggablemutable_headers(); - headers->iterate( - [](const Envoy::Http::HeaderEntry& header, - void* context) -> Envoy::Http::RequestHeaderMap::Iterate { - addHeader(static_cast(context), - header.key().getStringView(), header.value().getStringView()); - return Envoy::Http::RequestHeaderMap::Iterate::Continue; - }, - request_headers); + headers->iterate([&request_headers](const Envoy::Http::HeaderEntry& header) + -> Envoy::Http::HeaderMap::Iterate { + addHeader(request_headers, header.key().getStringView(), header.value().getStringView()); + return Envoy::Http::RequestHeaderMap::Iterate::Continue; + }); // TODO(oschaaf): add static configuration for other fields plus expectations ok = ok && stream->Write(response); } diff --git a/source/client/stream_decoder.h b/source/client/stream_decoder.h index 869a8d57e..d4b2c4fc1 100644 --- a/source/client/stream_decoder.h +++ b/source/client/stream_decoder.h @@ -12,6 +12,7 @@ #include "nighthawk/common/request_source.h" #include "nighthawk/common/statistic.h" +#include "external/envoy/source/common/common/random_generator.h" #include "external/envoy/source/common/http/header_map_impl.h" #include "external/envoy/source/common/stream_info/stream_info_impl.h" #include "external/envoy/source/common/tracing/http_tracer_impl.h" @@ -42,7 +43,7 @@ class StreamDecoder : public Envoy::Http::ResponseDecoder, Statistic& latency_statistic, Statistic& response_header_sizes_statistic, Statistic& response_body_sizes_statistic, HeaderMapPtr request_headers, bool measure_latencies, uint32_t request_body_size, - Envoy::Runtime::RandomGenerator& random_generator, + Envoy::Random::RandomGenerator& random_generator, Envoy::Tracing::HttpTracerSharedPtr& http_tracer) : dispatcher_(dispatcher), time_source_(time_source), decoder_completion_callback_(decoder_completion_callback), @@ -110,7 +111,7 @@ class StreamDecoder : public Envoy::Http::ResponseDecoder, const uint32_t request_body_size_; 
Envoy::Tracing::EgressConfigImpl config_; Envoy::StreamInfo::StreamInfoImpl stream_info_; - Envoy::Runtime::RandomGenerator& random_generator_; + Envoy::Random::RandomGenerator& random_generator_; Envoy::Tracing::HttpTracerSharedPtr& http_tracer_; Envoy::Tracing::SpanPtr active_span_; Envoy::StreamInfo::UpstreamTiming upstream_timing_; diff --git a/source/common/BUILD b/source/common/BUILD index f576ced60..c77ede286 100644 --- a/source/common/BUILD +++ b/source/common/BUILD @@ -109,7 +109,7 @@ envoy_cc_library( "@envoy//source/common/http:utility_lib_with_external_headers", "@envoy//source/common/network:utility_lib_with_external_headers", "@envoy//source/common/protobuf:utility_lib_with_external_headers", - "@envoy//source/common/stats:stats_lib_with_external_headers", + "@envoy//source/common/stats:histogram_lib_with_external_headers", "@envoy//source/exe:envoy_common_lib_with_external_headers", "@envoy//source/server/config_validation:server_lib_with_external_headers", ], diff --git a/source/common/statistic_impl.cc b/source/common/statistic_impl.cc index bd5750467..b4a697a66 100644 --- a/source/common/statistic_impl.cc +++ b/source/common/statistic_impl.cc @@ -236,4 +236,102 @@ nighthawk::client::Statistic HdrStatistic::toProto(SerializationDomain domain) c return proto; } -} // namespace Nighthawk \ No newline at end of file +CircllhistStatistic::CircllhistStatistic() { + histogram_ = hist_alloc(); + ASSERT(histogram_ != nullptr); +} + +CircllhistStatistic::~CircllhistStatistic() { hist_free(histogram_); } + +void CircllhistStatistic::addValue(uint64_t value) { + hist_insert_intscale(histogram_, value, 0, 1); + StatisticImpl::addValue(value); +} +double CircllhistStatistic::mean() const { return hist_approx_mean(histogram_); } +double CircllhistStatistic::pvariance() const { return pstdev() * pstdev(); } +double CircllhistStatistic::pstdev() const { + return count() == 0 ? 
std::nan("") : hist_approx_stddev(histogram_); +} + +StatisticPtr CircllhistStatistic::combine(const Statistic& statistic) const { + auto combined = std::make_unique(); + const auto& stat = dynamic_cast(statistic); + hist_accumulate(combined->histogram_, &this->histogram_, /*cnt=*/1); + hist_accumulate(combined->histogram_, &stat.histogram_, /*cnt=*/1); + + combined->min_ = std::min(this->min(), stat.min()); + combined->max_ = std::max(this->max(), stat.max()); + combined->count_ = this->count() + stat.count(); + return combined; +} + +StatisticPtr CircllhistStatistic::createNewInstanceOfSameType() const { + return std::make_unique(); +} + +nighthawk::client::Statistic CircllhistStatistic::toProto(SerializationDomain domain) const { + nighthawk::client::Statistic proto = StatisticImpl::toProto(domain); + if (count() == 0) { + return proto; + } + + // List of quantiles is based on hdr_proto_json.gold. + const std::vector quantiles{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.55, 0.6, + 0.65, 0.7, 0.75, 0.775, 0.8, 0.825, 0.85, 0.875, + 0.90, 0.925, 0.95, 0.975, 0.99, 0.995, 0.999, 1}; + std::vector computed_quantiles(quantiles.size(), 0.0); + hist_approx_quantile(histogram_, quantiles.data(), quantiles.size(), computed_quantiles.data()); + for (size_t i = 0; i < quantiles.size(); i++) { + nighthawk::client::Percentile* percentile = proto.add_percentiles(); + if (domain == Statistic::SerializationDomain::DURATION) { + setDurationFromNanos(*percentile->mutable_duration(), + static_cast(computed_quantiles[i])); + } else { + percentile->set_raw_value(computed_quantiles[i]); + } + percentile->set_percentile(quantiles[i]); + percentile->set_count(hist_approx_count_below(histogram_, computed_quantiles[i])); + } + + return proto; +} + +SinkableStatistic::SinkableStatistic(Envoy::Stats::Scope& scope, absl::optional worker_id) + : Envoy::Stats::HistogramImplHelper(scope.symbolTable()), scope_(scope), worker_id_(worker_id) { +} + +SinkableStatistic::~SinkableStatistic() { + // We must 
explicitly free the StatName here in order to supply the + // SymbolTable reference. + MetricImpl::clear(scope_.symbolTable()); +} + +Envoy::Stats::Histogram::Unit SinkableStatistic::unit() const { + return Envoy::Stats::Histogram::Unit::Unspecified; +} + +Envoy::Stats::SymbolTable& SinkableStatistic::symbolTable() { return scope_.symbolTable(); } + +SinkableHdrStatistic::SinkableHdrStatistic(Envoy::Stats::Scope& scope, + absl::optional worker_id) + : SinkableStatistic(scope, worker_id) {} + +void SinkableHdrStatistic::recordValue(uint64_t value) { + addValue(value); + // Currently in Envoy Scope implementation, deliverHistogramToSinks() will flush the histogram + // value directly to stats Sinks. + scope_.deliverHistogramToSinks(*this, value); +} + +SinkableCircllhistStatistic::SinkableCircllhistStatistic(Envoy::Stats::Scope& scope, + absl::optional worker_id) + : SinkableStatistic(scope, worker_id) {} + +void SinkableCircllhistStatistic::recordValue(uint64_t value) { + addValue(value); + // Currently in Envoy Scope implementation, deliverHistogramToSinks() will flush the histogram + // value directly to stats Sinks. + scope_.deliverHistogramToSinks(*this, value); +} + +} // namespace Nighthawk diff --git a/source/common/statistic_impl.h b/source/common/statistic_impl.h index 981133b44..d7e1f240f 100644 --- a/source/common/statistic_impl.h +++ b/source/common/statistic_impl.h @@ -7,6 +7,7 @@ #include "external/dep_hdrhistogram_c/src/hdr_histogram.h" #include "external/envoy/source/common/common/logger.h" +#include "external/envoy/source/common/stats/histogram_impl.h" #include "common/frequency.h" @@ -150,4 +151,94 @@ class HdrStatistic : public StatisticImpl { struct hdr_histogram* histogram_; }; -} // namespace Nighthawk \ No newline at end of file +/** + * CircllhistStatistic uses Circllhist under the hood to compute statistics. 
+ * Circllhist is used in the implementation of Envoy Histograms, compared to HdrHistogram it trades + * precision for fast performance in merge and insertion. For more info, please see + * https://github.com/circonus-labs/libcircllhist + */ +class CircllhistStatistic : public StatisticImpl { +public: + CircllhistStatistic(); + ~CircllhistStatistic() override; + + void addValue(uint64_t value) override; + double mean() const override; + double pvariance() const override; + double pstdev() const override; + StatisticPtr combine(const Statistic& statistic) const override; + // circllhist has low significant digit precision as a result of base 10 + // algorithm. + uint64_t significantDigits() const override { return 1; } + StatisticPtr createNewInstanceOfSameType() const override; + nighthawk::client::Statistic toProto(SerializationDomain domain) const override; + +private: + histogram_t* histogram_; +}; + +/** + * In order to be able to flush a histogram value to downstream Envoy stats Sinks, abstract class + * SinkableStatistic takes the Scope reference in the constructor and wraps the + * Envoy::Stats::HistogramHelper interface. Implementation of sinkable Nighthawk Statistic class + * will inherit from this class. + */ +class SinkableStatistic : public Envoy::Stats::HistogramImplHelper { +public: + // Calling HistogramImplHelper(SymbolTable& symbol_table) constructor to construct an empty + // MetricImpl. This is to bypass the complicated logic of setting up SymbolTable/StatName in + // Envoy. + SinkableStatistic(Envoy::Stats::Scope& scope, absl::optional worker_id); + ~SinkableStatistic() override; + + // Currently Envoy Histogram Unit supports {Unspecified, Bytes, Microseconds, Milliseconds}. By + // default, Nighthawk::Statistic uses nanosecond as the unit of latency histograms, so Unspecified + // is returned here to isolate Nighthawk Statistic from Envoy Histogram Unit. 
+ Envoy::Stats::Histogram::Unit unit() const override; + Envoy::Stats::SymbolTable& symbolTable() override; + // Return the id of the worker where this statistic is defined. Per worker + // statistic should always set worker_id. Return absl::nullopt when the + // statistic is not defined per worker. + const absl::optional worker_id() { return worker_id_; } + +protected: + // This is used in child class for delivering the histogram data to sinks. + Envoy::Stats::Scope& scope_; + +private: + // worker_id can be used in downstream stats Sinks as the stats tag. + absl::optional worker_id_; +}; + +// Implementation of sinkable Nighthawk Statistic with HdrHistogram. +class SinkableHdrStatistic : public SinkableStatistic, public HdrStatistic { +public: + // The constructor takes the Scope reference which is used to flush a histogram value to + // downstream stats Sinks through deliverHistogramToSinks(). + SinkableHdrStatistic(Envoy::Stats::Scope& scope, absl::optional worker_id = absl::nullopt); + + // Envoy::Stats::Histogram + void recordValue(uint64_t value) override; + bool used() const override { return count() > 0; } + // Overriding name() to return Nighthawk::Statistic::id(). + std::string name() const override { return id(); } + std::string tagExtractedName() const override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } +}; + +// Implementation of sinkable Nighthawk Statistic with Circllhist Histogram. +class SinkableCircllhistStatistic : public SinkableStatistic, public CircllhistStatistic { +public: + // The constructor takes the Scope reference which is used to flush a histogram value to + // downstream stats Sinks through deliverHistogramToSinks(). + SinkableCircllhistStatistic(Envoy::Stats::Scope& scope, + absl::optional worker_id = absl::nullopt); + + // Envoy::Stats::Histogram + void recordValue(uint64_t value) override; + bool used() const override { return count() > 0; } + // Overriding name() to return Nighthawk::Statistic::id(). 
+ std::string name() const override { return id(); } + std::string tagExtractedName() const override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } +}; + +} // namespace Nighthawk diff --git a/source/server/BUILD b/source/server/BUILD index 1b304eeb8..faf3288bf 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -8,16 +8,53 @@ licenses(["notice"]) # Apache 2 envoy_package() +envoy_cc_library( + name = "well_known_headers_lib", + hdrs = ["well_known_headers.h"], + repository = "@envoy", + deps = [ + "@envoy//source/common/http:headers_lib_with_external_headers", + "@envoy//source/common/singleton:const_singleton_with_external_headers", + ], +) + +envoy_cc_library( + name = "configuration_lib", + srcs = ["configuration.cc"], + hdrs = ["configuration.h"], + repository = "@envoy", + deps = [ + "//api/server:response_options_proto_cc_proto", + "@envoy//source/common/protobuf:message_validator_lib_with_external_headers", + "@envoy//source/common/protobuf:utility_lib_with_external_headers", + "@envoy//source/common/singleton:const_singleton_with_external_headers", + ], +) + envoy_cc_library( name = "http_test_server_filter_lib", srcs = ["http_test_server_filter.cc"], hdrs = ["http_test_server_filter.h"], repository = "@envoy", deps = [ + ":configuration_lib", + ":well_known_headers_lib", + "//api/server:response_options_proto_cc_proto", + "@envoy//source/exe:envoy_common_lib_with_external_headers", + ], +) + +envoy_cc_library( + name = "http_dynamic_delay_filter_lib", + srcs = ["http_dynamic_delay_filter.cc"], + hdrs = ["http_dynamic_delay_filter.h"], + repository = "@envoy", + deps = [ + ":configuration_lib", + ":well_known_headers_lib", "//api/server:response_options_proto_cc_proto", - "@envoy//source/common/protobuf:message_validator_lib_with_external_headers", - "@envoy//source/common/protobuf:utility_lib_with_external_headers", "@envoy//source/exe:envoy_common_lib_with_external_headers", + "@envoy//source/extensions/filters/http/fault:fault_filter_lib", ], ) @@ -30,3 
+67,13 @@ envoy_cc_library( "@envoy//include/envoy/server:filter_config_interface_with_external_headers", ], ) + +envoy_cc_library( + name = "http_dynamic_delay_filter_config", + srcs = ["http_dynamic_delay_filter_config.cc"], + repository = "@envoy", + deps = [ + ":http_dynamic_delay_filter_lib", + "@envoy//include/envoy/server:filter_config_interface_with_external_headers", + ], +) diff --git a/source/server/README.md b/source/server/README.md index 85d2d23a0..e65b2768a 100644 --- a/source/server/README.md +++ b/source/server/README.md @@ -16,48 +16,46 @@ bazel build -c opt :nighthawk_test_server ## Configuring the test server - `test-server.yaml` sample content ```yaml static_resources: listeners: - # define an origin server on :10000 that always returns "lorem ipsum..." - - address: - socket_address: - address: 0.0.0.0 - port_value: 10000 - filter_chains: - - filters: - - name: envoy.http_connection_manager - config: - generate_request_id: false - codec_type: auto - stat_prefix: ingress_http - route_config: - name: local_route - virtual_hosts: - - name: service - domains: - - "*" - http_filters: - - name: envoy.fault - config: - max_active_faults: 100 - delay: - header_delay: {} - percentage: - numerator: 100 - - name: test-server # before envoy.router because order matters! - config: - response_body_size: 10 - response_headers: - - { header: { key: "foo", value: "bar"} } - - { header: { key: "foo", value: "bar2"}, append: true } - - { header: { key: "x-nh", value: "1"}} - - name: envoy.router - config: - dynamic_stats: false + # define an origin server on :10000 that always returns "lorem ipsum..." 
+ - address: + socket_address: + address: 0.0.0.0 + port_value: 10000 + filter_chains: + - filters: + - name: envoy.http_connection_manager + config: + generate_request_id: false + codec_type: auto + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: service + domains: + - "*" + http_filters: + - name: dynamic-delay + config: + static_delay: 0.5s + - name: test-server # before envoy.router because order matters! + config: + response_body_size: 10 + response_headers: + - { header: { key: "foo", value: "bar" } } + - { + header: { key: "foo", value: "bar2" }, + append: true, + } + - { header: { key: "x-nh", value: "1" } } + - name: envoy.router + config: + dynamic_stats: false admin: access_log_path: /tmp/envoy.log address: @@ -68,21 +66,23 @@ admin: ## Response Options config -The ResponseOptions proto can be used in the test-server filter config or passed in `x-nighthawk-test-server-config`` -request header. +The [ResponseOptions proto](/api/server/response_options.proto) is shared by +the `Test Server` and `Dynamic Delay` filter extensions. Each filter will +interpret the parts that are relevant to it. This allows specifying what +a response should look like in a single message, which can be done at request +time via the optional `x-nighthawk-test-server-config` request-header. -The following parameters are available: +### Test Server -* `response_body_size` - number of 'a' characters repeated in the response body. -* `response_headers` - list of headers to add to response. If `append` is set to +- `response_body_size` - number of 'a' characters repeated in the response body. +- `response_headers` - list of headers to add to response. If `append` is set to `true`, then the header is appended. -* `echo_request_headers` - if set to `true`, then append the dump of request headers to the response +- `echo_request_headers` - if set to `true`, then append the dump of request headers to the response body. 
-The response options could be used to test and debug proxy or server configuration, for -example, to verify request headers that are added by intermediate proxy: +The response options above could be used to test and debug proxy or server configuration, for example, to verify request headers that are added by intermediate proxy: -``` +```bash $ curl -6 -v [::1]:8080/nighthawk * Trying ::1:8080... @@ -122,15 +122,28 @@ Request Headers: This example shows that intermediate proxy has added `x-forwarded-proto` and `x-forwarded-for` request headers. -## Running the test server +### Dynamic Delay + +The Dynamic Delay interprets the `oneof_delay_options` part in the [ResponseOptions proto](/api/server/response_options.proto). If specified, it can be used to: + +- Configure a static delay via `static_delay`. +- Configure a delay which linearly increase as the number of active requests grows, representing a simplified model of an overloaded server, via `concurrency_based_linear_delay`. +All delays have a millisecond-level granularity. + +At the time of writing this, there is a [known issue](https://github.com/envoyproxy/nighthawk/issues/392) with merging configuration provided via +request headers into the statically configured configuration. The current recommendation is to +use either static, or dynamic configuration (delivered per request header), but not both at the +same time. + +## Running the test server ``` # If you already have Envoy running, you might need to set --base-id to allow the test-server to start. 
➜ /bazel-bin/nighthawk/source/server/server --config-path /path/to/test-server-server.yaml # Verify the test server with a curl command similar to: -➜ curl -H "x-envoy-fault-delay-request: 1000" -H "x-nighthawk-test-server-config: {response_body_size:20}" -vv 127.0.0.1:10000 +➜ curl -H "x-nighthawk-test-server-config: {response_body_size:20, static_delay: \"0s\"}" -vv 127.0.0.1:10000 ``` ```bash @@ -144,9 +157,7 @@ bazel-bin/nighthawk_test_server [--disable-extensions ] [--use-fake-symbol-table ] [--cpuset-threads] [--enable-mutex-tracing] -[--disable-hot-restart] -[--max-obj-name-len ] -[--max-stats ] [--mode +[--disable-hot-restart] [--mode ] [--parent-shutdown-time-s ] [--drain-strategy ] [--drain-time-s ] @@ -193,12 +204,6 @@ Enable mutex contention tracing functionality --disable-hot-restart Disable hot restart functionality ---max-obj-name-len -Deprecated and unused; please do not specify. - ---max-stats -Deprecated and unused; please do not specify. - --mode One of 'serve' (default; validate configs and then serve traffic normally) or 'validate' (validate configs and exit). 
diff --git a/source/server/configuration.cc b/source/server/configuration.cc new file mode 100644 index 000000000..8ddc92fb6 --- /dev/null +++ b/source/server/configuration.cc @@ -0,0 +1,45 @@ +#include "server/configuration.h" + +#include + +#include "external/envoy/source/common/protobuf/message_validator_impl.h" +#include "external/envoy/source/common/protobuf/utility.h" + +#include "api/server/response_options.pb.validate.h" + +#include "absl/strings/numbers.h" + +namespace Nighthawk { +namespace Server { +namespace Configuration { + +bool mergeJsonConfig(absl::string_view json, nighthawk::server::ResponseOptions& config, + std::string& error_message) { + error_message = ""; + try { + nighthawk::server::ResponseOptions json_config; + auto& validation_visitor = Envoy::ProtobufMessage::getStrictValidationVisitor(); + Envoy::MessageUtil::loadFromJson(std::string(json), json_config, validation_visitor); + config.MergeFrom(json_config); + Envoy::MessageUtil::validate(config, validation_visitor); + } catch (const Envoy::EnvoyException& exception) { + error_message = fmt::format("Error merging json config: {}", exception.what()); + } + return error_message == ""; +} + +void applyConfigToResponseHeaders(Envoy::Http::ResponseHeaderMap& response_headers, + nighthawk::server::ResponseOptions& response_options) { + for (const auto& header_value_option : response_options.response_headers()) { + const auto& header = header_value_option.header(); + auto lower_case_key = Envoy::Http::LowerCaseString(header.key()); + if (!header_value_option.append().value()) { + response_headers.remove(lower_case_key); + } + response_headers.addCopy(lower_case_key, header.value()); + } +} + +} // namespace Configuration +} // namespace Server +} // namespace Nighthawk diff --git a/source/server/configuration.h b/source/server/configuration.h new file mode 100644 index 000000000..ec1f77165 --- /dev/null +++ b/source/server/configuration.h @@ -0,0 +1,36 @@ +#pragma once + +#include + +#include 
"envoy/http/header_map.h" + +#include "api/server/response_options.pb.h" + +namespace Nighthawk { +namespace Server { +namespace Configuration { + +/** + * Merges a json string containing configuration into a ResponseOptions instance. + * + * @param json Json-formatted seralization of ResponseOptions to merge into the configuration. + * @param config The target that the json string should be merged into. + * @param error_message Set to an error message if one occurred, else set to an empty string. + * @return bool false if an error occurred. + */ +bool mergeJsonConfig(absl::string_view json, nighthawk::server::ResponseOptions& config, + std::string& error_message); + +/** + * Applies ResponseOptions onto a HeaderMap containing response headers. + * + * @param response_headers Response headers to transform to reflect the passed in response + * options. + * @param response_options Configuration specifying how to transform the header map. + */ +void applyConfigToResponseHeaders(Envoy::Http::ResponseHeaderMap& response_headers, + nighthawk::server::ResponseOptions& response_options); + +} // namespace Configuration +} // namespace Server +} // namespace Nighthawk diff --git a/source/server/http_dynamic_delay_filter.cc b/source/server/http_dynamic_delay_filter.cc new file mode 100644 index 000000000..542cedefa --- /dev/null +++ b/source/server/http_dynamic_delay_filter.cc @@ -0,0 +1,113 @@ +#include "server/http_dynamic_delay_filter.h" + +#include + +#include "envoy/server/filter_config.h" + +#include "server/configuration.h" +#include "server/well_known_headers.h" + +#include "absl/strings/str_cat.h" + +namespace Nighthawk { +namespace Server { + +HttpDynamicDelayDecoderFilterConfig::HttpDynamicDelayDecoderFilterConfig( + nighthawk::server::ResponseOptions proto_config, Envoy::Runtime::Loader& runtime, + const std::string& stats_prefix, Envoy::Stats::Scope& scope, Envoy::TimeSource& time_source) + : server_config_(std::move(proto_config)), runtime_(runtime), + 
stats_prefix_(absl::StrCat(stats_prefix, "dynamic-delay.")), scope_(scope), + time_source_(time_source) {} + +HttpDynamicDelayDecoderFilter::HttpDynamicDelayDecoderFilter( + HttpDynamicDelayDecoderFilterConfigSharedPtr config) + : Envoy::Extensions::HttpFilters::Fault::FaultFilter( + translateOurConfigIntoFaultFilterConfig(*config)), + config_(std::move(config)) { + config_->incrementFilterInstanceCount(); +} + +HttpDynamicDelayDecoderFilter::~HttpDynamicDelayDecoderFilter() { + RELEASE_ASSERT(destroyed_, "onDestroy() not called"); +} + +void HttpDynamicDelayDecoderFilter::onDestroy() { + destroyed_ = true; + config_->decrementFilterInstanceCount(); + Envoy::Extensions::HttpFilters::Fault::FaultFilter::onDestroy(); +} + +Envoy::Http::FilterHeadersStatus +HttpDynamicDelayDecoderFilter::decodeHeaders(Envoy::Http::RequestHeaderMap& headers, + bool end_stream) { + response_options_ = config_->server_config(); + std::string error_message; + if (!computeResponseOptions(headers, error_message)) { + decoder_callbacks_->sendLocalReply( + static_cast(500), + fmt::format("dynamic-delay didn't understand the request: {}", error_message), nullptr, + absl::nullopt, ""); + return Envoy::Http::FilterHeadersStatus::StopIteration; + } + const absl::optional delay_ms = + computeDelayMs(response_options_, config_->approximateFilterInstances()); + maybeRequestFaultFilterDelay(delay_ms, headers); + return Envoy::Extensions::HttpFilters::Fault::FaultFilter::decodeHeaders(headers, end_stream); +} + +bool HttpDynamicDelayDecoderFilter::computeResponseOptions( + const Envoy::Http::RequestHeaderMap& headers, std::string& error_message) { + response_options_ = config_->server_config(); + const auto* request_config_header = headers.get(TestServer::HeaderNames::get().TestServerConfig); + if (request_config_header) { + if (!Configuration::mergeJsonConfig(request_config_header->value().getStringView(), + response_options_, error_message)) { + return false; + } + } + return true; +} + 
+absl::optional HttpDynamicDelayDecoderFilter::computeDelayMs( + const nighthawk::server::ResponseOptions& response_options, const uint64_t concurrency) { + absl::optional delay_ms; + if (response_options.has_static_delay()) { + delay_ms = + Envoy::Protobuf::util::TimeUtil::DurationToMilliseconds(response_options.static_delay()); + } else if (response_options.has_concurrency_based_linear_delay()) { + const nighthawk::server::ConcurrencyBasedLinearDelay& concurrency_config = + response_options.concurrency_based_linear_delay(); + delay_ms = computeConcurrencyBasedLinearDelayMs(concurrency, concurrency_config.minimal_delay(), + concurrency_config.concurrency_delay_factor()); + } + return delay_ms; +} + +void HttpDynamicDelayDecoderFilter::maybeRequestFaultFilterDelay( + const absl::optional delay_ms, Envoy::Http::RequestHeaderMap& headers) { + if (delay_ms.has_value() && delay_ms > 0) { + // Emit header to communicate the delay we desire to the fault filter extension. + const Envoy::Http::LowerCaseString key("x-envoy-fault-delay-request"); + headers.setCopy(key, absl::StrCat(*delay_ms)); + } +} + +Envoy::Extensions::HttpFilters::Fault::FaultFilterConfigSharedPtr +HttpDynamicDelayDecoderFilter::translateOurConfigIntoFaultFilterConfig( + HttpDynamicDelayDecoderFilterConfig& config) { + envoy::extensions::filters::http::fault::v3::HTTPFault fault_config; + fault_config.mutable_max_active_faults()->set_value(UINT32_MAX); + fault_config.mutable_delay()->mutable_percentage()->set_numerator(100); + fault_config.mutable_delay()->mutable_header_delay(); + return std::make_shared( + fault_config, config.runtime(), config.stats_prefix(), config.scope(), config.time_source()); +} + +void HttpDynamicDelayDecoderFilter::setDecoderFilterCallbacks( + Envoy::Http::StreamDecoderFilterCallbacks& callbacks) { + decoder_callbacks_ = &callbacks; + Envoy::Extensions::HttpFilters::Fault::FaultFilter::setDecoderFilterCallbacks(callbacks); +} + +} // namespace Server +} // namespace Nighthawk 
diff --git a/source/server/http_dynamic_delay_filter.h b/source/server/http_dynamic_delay_filter.h new file mode 100644 index 000000000..031ec9499 --- /dev/null +++ b/source/server/http_dynamic_delay_filter.h @@ -0,0 +1,187 @@ +#pragma once + +#include +#include + +#include "envoy/server/filter_config.h" + +#include "api/server/response_options.pb.h" + +#include "extensions/filters/http/fault/fault_filter.h" + +namespace Nighthawk { +namespace Server { + +/** + * Filter configuration container class for the dynamic delay extension. + * Instances of this class will be shared accross instances of HttpDynamicDelayDecoderFilter. + * The methods for getting and manipulating (global) active filter instance counts are thread safe. + */ +class HttpDynamicDelayDecoderFilterConfig { + +public: + /** + * Constructs a new HttpDynamicDelayDecoderFilterConfig instance. + * + * @param proto_config The proto configuration of the filter. Will be tranlated internally into + * the right configuration for the base class structure (the failt filter and config). + * @param runtime Envoy runtime to be used by the filter. + * @param stats_prefix Prefix to use by the filter when it names statistics. E.g. + * dynamic-delay.fault.delays_injected: 1 + * @param scope Statistics scope to be used by the filter. + * @param time_source Time source to be used by the filter. + */ + HttpDynamicDelayDecoderFilterConfig(nighthawk::server::ResponseOptions proto_config, + Envoy::Runtime::Loader& runtime, + const std::string& stats_prefix, Envoy::Stats::Scope& scope, + Envoy::TimeSource& time_source); + + /** + * @return const nighthawk::server::ResponseOptions& read-only reference to the proto config + * object. + */ + const nighthawk::server::ResponseOptions& server_config() const { return server_config_; } + + /** + * Increments the number of globally active filter instances. 
+ */ + void incrementFilterInstanceCount() { instances()++; } + + /** + * Decrements the number of globally active filter instances. + */ + void decrementFilterInstanceCount() { instances()--; } + + /** + * @return uint64_t the approximate number of globally active HttpDynamicDelayDecoderFilter + * instances. Approximate, because by the time the value is consumed it might have changed. + */ + uint64_t approximateFilterInstances() const { return instances(); } + + /** + * @return Envoy::Runtime::Loader& to be used by filter instantiations associated to this. + */ + Envoy::Runtime::Loader& runtime() { return runtime_; } + + /** + * @return Envoy::Stats::Scope& to be used by filter instantiations associated to this. + */ + Envoy::Stats::Scope& scope() { return scope_; } + + /** + * @return Envoy::TimeSource& to be used by filter instantiations associated to this. + */ + Envoy::TimeSource& time_source() { return time_source_; } + + /** + * @return std::string to be used by filter instantiations associated to this. + */ + std::string stats_prefix() { return stats_prefix_; } + +private: + const nighthawk::server::ResponseOptions server_config_; + static std::atomic& instances() { + // We lazy-init the atomic to avoid static initialization / a fiasco. + MUTABLE_CONSTRUCT_ON_FIRST_USE(std::atomic, 0); // NOLINT + } + + Envoy::Runtime::Loader& runtime_; + const std::string stats_prefix_; + Envoy::Stats::Scope& scope_; + Envoy::TimeSource& time_source_; +}; + +using HttpDynamicDelayDecoderFilterConfigSharedPtr = + std::shared_ptr; + +/** + * Extension that controls the fault filter extension by supplying it with a request + * header that triggers it to induce a delay under the hood. 
+ * In the future, we may look into injecting the fault filter ourselves with the right + * configuration, either directly/verbatim, or by including a derivation of it in + * this code base, thereby making it all transparant to the user / eliminating the need + * to configure the fault filter and make NH take full ownership at the feature level. + */ +class HttpDynamicDelayDecoderFilter : public Envoy::Extensions::HttpFilters::Fault::FaultFilter { +public: + HttpDynamicDelayDecoderFilter(HttpDynamicDelayDecoderFilterConfigSharedPtr); + ~HttpDynamicDelayDecoderFilter() override; + + // Http::StreamFilterBase + void onDestroy() override; + + // Http::StreamDecoderFilter + Envoy::Http::FilterHeadersStatus decodeHeaders(Envoy::Http::RequestHeaderMap&, bool) override; + void setDecoderFilterCallbacks(Envoy::Http::StreamDecoderFilterCallbacks&) override; + + /** + * Compute the response options based on the static configuration and optional configuration + * provided via the request headers. After a successfull call the response_options_ field will + * have been modified to reflect request-level configuration. + * + * @param request_headers The request headers set to inspect for configuration. + * @param error_message Set to an error message if the request-level configuration couldn't be + * interpreted. + * @return true iff the configuration was successfully computed. + */ + bool computeResponseOptions(const Envoy::Http::RequestHeaderMap& request_headers, + std::string& error_message); + + /** + * Compute the concurrency based linear delay in milliseconds. + * + * @param concurrency indicating the number of concurrently active requests. + * @param minimal_delay gets unconditionally included in the return value. + * @param delay_factor added for each increase in the number of active requests. + * @return int64_t the computed delay in milliseconds. 
+ */ + static int64_t + computeConcurrencyBasedLinearDelayMs(const uint64_t concurrency, + const Envoy::ProtobufWkt::Duration& minimal_delay, + const Envoy::ProtobufWkt::Duration& delay_factor) { + return std::round(Envoy::Protobuf::util::TimeUtil::DurationToNanoseconds( + minimal_delay + (concurrency * delay_factor)) / + 1e6); + } + + /** + * Compute the delay in milliseconds, based on provided response options and number of active + * requests. + * + * @param response_options Response options configuration. + * @param concurrency The number of concurrenct active requests. + * @return absl::optional The computed delay in milliseconds, if any. + */ + static absl::optional + computeDelayMs(const nighthawk::server::ResponseOptions& response_options, + const uint64_t concurrency); + + /** + * Communicate to the fault filter, which should be running after this filter, that a delay should + * be inserted. The request is only made when the passed delay is set to a value > 0. + * + * @param delay_ms The delay in milliseconds that should be propagated, if any. When not set or <= + * 0, the call will be a no-op. + * @param request_headers The request headers that will be modified to instruct the fault filter. + */ + static void maybeRequestFaultFilterDelay(const absl::optional delay_ms, + Envoy::Http::RequestHeaderMap& request_headers); + + /** + * Translates our options into a configuration of the fault filter base class where needed. + * + * @param config Dynamic delay configuration. 
+ * @return Envoy::Extensions::HttpFilters::Fault::FaultFilterConfigSharedPtr + */ + static Envoy::Extensions::HttpFilters::Fault::FaultFilterConfigSharedPtr + translateOurConfigIntoFaultFilterConfig(HttpDynamicDelayDecoderFilterConfig& config); + +private: + const HttpDynamicDelayDecoderFilterConfigSharedPtr config_; + Envoy::Http::StreamDecoderFilterCallbacks* decoder_callbacks_; + nighthawk::server::ResponseOptions response_options_; + bool destroyed_{false}; +}; + +} // namespace Server +} // namespace Nighthawk diff --git a/source/server/http_dynamic_delay_filter_config.cc b/source/server/http_dynamic_delay_filter_config.cc new file mode 100644 index 000000000..336a5da7b --- /dev/null +++ b/source/server/http_dynamic_delay_filter_config.cc @@ -0,0 +1,60 @@ +#include + +#include "envoy/registry/registry.h" + +#include "external/envoy/source/common/protobuf/message_validator_impl.h" + +#include "api/server/response_options.pb.h" +#include "api/server/response_options.pb.validate.h" + +#include "server/http_dynamic_delay_filter.h" + +namespace Nighthawk { +namespace Server { +namespace Configuration { +namespace { + +class HttpDynamicDelayDecoderFilterConfigFactory + : public Envoy::Server::Configuration::NamedHttpFilterConfigFactory { +public: + Envoy::Http::FilterFactoryCb + createFilterFactoryFromProto(const Envoy::Protobuf::Message& proto_config, const std::string&, + Envoy::Server::Configuration::FactoryContext& context) override { + + auto& validation_visitor = Envoy::ProtobufMessage::getStrictValidationVisitor(); + return createFilter( + Envoy::MessageUtil::downcastAndValidate( + proto_config, validation_visitor), + context); + } + + Envoy::ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return Envoy::ProtobufTypes::MessagePtr{new nighthawk::server::ResponseOptions()}; + } + + std::string name() const override { return "dynamic-delay"; } + +private: + Envoy::Http::FilterFactoryCb createFilter(const nighthawk::server::ResponseOptions& 
proto_config, + Envoy::Server::Configuration::FactoryContext& context) { + Nighthawk::Server::HttpDynamicDelayDecoderFilterConfigSharedPtr config = + std::make_shared( + Nighthawk::Server::HttpDynamicDelayDecoderFilterConfig( + proto_config, context.runtime(), "" /*stats_prefix*/, context.scope(), + context.timeSource())); + + return [config](Envoy::Http::FilterChainFactoryCallbacks& callbacks) -> void { + auto* filter = new Nighthawk::Server::HttpDynamicDelayDecoderFilter(config); + callbacks.addStreamDecoderFilter(Envoy::Http::StreamDecoderFilterSharedPtr{filter}); + }; + } +}; + +} // namespace + +static Envoy::Registry::RegisterFactory + register_; +} // namespace Configuration +} // namespace Server +} // namespace Nighthawk diff --git a/source/server/http_test_server_filter.cc b/source/server/http_test_server_filter.cc index b43c413ed..cf01105cf 100644 --- a/source/server/http_test_server_filter.cc +++ b/source/server/http_test_server_filter.cc @@ -4,10 +4,8 @@ #include "envoy/server/filter_config.h" -#include "external/envoy/source/common/protobuf/message_validator_impl.h" -#include "external/envoy/source/common/protobuf/utility.h" - -#include "api/server/response_options.pb.validate.h" +#include "server/configuration.h" +#include "server/well_known_headers.h" #include "absl/strings/numbers.h" @@ -24,37 +22,8 @@ HttpTestServerDecoderFilter::HttpTestServerDecoderFilter( void HttpTestServerDecoderFilter::onDestroy() {} -bool HttpTestServerDecoderFilter::mergeJsonConfig(absl::string_view json, - nighthawk::server::ResponseOptions& config, - absl::optional& error_message) { - error_message = absl::nullopt; - try { - nighthawk::server::ResponseOptions json_config; - auto& validation_visitor = Envoy::ProtobufMessage::getStrictValidationVisitor(); - Envoy::MessageUtil::loadFromJson(std::string(json), json_config, validation_visitor); - config.MergeFrom(json_config); - Envoy::MessageUtil::validate(config, validation_visitor); - } catch (const Envoy::EnvoyException& 
exception) { - error_message.emplace(fmt::format("Error merging json config: {}", exception.what())); - } - return error_message == absl::nullopt; -} - -void HttpTestServerDecoderFilter::applyConfigToResponseHeaders( - Envoy::Http::ResponseHeaderMap& response_headers, - nighthawk::server::ResponseOptions& response_options) { - for (const auto& header_value_option : response_options.response_headers()) { - const auto& header = header_value_option.header(); - auto lower_case_key = Envoy::Http::LowerCaseString(header.key()); - if (!header_value_option.append().value()) { - response_headers.remove(lower_case_key); - } - response_headers.addCopy(lower_case_key, header.value()); - } -} - void HttpTestServerDecoderFilter::sendReply() { - if (error_message_ == absl::nullopt) { + if (!json_merge_error_) { std::string response_body(base_config_.response_body_size(), 'a'); if (request_headers_dump_.has_value()) { response_body += *request_headers_dump_; @@ -62,13 +31,13 @@ void HttpTestServerDecoderFilter::sendReply() { decoder_callbacks_->sendLocalReply( static_cast(200), response_body, [this](Envoy::Http::ResponseHeaderMap& direct_response_headers) { - applyConfigToResponseHeaders(direct_response_headers, base_config_); + Configuration::applyConfigToResponseHeaders(direct_response_headers, base_config_); }, absl::nullopt, ""); } else { decoder_callbacks_->sendLocalReply( static_cast(500), - fmt::format("test-server didn't understand the request: {}", *error_message_), nullptr, + fmt::format("test-server didn't understand the request: {}", error_message_), nullptr, absl::nullopt, ""); } } @@ -80,7 +49,8 @@ HttpTestServerDecoderFilter::decodeHeaders(Envoy::Http::RequestHeaderMap& header base_config_ = config_->server_config(); const auto* request_config_header = headers.get(TestServer::HeaderNames::get().TestServerConfig); if (request_config_header) { - mergeJsonConfig(request_config_header->value().getStringView(), base_config_, error_message_); + json_merge_error_ = 
!Configuration::mergeJsonConfig( + request_config_header->value().getStringView(), base_config_, error_message_); } if (base_config_.echo_request_headers()) { std::stringstream headers_dump; diff --git a/source/server/http_test_server_filter.h b/source/server/http_test_server_filter.h index f7ba094a2..6f8d2ace1 100644 --- a/source/server/http_test_server_filter.h +++ b/source/server/http_test_server_filter.h @@ -9,17 +9,6 @@ namespace Nighthawk { namespace Server { -namespace TestServer { - -class HeaderNameValues { -public: - const Envoy::Http::LowerCaseString TestServerConfig{"x-nighthawk-test-server-config"}; -}; - -using HeaderNames = Envoy::ConstSingleton; - -} // namespace TestServer - // Basically this is left in as a placeholder for further configuration. class HttpTestServerDecoderFilterConfig { public: @@ -46,33 +35,13 @@ class HttpTestServerDecoderFilter : public Envoy::Http::StreamDecoderFilter { Envoy::Http::FilterTrailersStatus decodeTrailers(Envoy::Http::RequestTrailerMap&) override; void setDecoderFilterCallbacks(Envoy::Http::StreamDecoderFilterCallbacks&) override; - /** - * Merges a json string containing configuration into a ResponseOptions instance. - * - * @param json Json-formatted seralization of ResponseOptions to merge into the configuration. - * @param config The target that the json string should be merged into. - * @param error_message Will contain an error message iff an error occurred. - * @return bool false iff an error occurred. - */ - bool mergeJsonConfig(absl::string_view json, nighthawk::server::ResponseOptions& config, - absl::optional& error_message); - - /** - * Applies ResponseOptions onto a HeaderMap containing response headers. - * - * @param response_headers Response headers to transform to reflect the passed in response - * options. - * @param response_options Configuration specifying how to transform the header map. 
- */ - void applyConfigToResponseHeaders(Envoy::Http::ResponseHeaderMap& response_headers, - nighthawk::server::ResponseOptions& response_options); - private: void sendReply(); const HttpTestServerDecoderFilterConfigSharedPtr config_; Envoy::Http::StreamDecoderFilterCallbacks* decoder_callbacks_; nighthawk::server::ResponseOptions base_config_; - absl::optional error_message_; + bool json_merge_error_{false}; + std::string error_message_; absl::optional request_headers_dump_; }; diff --git a/source/server/well_known_headers.h b/source/server/well_known_headers.h new file mode 100644 index 000000000..e64eb6c82 --- /dev/null +++ b/source/server/well_known_headers.h @@ -0,0 +1,22 @@ +#pragma once + +#include + +#include "envoy/http/header_map.h" + +#include "external/envoy/source/common/singleton/const_singleton.h" + +namespace Nighthawk { +namespace Server { +namespace TestServer { + +class HeaderNameValues { +public: + const Envoy::Http::LowerCaseString TestServerConfig{"x-nighthawk-test-server-config"}; +}; + +using HeaderNames = Envoy::ConstSingleton; + +} // namespace TestServer +} // namespace Server +} // namespace Nighthawk diff --git a/test/BUILD b/test/BUILD index 868394027..ac7cdcacc 100644 --- a/test/BUILD +++ b/test/BUILD @@ -195,13 +195,17 @@ envoy_cc_test( envoy_cc_test( name = "statistic_test", srcs = ["statistic_test.cc"], - data = ["test_data/hdr_proto_json.gold"], + data = [ + "test_data/circllhist_proto_json.gold", + "test_data/hdr_proto_json.gold", + ], repository = "@envoy", deps = [ "//source/common:nighthawk_common_lib", "//test/test_common:environment_lib", "@envoy//source/common/protobuf:utility_lib_with_external_headers", "@envoy//source/common/stats:isolated_store_lib_with_external_headers", + "@envoy//test/mocks/stats:stats_mocks", ], ) @@ -238,6 +242,7 @@ envoy_cc_test( repository = "@envoy", deps = [ "//source/common:nighthawk_common_lib", + "@envoy//source/common/common:random_generator_lib_with_external_headers", 
"@envoy//source/common/stats:isolated_store_lib_with_external_headers", "@envoy//test/mocks/local_info:local_info_mocks", "@envoy//test/mocks/protobuf:protobuf_mocks", diff --git a/test/benchmark_http_client_test.cc b/test/benchmark_http_client_test.cc index 0539f487b..f6d67efe2 100644 --- a/test/benchmark_http_client_test.cc +++ b/test/benchmark_http_client_test.cc @@ -1,5 +1,6 @@ #include +#include "external/envoy/source/common/common/random_generator.h" #include "external/envoy/source/common/http/header_map_impl.h" #include "external/envoy/source/common/network/utility.h" #include "external/envoy/source/common/runtime/runtime_impl.h" @@ -146,7 +147,7 @@ class BenchmarkClientHttpTest : public Test { Envoy::Stats::IsolatedStoreImpl store_; Envoy::Api::ApiPtr api_; Envoy::Event::DispatcherPtr dispatcher_; - Envoy::Runtime::RandomGeneratorImpl generator_; + Envoy::Random::RandomGeneratorImpl generator_; NiceMock tls_; NiceMock runtime_; std::unique_ptr client_; diff --git a/test/client_test.cc b/test/client_test.cc index 0840c2c3c..6cde01f27 100644 --- a/test/client_test.cc +++ b/test/client_test.cc @@ -26,6 +26,7 @@ class ClientTest : public testing::Test {}; TEST_F(ClientTest, NormalRun) { Main program(Nighthawk::Client::TestUtility::createOptionsImpl( "foo --duration 1 --rps 10 http://localhost:63657/")); + EXPECT_FALSE(program.run()); } diff --git a/test/client_worker_test.cc b/test/client_worker_test.cc index 4a7b59517..c2a7a9dae 100644 --- a/test/client_worker_test.cc +++ b/test/client_worker_test.cc @@ -3,6 +3,7 @@ #include "envoy/upstream/cluster_manager.h" +#include "external/envoy/source/common/common/random_generator.h" #include "external/envoy/source/common/runtime/runtime_impl.h" #include "external/envoy/source/common/stats/isolated_store_impl.h" #include "external/envoy/test/mocks/local_info/mocks.h" @@ -90,7 +91,7 @@ class ClientWorkerTest : public Test { MockBenchmarkClient* benchmark_client_; MockSequencer* sequencer_; MockRequestSource* 
request_generator_; - Envoy::Runtime::RandomGeneratorImpl rand_; + Envoy::Random::RandomGeneratorImpl rand_; NiceMock dispatcher_; std::unique_ptr loader_; NiceMock local_info_; diff --git a/test/integration/BUILD b/test/integration/BUILD index 2b16b3d18..0ec3002cf 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -29,6 +29,7 @@ py_library( py_library( name = "integration_test_base_lean", srcs = [ + "asserts.py", "common.py", "integration_test_fixtures.py", "nighthawk_grpc_service.py", diff --git a/test/integration/asserts.py b/test/integration/asserts.py new file mode 100644 index 000000000..ab7413e24 --- /dev/null +++ b/test/integration/asserts.py @@ -0,0 +1,98 @@ +"""A library with assertion helpers for unit tests.""" + + +def assertEqual(a, b): + """Assert that arguments a and b are equal.""" + assert a == b + + +def assertGreater(a, b): + """Assert that a is greater than b.""" + assert a > b + + +def assertGreaterEqual(a, b): + """Assert that a is greater than or equal to b.""" + assert a >= b + + +def assertLessEqual(a, b): + """Assert that a is less than or equal to b.""" + assert a <= b + + +def assertNotIn(a, b): + """Assert that a is not contained in b.""" + assert a not in b + + +def assertIn(a, b): + """Assert that a is contained in b.""" + assert a in b + + +def assertBetweenInclusive(a, min_value, max_value): + """Assert that the passed value is between min_value and max_value, inclusive.""" + assertGreaterEqual(a, min_value) + assertLessEqual(a, max_value) + + +def assertCounterEqual(counters, name, value): + """Assert that a counter with the specified name is present with the specified value. + + Args: + counters (map): Counter values keyed by name. + name (string): Name of the counter under test. + value ([int]): Value that the counter value will be compared against. 
+ """ + assertIn(name, counters) + assertEqual(counters[name], value) + + +def assertCounterGreater(counters, name, value): + """Assert that a counter with the specified name is present and its value is greater than the specified value. + + Args: + counters (map): Counter values keyed by name. + name (string): Name of the counter under test. + value ([int]): Value that the counter value will be compared against. + """ + assertIn(name, counters) + assertGreater(counters[name], value) + + +def assertCounterGreaterEqual(counters, name, value): + """Assert that a counter with the specified name is present and its value is greater than or equal to the specified value. + + Args: + counters (map): Counter values keyed by name. + name (string): Name of the counter under test. + value ([int]): Value that the counter value will be compared against. + """ + assertIn(name, counters) + assertGreaterEqual(counters[name], value) + + +def assertCounterLessEqual(counters, name, value): + """Assert that a counter with the specified name is present and its value is less than or equal to the specified value. + + Args: + counters (map): Counter values keyed by name. + name (string): Name of the counter under test. + value ([int]): Value that the counter value will be compared against. + """ + assertIn(name, counters) + assertLessEqual(counters[name], value) + + +def assertCounterBetweenInclusive(counters, name, min_value, max_value): + """Assert that a counter with the specified name is present and its value is between the specified minimum and maximum, inclusive. + + Args: + counters (map): Counter values keyed by name. + name (string): Name of the counter under test. + min_value ([int]): Minimum value that the counter may have. + max_value ([int]): Maximum value that the counter may have. 
+ """ + assertIn(name, counters) + assertBetweenInclusive(counters[name], min_value, max_value) diff --git a/test/integration/common.py b/test/integration/common.py index 9f71fef9b..01205f099 100644 --- a/test/integration/common.py +++ b/test/integration/common.py @@ -1,12 +1,16 @@ +"""Miscellaneous utilities used in the integration tests.""" + from enum import Enum class NighthawkException(Exception): - pass + """Base exception class for Nighthawk's python code.""" # TODO(oschaaf): When we're on python 3 teach IpVersion below how to render itself to a string. class IpVersion(Enum): + """Enumerate IP versions.""" + UNKNOWN = 1 IPV4 = 2 IPV6 = 3 diff --git a/test/integration/configurations/nighthawk_http_origin.yaml b/test/integration/configurations/nighthawk_http_origin.yaml index cf39a6ffc..08247b781 100644 --- a/test/integration/configurations/nighthawk_http_origin.yaml +++ b/test/integration/configurations/nighthawk_http_origin.yaml @@ -23,6 +23,7 @@ static_resources: domains: - "*" http_filters: + - name: dynamic-delay - name: envoy.fault config: max_active_faults: 100 diff --git a/test/integration/integration_test.py b/test/integration/integration_test.py index 951d56b32..b715408f4 100644 --- a/test/integration/integration_test.py +++ b/test/integration/integration_test.py @@ -1,13 +1,11 @@ #!/usr/bin/env python3 -"""@package integration_test.py -Entry point for our integration testing -""" +"""Entry point for our integration testing.""" import logging import os import sys import pytest -from utility import isSanitizerRun +from test.integration import utility if __name__ == '__main__': path = os.path.dirname(os.path.realpath(__file__)) @@ -18,13 +16,13 @@ "-vvvv", "--showlocals", # Don't abbreviate/truncate long values in asserts. 
"-p", - "no:cacheprovider", # Avoid a bunch of warnings on readonly filesystems + "no:cacheprovider", # Avoid a bunch of warnings on readonly filesystems "-k", test_selection_arg, # Passed in via BUILD/py_test() "-x", path, "-n", - "4" if isSanitizerRun() else "20" # Number of tests to run in parallel + "4" if utility.isSanitizerRun() else "20" # Number of tests to run in parallel ], plugins=["xdist"]) exit(r) diff --git a/test/integration/integration_test_fixtures.py b/test/integration/integration_test_fixtures.py index 2e73ce908..e256d53e4 100644 --- a/test/integration/integration_test_fixtures.py +++ b/test/integration/integration_test_fixtures.py @@ -1,6 +1,4 @@ -"""@package integration_test_fixtures -Base classes for Nighthawk integration tests -""" +"""Base classes for Nighthawk integration tests.""" import json import logging @@ -21,6 +19,16 @@ def determineIpVersionsFromEnvironment(): + """Determine the IP version(s) for test execution from the environment. + + Raises: + NighthawkException: raised when no ip version could be determined, or + an invalid one was encountered. + + Returns: + A list of test.integration.common.IpVersion with ip versions obtained from the + ENVOY_IP_TEST_VERSIONS environment variable. + """ env_versions = os.environ.get("ENVOY_IP_TEST_VERSIONS", "all") if env_versions == "v4only": versions = [IpVersion.IPV4] @@ -34,7 +42,8 @@ def determineIpVersionsFromEnvironment(): class IntegrationTestBase(): - """ + """Base class for integration test fixtures. + IntegrationTestBase facilitates testing against the Nighthawk test server, by determining a free port, and starting it up in a separate process in setUp(). @@ -48,7 +57,8 @@ class IntegrationTestBase(): """ def __init__(self, ip_version, server_config, backend_count=1): - """ + """Initialize the IntegrationTestBase instance. 
+ Args: ip_version: a single IP mode that this instance will test: IpVersion.IPV4 or IpVersion.IPV6 server_config: path to the server configuration @@ -58,7 +68,7 @@ def __init__(self, ip_version, server_config, backend_count=1): server_ip: string containing the server ip that will be used to listen tag: String. Supply this to get recognizeable output locations. parameters: Dictionary. Supply this to provide template parameter replacement values. - grpc_service: NighthawkGrpcService instance or None. Set by startNighthawkGrpcService(). + grpc_service: NighthawkGrpcService instance or None. Set by startNighthawkGrpcService(). test_server: NighthawkTestServer instance, set during setUp(). nighthawk_client_path: String, path to the nighthawk_client binary. """ @@ -84,8 +94,9 @@ def __init__(self, ip_version, server_config, backend_count=1): # TODO(oschaaf): For the NH test server, add a way to let it determine a port by itself and pull that # out. def getFreeListenerPortForAddress(self, address): - """ - Determines a free port and returns that. Theoretically it is possible that another process + """Determine a free port and returns that. + + Theoretically it is possible that another process will steal the port before our caller is able to leverage it, but we take that chance. The upside is that we can push the port upon the server we are about to start through configuration which is compatible accross servers. @@ -96,8 +107,9 @@ def getFreeListenerPortForAddress(self, address): return port def setUp(self): - """ - Performs sanity checks and starts up the server. Upon exit the server is ready to accept connections. + """Perform sanity checks and start up the server. + + Upon exit the server is ready to accept connections. """ if os.getenv("NH_DOCKER_IMAGE", "") == "": assert os.path.exists( @@ -112,10 +124,8 @@ def setUp(self): assert self._tryStartTestServers(), "Test server(s) failed to start" def tearDown(self): - """ - Stops the server. 
- """ - if not self.grpc_service is None: + """Stop the server.""" + if self.grpc_service is not None: assert (self.grpc_service.stop() == 0) any_failed = False @@ -126,13 +136,12 @@ def tearDown(self): def _tryStartTestServers(self): for i in range(self._backend_count): - test_server = NighthawkTestServer( - self._nighthawk_test_server_path, - self._nighthawk_test_config_path, - self.server_ip, - self.ip_version, - parameters=self.parameters, - tag=self.tag) + test_server = NighthawkTestServer(self._nighthawk_test_server_path, + self._nighthawk_test_config_path, + self.server_ip, + self.ip_version, + parameters=self.parameters, + tag=self.tag) if not test_server.start(): return False self._test_servers.append(test_server) @@ -141,34 +150,26 @@ def _tryStartTestServers(self): return True def getGlobalResults(self, parsed_json): - """ - Utility to find the global/aggregated result in the json output - """ + """Find the global/aggregated result in the json output.""" global_result = [x for x in parsed_json["results"] if x["name"] == "global"] assert (len(global_result) == 1) return global_result[0] def getNighthawkCounterMapFromJson(self, parsed_json): - """ - Utility method to get the counters from the json indexed by name. - """ + """Get the counters from the json indexed by name.""" return { counter["name"]: int(counter["value"]) for counter in self.getGlobalResults(parsed_json)["counters"] } def getNighthawkGlobalHistogramsbyIdFromJson(self, parsed_json): - """ - Utility method to get the global histograms from the json indexed by id. 
- """ + """Get the global histograms from the json indexed by id.""" return { statistic["id"]: statistic for statistic in self.getGlobalResults(parsed_json)["statistics"] } def getTestServerRootUri(self, https=False): - """ - Utility for getting the http://host:port/ that can be used to query the server we started in setUp() - """ + """Get the http://host:port/ that can be used to query the server we started in setUp().""" uri_host = self.server_ip if self.ip_version == IpVersion.IPV6: uri_host = "[%s]" % self.server_ip @@ -177,10 +178,7 @@ def getTestServerRootUri(self, https=False): return uri def getAllTestServerRootUris(self, https=False): - """ - Utility for getting the list of http://host:port/ that can be used to query the servers we started - in setUp() - """ + """Get the list of http://host:port/ that can be used to query the servers we started in setUp().""" uri_host = self.server_ip if self.ip_version == IpVersion.IPV6: uri_host = "[%s]" % self.server_ip @@ -191,24 +189,18 @@ def getAllTestServerRootUris(self, https=False): ] def getTestServerStatisticsJson(self): - """ - Utility to grab a statistics snapshot from the test server. - """ + """Grab a statistics snapshot from the test server.""" return self.test_server.fetchJsonFromAdminInterface("/stats?format=json") def getAllTestServerStatisticsJsons(self): - """ - Utility to grab a statistics snapshot from multiple test servers. - """ + """Grab a statistics snapshot from multiple test servers.""" return [ test_server.fetchJsonFromAdminInterface("/stats?format=json") for test_server in self._test_servers ] def getServerStatFromJson(self, server_stats_json, name): - """ - Utility to extract one statistic from a single json snapshot. 
- """ + """Extract one statistic from a single json snapshot.""" counters = server_stats_json["stats"] for counter in counters: if counter["name"] == name: @@ -216,9 +208,10 @@ def getServerStatFromJson(self, server_stats_json, name): return None def runNighthawkClient(self, args, expect_failure=False, timeout=30, as_json=True): - """ - Runs Nighthawk against the test server, returning a json-formatted result - and logs. If the timeout is exceeded an exception will be raised. + """Run Nighthawk against the test server. + + Returns a string containing json-formatted result plus logs. + If the timeout is exceeded an exception will be raised. """ # Copy the args so our modifications to it stay local. args = args.copy() @@ -256,26 +249,31 @@ def transformNighthawkJson(self, json, format="human"): json: String containing raw json output obtained via nighthawk_client --output-format=json format: String that specifies the desired output format. Must be one of [human|yaml|dotted-string|fortio]. Optional, defaults to "human". """ - # TODO(oschaaf): validate format arg. args = [] if os.getenv("NH_DOCKER_IMAGE", "") != "": args = ["docker", "run", "--rm", "-i", os.getenv("NH_DOCKER_IMAGE")] args = args + [self._nighthawk_output_transform_path, "--output-format", format] logging.info("Nighthawk output transform popen() args: %s" % args) - client_process = subprocess.Popen( - args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + client_process = subprocess.Popen(args, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) logging.info("Nighthawk client popen() args: [%s]" % args) stdout, stderr = client_process.communicate(input=json.encode()) - logs = stderr.decode('utf-8') - output = stdout.decode('utf-8') + # We suppress declared but not used warnings below, as these may produce helpful + # in test failures (via pytests introspection and logging). 
+ logs = stderr.decode('utf-8') # noqa(F841) + output = stdout.decode('utf-8') # noqa(F841) assert (client_process.returncode == 0) return stdout.decode('utf-8') - def assertIsSubset(self, subset, superset): - self.assertLessEqual(subset.items(), superset.items()) - def startNighthawkGrpcService(self, service_name="traffic-generator-service"): + """Start the Nighthawk gRPC service. + + Args: + service_name (String, optional): Service type to start. Defaults to "traffic-generator-service". + """ host = self.server_ip if self.ip_version == IpVersion.IPV4 else "[%s]" % self.server_ip self.grpc_service = NighthawkGrpcService(self._nighthawk_service_path, host, self.ip_version, service_name) @@ -283,8 +281,8 @@ def startNighthawkGrpcService(self, service_name="traffic-generator-service"): class HttpIntegrationTestBase(IntegrationTestBase): - """ - Base for running plain http tests against the Nighthawk test server + """Base for running plain http tests against the Nighthawk test server. + NOTE: any script that consumes derivations of this, needs to needs also explictly import server_config, to avoid errors caused by the server_config not being found by pytest. 
@@ -300,9 +298,7 @@ def getTestServerRootUri(self): class MultiServerHttpIntegrationTestBase(IntegrationTestBase): - """ - Base for running plain http tests against multiple Nighthawk test servers - """ + """Base for running plain http tests against multiple Nighthawk test servers.""" def __init__(self, ip_version, server_config, backend_count): """See base class.""" @@ -319,9 +315,7 @@ def getAllTestServerRootUris(self): class HttpsIntegrationTestBase(IntegrationTestBase): - """ - Base for https tests against the Nighthawk test server - """ + """Base for https tests against the Nighthawk test server.""" def __init__(self, ip_version, server_config): """See base class.""" @@ -333,11 +327,10 @@ def getTestServerRootUri(self): class SniIntegrationTestBase(HttpsIntegrationTestBase): - """ - Base for https/sni tests against the Nighthawk test server - """ + """Base for https/sni tests against the Nighthawk test server.""" def __init__(self, ip_version, server_config): + """See base class.""" super(SniIntegrationTestBase, self).__init__(ip_version, server_config) def getTestServerRootUri(self): @@ -346,11 +339,10 @@ def getTestServerRootUri(self): class MultiServerHttpsIntegrationTestBase(IntegrationTestBase): - """ - Base for https tests against multiple Nighthawk test servers - """ + """Base for https tests against multiple Nighthawk test servers.""" def __init__(self, ip_version, server_config, backend_count): + """See base class.""" super(MultiServerHttpsIntegrationTestBase, self).__init__(ip_version, server_config, backend_count) @@ -365,11 +357,21 @@ def getAllTestServerRootUris(self): @pytest.fixture() def server_config(): + """Fixture which yields the path to the stock server configuration. + + Yields: + String: Path to the stock server configuration. 
+ """ yield "nighthawk/test/integration/configurations/nighthawk_http_origin.yaml" @pytest.fixture(params=determineIpVersionsFromEnvironment()) def http_test_server_fixture(request, server_config): + """Fixture for setting up a test environment with the stock http server configuration. + + Yields: + HttpIntegrationTestBase: A fully set up instance. Tear down will happen automatically. + """ f = HttpIntegrationTestBase(request.param, server_config) f.setUp() yield f @@ -378,6 +380,11 @@ def http_test_server_fixture(request, server_config): @pytest.fixture(params=determineIpVersionsFromEnvironment()) def https_test_server_fixture(request, server_config): + """Fixture for setting up a test environment with the stock https server configuration. + + Yields: + HttpsIntegrationTestBase: A fully set up instance. Tear down will happen automatically. + """ f = HttpsIntegrationTestBase(request.param, server_config) f.setUp() yield f @@ -386,6 +393,11 @@ def https_test_server_fixture(request, server_config): @pytest.fixture(params=determineIpVersionsFromEnvironment()) def multi_http_test_server_fixture(request, server_config): + """Fixture for setting up a test environment with multiple servers, using the stock http server configuration. + + Yields: + MultiServerHttpIntegrationTestBase: A fully set up instance. Tear down will happen automatically. + """ f = MultiServerHttpIntegrationTestBase(request.param, server_config, backend_count=3) f.setUp() yield f @@ -394,6 +406,11 @@ def multi_http_test_server_fixture(request, server_config): @pytest.fixture(params=determineIpVersionsFromEnvironment()) def multi_https_test_server_fixture(request, server_config): + """Fixture for setting up a test environment with multiple servers, using the stock https server configuration. + + Yields: + MultiServerHttpsIntegrationTestBase: A fully set up instance. Tear down will happen automatically. 
+ """ f = MultiServerHttpsIntegrationTestBase(request.param, server_config, backend_count=3) f.setUp() yield f diff --git a/test/integration/nighthawk_grpc_service.py b/test/integration/nighthawk_grpc_service.py index 7af686ca3..eab1ccde5 100644 --- a/test/integration/nighthawk_grpc_service.py +++ b/test/integration/nighthawk_grpc_service.py @@ -1,3 +1,4 @@ +"""Contains the NighthawkGrpcService class.""" import logging import socket import subprocess @@ -10,15 +11,15 @@ # TODO(oschaaf): unify some of this code with the test server wrapper. class NighthawkGrpcService(object): - """ - Class for running the Nighthawk gRPC service in a separate process. + """Class for running the Nighthawk gRPC service in a separate process. + Usage: grpc_service = NighthawkGrpcService("/path/to/nighthawk_service"), "127.0.0.1", IpVersion.IPV4) if grpc_service.start(): .... You can talk to the Nighthawk gRPC service at the 127.0.0.1:grpc_service.server_port ... Attributes: - server_ip: IP address used by the gRPC service to listen. + server_ip: IP address used by the gRPC service to listen. server_port: An integer, indicates the port used by the gRPC service to listen. 0 means that the server is not listening. log_lines: An array of log lines emitted by the service. Available after stop() is called, reset to None on start(). """ @@ -28,7 +29,7 @@ def __init__(self, server_ip, ip_version, service_name="traffic-generator-service"): - """Initializes Nighthawk gRPC service. + """Initialize the Nighthawk gRPC service. Args: server_binary_path: A string, indicates where the nighthawk gRPC service binary resides @@ -66,7 +67,7 @@ def _waitUntilServerListening(self): tries = 90 while tries > 0: contents = "" - if not self._address_file is None: + if self._address_file is not None: try: with open(self._address_file) as f: contents = f.read().strip() @@ -85,21 +86,21 @@ def _waitUntilServerListening(self): return False def start(self): - """ - Starts the Nighthawk gRPC service. 
Returns True upon success, after which the server_port attribute + """Start the Nighthawk gRPC service. + + Returns True upon success, after which the server_port attribute can be queried to get the listening port. """ - self.log_lines = None self._server_thread.daemon = True self._server_thread.start() return self._waitUntilServerListening() def stop(self): - """ - Signals the Nighthawk gRPC service to stop, waits for its termination, and returns the exit code of the associated process. - """ + """Signal the Nighthawk gRPC service to stop. + Waits for its termination, and returns the exit code of the associated process. + """ self._server_process.terminate() self._server_thread.join() self.server_port = 0 diff --git a/test/integration/nighthawk_test_server.py b/test/integration/nighthawk_test_server.py index 4f3ddb202..6a6deb849 100644 --- a/test/integration/nighthawk_test_server.py +++ b/test/integration/nighthawk_test_server.py @@ -1,3 +1,5 @@ +"""Contains the NighthawkTestServer class, which wraps the nighthawk_test_server binary.""" + import http.client import json import logging @@ -39,27 +41,29 @@ def _substitute_yaml_values(runfiles_instance, obj, params): class TestServerBase(object): + """Base class for running a server in a separate process. + + Attributes: + ip_version: IP version that the proxy should use when listening. + server_ip: string containing the server ip that will be used to listen + server_port: Integer, get the port used by the server to listen for traffic. + docker_image: String, supplies a docker image for execution of the test server binary. Sourced from environment variable NH_DOCKER_IMAGE. + tmpdir: String, indicates the location used to store outputs like logs. """ - Base class for running a server in a separate process. - - Arguments: - server_binary_path: String, specify the path to the test server binary. - config_template_path: String, specify the path to the test server configuration template. 
- server_ip: String, specify the ip address the test server should use to listen for traffic. - server_binary_config_path_arg: String, specify the name of the CLI argument the test server binary uses to accept a configuration path. - parameters: Dictionary. Supply this to provide configuration template parameter replacement values. - tag: String. Supply this to get recognizeable output locations. - - Attributes: - ip_version: IP version that the proxy should use when listening. - server_ip: string containing the server ip that will be used to listen - server_port: Integer, get the port used by the server to listen for traffic. - docker_image: String, supplies a docker image for execution of the test server binary. Sourced from environment variable NH_DOCKER_IMAGE. - tmpdir: String, indicates the location used to store outputs like logs. - """ def __init__(self, server_binary_path, config_template_path, server_ip, ip_version, server_binary_config_path_arg, parameters, tag): + """Initialize a TestServerBase instance. + + Args: + server_binary_path (str): specify the path to the server binary. + config_template_path (str): specify the path to the test server configuration template. + server_ip (str): Specify the ip address the test server should use to listen for traffic. + ip_version (IPAddress): Specify the ip version the server should use to listen for traffic. + server_binary_config_path_arg (str): Specify the name of the CLI argument the test server binary uses to accept a configuration path. + parameters (dict): Supply to provide configuration template parameter replacement values. + tag (str): Supply to get recognizable output locations. 
+ """ assert ip_version != IpVersion.UNKNOWN self.ip_version = ip_version self.server_ip = server_ip @@ -73,7 +77,7 @@ def __init__(self, server_binary_path, config_template_path, server_ip, ip_versi self._parameters["tmpdir"] = self.tmpdir self._parameters["tag"] = tag self._server_process = None - self._server_thread = threading.Thread(target=self.serverThreadRunner) + self._server_thread = threading.Thread(target=self._serverThreadRunner) self._admin_address_path = "" self._parameterized_config_path = "" self._instance_id = str(random.randint(1, 1024 * 1024 * 1024)) @@ -88,22 +92,21 @@ def _prepareForExecution(self): Path(self.tmpdir).mkdir(parents=True, exist_ok=True) - with tempfile.NamedTemporaryFile( - mode="w", delete=False, suffix=".config.yaml", dir=self.tmpdir) as tmp: + with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".config.yaml", + dir=self.tmpdir) as tmp: self._parameterized_config_path = tmp.name - yaml.safe_dump( - data, - tmp, - default_flow_style=False, - explicit_start=True, - allow_unicode=True, - encoding='utf-8') - - with tempfile.NamedTemporaryFile( - mode="w", delete=False, suffix=".adminport", dir=self.tmpdir) as tmp: + yaml.safe_dump(data, + tmp, + default_flow_style=False, + explicit_start=True, + allow_unicode=True, + encoding='utf-8') + + with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".adminport", + dir=self.tmpdir) as tmp: self._admin_address_path = tmp.name - def serverThreadRunner(self): + def _serverThreadRunner(self): args = [] if self.docker_image != "": # TODO(#383): As of https://github.com/envoyproxy/envoy/commit/e8a2d1e24dc9a0da5273442204ec3cdfad1e7ca8 @@ -125,6 +128,17 @@ def serverThreadRunner(self): logging.debug(stderr.decode("utf-8")) def fetchJsonFromAdminInterface(self, path): + """Fetch and parse json from the admin interface. + + Args: + path (str): Request uri path and query to use when fetching. E.g. 
"/stats?format=json" + + Raises: + NighthawkException: Raised when the fetch resulted in any http status code other then 200. + + Returns: + json: Parsed json object. + """ uri_host = self.server_ip if self.ip_version == IpVersion.IPV6: uri_host = "[%s]" % self.server_ip @@ -136,7 +150,7 @@ def fetchJsonFromAdminInterface(self, path): r.status_code) return r.json() - def tryUpdateFromAdminInterface(self): + def _tryUpdateFromAdminInterface(self): with open(self._admin_address_path) as admin_address_file: admin_address = admin_address_file.read() tmp = admin_address.split(":") @@ -155,6 +169,11 @@ def tryUpdateFromAdminInterface(self): return False def enableCpuProfiler(self): + """Enable the built-in cpu profiler of the test server. + + Returns: + Bool: True iff the cpu profiler was succesfully enabled. + """ uri_host = self.server_ip if self.ip_version == IpVersion.IPV6: uri_host = "[%s]" % self.server_ip @@ -163,23 +182,33 @@ def enableCpuProfiler(self): logging.info("Enabled CPU profiling via %s: %s", uri, r.status_code == 200) return r.status_code == 200 - def waitUntilServerListening(self): + def _waitUntilServerListening(self): # we allow 30 seconds for the server to have its listeners up. # (It seems that in sanitizer-enabled runs this can take a little while) timeout = time.time() + 60 while time.time() < timeout: - if self.tryUpdateFromAdminInterface(): + if self._tryUpdateFromAdminInterface(): return True time.sleep(0.1) - logging.error("Timeout in waitUntilServerListening()") + logging.error("Timeout in _waitUntilServerListening()") return False def start(self): + """Start the server. + + Returns: + Bool: True iff the server started successfully. + """ self._server_thread.daemon = True self._server_thread.start() - return self.waitUntilServerListening() + return self._waitUntilServerListening() def stop(self): + """Stop the server. + + Returns: + Int: exit code of the server process. 
+ """ os.remove(self._admin_address_path) self._server_process.terminate() self._server_thread.join() @@ -187,10 +216,11 @@ def stop(self): class NighthawkTestServer(TestServerBase): + """Run the Nighthawk test server in a separate process. + + Passes in the right cli-arg to point it to its + configuration. For, say, NGINX this would be '-c' instead. """ - Will run the Nighthawk test server in a separate process. Passes in the right cli-arg to point it to its - configuration. For, say, NGINX this would be '-c' instead. - """ def __init__(self, server_binary_path, @@ -199,13 +229,21 @@ def __init__(self, ip_version, parameters=dict(), tag=""): + """Initialize a NighthawkTestServer instance. + + Args: + server_binary_path (String): Path to the nighthawk test server binary. + config_template_path (String): Path to the nighthawk test server configuration template. + server_ip (String): Ip address for the server to use when listening. + ip_version (IPVersion): IPVersion enum member indicating the ip version that the server should use when listening. + parameters (dictionary, optional): Directionary with replacement values for substition purposes in the server configuration template. Defaults to dict(). + tag (str, optional): Tags. Supply this to get recognizeable output locations. Defaults to "". + """ super(NighthawkTestServer, self).__init__(server_binary_path, config_template_path, server_ip, ip_version, "--config-path", parameters, tag) def getCliVersionString(self): - """ Get the version string as written to the output by the CLI. 
- """ - + """Get the version string as written to the output by the CLI.""" args = [] if self.docker_image != "": args = ["docker", "run", "--rm", self.docker_image] diff --git a/test/integration/test_connection_management.py b/test/integration/test_connection_management.py index 9f1fd5459..317ee6190 100644 --- a/test/integration/test_connection_management.py +++ b/test/integration/test_connection_management.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +"""Test connection management of nighthawk's load generator.""" import logging import os @@ -6,17 +6,18 @@ import pytest from test.integration.integration_test_fixtures import (http_test_server_fixture, server_config) -from test.integration.utility import * - - -def run_with_number_of_connections(fixture, - number_of_connections, - expected_connections=-1, - max_pending_requests=0, - requests_per_connection=1024 * 1024, - rps=100, - run_test_expectation=True, - h2=False): +from test.integration import asserts +from test.integration import utility + + +def _run_with_number_of_connections(fixture, + number_of_connections, + expected_connections=-1, + max_pending_requests=0, + requests_per_connection=1024 * 1024, + rps=100, + run_test_expectation=True, + h2=False): if expected_connections == -1: expected_connections = number_of_connections # We add a delay to responses to make sure connections are needed, as the pool creates connections on-demand. 
@@ -33,72 +34,77 @@ def run_with_number_of_connections(fixture, parsed_json, _ = fixture.runNighthawkClient(args) counters = fixture.getNighthawkCounterMapFromJson(parsed_json) if run_test_expectation: - assertCounterEqual(counters, "upstream_cx_total", expected_connections) + asserts.assertCounterEqual(counters, "upstream_cx_total", expected_connections) return counters # A series that tests with queueing disabled -@pytest.mark.skipif(isSanitizerRun(), reason="Unstable in sanitizer runs") +@pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable in sanitizer runs") def test_http_h1_connection_management_1(http_test_server_fixture): - run_with_number_of_connections(http_test_server_fixture, 1) + """Test http h1 connection management with 1 connection and queueing disabled.""" + _run_with_number_of_connections(http_test_server_fixture, 1) -@pytest.mark.skipif(isSanitizerRun(), reason="Unstable in sanitizer runs") +@pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable in sanitizer runs") def test_http_h1_connection_management_2(http_test_server_fixture): - run_with_number_of_connections(http_test_server_fixture, 2) + """Test http h1 connection management with 2 connections and queueing disabled.""" + _run_with_number_of_connections(http_test_server_fixture, 2) # A series that tests with queueing enabled -@pytest.mark.skipif(isSanitizerRun(), reason="Unstable in sanitizer runs") +@pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable in sanitizer runs") def test_http_h1_connection_management_with_queue_1(http_test_server_fixture): - run_with_number_of_connections(http_test_server_fixture, 1, max_pending_requests=5) + """Test http h1 connection management with 1 connection and queueing enabled.""" + _run_with_number_of_connections(http_test_server_fixture, 1, max_pending_requests=5) -@pytest.mark.skipif(isSanitizerRun(), reason="Unstable in sanitizer runs") +@pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable in sanitizer runs") def 
test_http_h1_connection_management_with_queue_5(http_test_server_fixture): - run_with_number_of_connections(http_test_server_fixture, 5, max_pending_requests=5) + """Test http h1 connection management with 5 connections and queueing enabled.""" + _run_with_number_of_connections(http_test_server_fixture, 5, max_pending_requests=5) -def connection_management_test_request_per_connection(fixture, requests_per_connection, use_h2): +def _connection_management_test_request_per_connection(fixture, requests_per_connection, use_h2): max_requests_per_conn = 5 - counters = run_with_number_of_connections( - fixture, - 1, - max_pending_requests=1, - requests_per_connection=max_requests_per_conn, - run_test_expectation=False, - h2=use_h2) + counters = _run_with_number_of_connections(fixture, + 1, + max_pending_requests=1, + requests_per_connection=max_requests_per_conn, + run_test_expectation=False, + h2=use_h2) requests = counters["upstream_rq_total"] - assertCounterBetweenInclusive(counters, "upstream_cx_total", (requests / max_requests_per_conn), - (requests / max_requests_per_conn) + 1) + asserts.assertCounterBetweenInclusive(counters, "upstream_cx_total", + (requests / max_requests_per_conn), + (requests / max_requests_per_conn) + 1) -# Test h1 with a single request_per_connection -@pytest.mark.skipif(isSanitizerRun(), reason="Unstable in sanitizer runs") +@pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable in sanitizer runs") def test_http_h1_connection_management_single_request_per_conn_1(http_test_server_fixture): - connection_management_test_request_per_connection(http_test_server_fixture, 1, False) + """Test h1 with a single request_per_connection.""" + _connection_management_test_request_per_connection(http_test_server_fixture, 1, False) -# Test h1 with a request_per_connection set to 5 -@pytest.mark.skipif(isSanitizerRun(), reason="Unstable in sanitizer runs") +@pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable in sanitizer runs") def 
test_http_h1_connection_management_single_request_per_conn_5(http_test_server_fixture): - connection_management_test_request_per_connection(http_test_server_fixture, 5, False) + """Test h1 with a request_per_connection set to 5.""" + _connection_management_test_request_per_connection(http_test_server_fixture, 5, False) -# Test h2 with a single request_per_connection -@pytest.mark.skipif(isSanitizerRun(), reason="Unstable in sanitizer runs") +@pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable in sanitizer runs") def test_http_h2_connection_management_single_request_per_conn_1(http_test_server_fixture): - connection_management_test_request_per_connection(http_test_server_fixture, 1, True) + """Test h2 with a single request_per_connection.""" + _connection_management_test_request_per_connection(http_test_server_fixture, 1, True) -# Test h2 with a request_per_connection set to 5 -@pytest.mark.skipif(isSanitizerRun(), reason="Unstable in sanitizer runs") +@pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable in sanitizer runs") def test_http_h2_connection_management_single_request_per_conn_1(http_test_server_fixture): - connection_management_test_request_per_connection(http_test_server_fixture, 5, True) + """Test h2 with a request_per_connection set to 5.""" + _connection_management_test_request_per_connection(http_test_server_fixture, 5, True) def test_h1_pool_strategy(http_test_server_fixture): - """ + """Test connection re-use strategies of the http 1 connection pool. + Test that with the "mru" strategy only the first created connection gets to send requests. Then, with the "lru" strategy, we expect the other connection to be used as well. 
""" @@ -113,8 +119,8 @@ def countLogLinesWithSubstring(logs, substring): http_test_server_fixture.getTestServerRootUri() ]) - assertNotIn("[C1] message complete", logs) - assertEqual(countLogLinesWithSubstring(logs, "[C0] message complete"), 10) + asserts.assertNotIn("[C1] message complete", logs) + asserts.assertEqual(countLogLinesWithSubstring(logs, "[C0] message complete"), 10) requests = 12 connections = 3 @@ -128,4 +134,4 @@ def countLogLinesWithSubstring(logs, substring): for i in range(1, connections): line_count = countLogLinesWithSubstring(logs, "[C%d] message complete" % i) strict_count = (requests / connections) * 2 - assertBetweenInclusive(line_count, strict_count, strict_count) + asserts.assertBetweenInclusive(line_count, strict_count, strict_count) diff --git a/test/integration/test_grpc_service.py b/test/integration/test_grpc_service.py index 1271cd5b0..316b32f33 100644 --- a/test/integration/test_grpc_service.py +++ b/test/integration/test_grpc_service.py @@ -1,11 +1,14 @@ -#!/usr/bin/env python3 +"""Tests for the nighthawk_service binary.""" + import pytest from test.integration.integration_test_fixtures import (http_test_server_fixture, server_config) from test.integration import utility +from test.integration import asserts def test_grpc_service_happy_flow(http_test_server_fixture): + """Test that the gRPC service is able to execute a load test against the test server.""" http_test_server_fixture.startNighthawkGrpcService("dummy-request-source") parsed_json, _ = http_test_server_fixture.runNighthawkClient([ "--termination-predicate", "benchmark.http_2xx:5", "--rps 10", @@ -14,11 +17,12 @@ def test_grpc_service_happy_flow(http_test_server_fixture): http_test_server_fixture.getTestServerRootUri() ]) counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - utility.assertGreaterEqual(counters["benchmark.http_2xx"], 5) - utility.assertEqual(counters["requestsource.internal.upstream_rq_200"], 1) + 
asserts.assertGreaterEqual(counters["benchmark.http_2xx"], 5) + asserts.assertEqual(counters["requestsource.internal.upstream_rq_200"], 1) def test_grpc_service_down(http_test_server_fixture): + """Test that the gRPC service detects that a test server is down.""" parsed_json, _ = http_test_server_fixture.runNighthawkClient([ "--rps 100", "--request-source %s:%s" % (http_test_server_fixture.server_ip, "34589"), @@ -26,11 +30,12 @@ def test_grpc_service_down(http_test_server_fixture): ], expect_failure=True) counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - utility.assertEqual(counters["requestsource.upstream_rq_pending_failure_eject"], 1) + asserts.assertEqual(counters["requestsource.upstream_rq_pending_failure_eject"], 1) @pytest.mark.skipif(utility.isSanitizerRun(), reason="Slow in sanitizer runs") def test_grpc_service_stress(http_test_server_fixture): + """Test high load.""" http_test_server_fixture.startNighthawkGrpcService("dummy-request-source") parsed_json, _ = http_test_server_fixture.runNighthawkClient([ "--duration 100", "--rps 10000", "--concurrency 4", "--termination-predicate", @@ -40,8 +45,8 @@ def test_grpc_service_stress(http_test_server_fixture): http_test_server_fixture.getTestServerRootUri() ]) counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - utility.assertGreaterEqual(counters["benchmark.http_2xx"], 5000) - utility.assertEqual(counters["requestsource.internal.upstream_rq_200"], 4) + asserts.assertGreaterEqual(counters["benchmark.http_2xx"], 5000) + asserts.assertEqual(counters["requestsource.internal.upstream_rq_200"], 4) def _run_service_with_args(args): @@ -49,18 +54,21 @@ def _run_service_with_args(args): def test_grpc_service_help(): + """Test that the gRPC service behaves as expected when --help is passed.""" (exit_code, output) = _run_service_with_args("--help") - utility.assertEqual(exit_code, 0) - utility.assertIn("USAGE", output) + asserts.assertEqual(exit_code, 0) + 
asserts.assertIn("USAGE", output) def test_grpc_service_bad_arguments(): + """Test that the gRPC service behaves as expected with bad cli arguments.""" (exit_code, output) = _run_service_with_args("--foo") - utility.assertEqual(exit_code, 1) - utility.assertIn("PARSE ERROR: Argument: --foo", output) + asserts.assertEqual(exit_code, 1) + asserts.assertIn("PARSE ERROR: Argument: --foo", output) def test_grpc_service_nonexisting_listener_address(): + """Test that the gRPC service behaves as expected when an address is passed that it can't listen to.""" (exit_code, output) = _run_service_with_args("--listen 1.1.1.1:1") - utility.assertEqual(exit_code, 1) - utility.assertIn("Failure: Could not start the grpc service", output) + asserts.assertEqual(exit_code, 1) + asserts.assertIn("Failure: Could not start the grpc service", output) diff --git a/test/integration/test_integration_basics.py b/test/integration/test_integration_basics.py index 8cc7e2fbd..85bc4f59b 100644 --- a/test/integration/test_integration_basics.py +++ b/test/integration/test_integration_basics.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +"""Tests Nighthawk's basic functionality.""" import json import logging @@ -10,17 +10,21 @@ from threading import Thread from test.integration.common import IpVersion -from test.integration.integration_test_fixtures import ( - http_test_server_fixture, https_test_server_fixture, multi_http_test_server_fixture, - multi_https_test_server_fixture, server_config) -from test.integration.utility import * +from test.integration.integration_test_fixtures import (http_test_server_fixture, + https_test_server_fixture, + multi_http_test_server_fixture, + multi_https_test_server_fixture, + server_config) +from test.integration import asserts +from test.integration import utility # TODO(oschaaf): we mostly verify stats observed from the client-side. Add expectations # for the server side as well. def test_http_h1(http_test_server_fixture): - """ + """Test http1 over plain http. 
+ Runs the CLI configured to use plain HTTP/1 against our test server, and sanity checks statistics from both client and server. """ @@ -29,32 +33,43 @@ def test_http_h1(http_test_server_fixture): "--termination-predicate", "benchmark.http_2xx:24" ]) counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertCounterEqual(counters, "benchmark.http_2xx", 25) - assertCounterEqual(counters, "upstream_cx_http1_total", 1) - assertCounterEqual(counters, "upstream_cx_rx_bytes_total", 3400) - assertCounterEqual(counters, "upstream_cx_total", 1) - assertCounterEqual(counters, "upstream_cx_tx_bytes_total", - 1400 if http_test_server_fixture.ip_version == IpVersion.IPV6 else 1450) - assertCounterEqual(counters, "upstream_rq_pending_total", 1) - assertCounterEqual(counters, "upstream_rq_total", 25) - assertCounterEqual(counters, "default.total_match_count", 1) + asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25) + asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 1) + asserts.assertCounterEqual(counters, "upstream_cx_rx_bytes_total", 3400) + asserts.assertCounterEqual(counters, "upstream_cx_total", 1) + asserts.assertCounterEqual( + counters, "upstream_cx_tx_bytes_total", + 1400 if http_test_server_fixture.ip_version == IpVersion.IPV6 else 1450) + asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1) + asserts.assertCounterEqual(counters, "upstream_rq_total", 25) + asserts.assertCounterEqual(counters, "default.total_match_count", 1) global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(parsed_json) - assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["count"]), 25) - assertEqual(int(global_histograms["benchmark_http_client.response_header_size"]["count"]), 25) - assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["raw_mean"]), 10) - assertEqual(int(global_histograms["benchmark_http_client.response_header_size"]["raw_mean"]), 
97) - assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["raw_min"]), 10) - assertEqual(int(global_histograms["benchmark_http_client.response_header_size"]["raw_min"]), 97) - assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["raw_max"]), 10) - assertEqual(int(global_histograms["benchmark_http_client.response_header_size"]["raw_max"]), 97) - assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["raw_pstdev"]), 0) - assertEqual(int(global_histograms["benchmark_http_client.response_header_size"]["raw_pstdev"]), 0) - - assertEqual(len(counters), 12) - - -def mini_stress_test(fixture, args): + asserts.assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["count"]), + 25) + asserts.assertEqual(int(global_histograms["benchmark_http_client.response_header_size"]["count"]), + 25) + asserts.assertEqual( + int(global_histograms["benchmark_http_client.response_body_size"]["raw_mean"]), 10) + asserts.assertEqual( + int(global_histograms["benchmark_http_client.response_header_size"]["raw_mean"]), 97) + asserts.assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["raw_min"]), + 10) + asserts.assertEqual( + int(global_histograms["benchmark_http_client.response_header_size"]["raw_min"]), 97) + asserts.assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["raw_max"]), + 10) + asserts.assertEqual( + int(global_histograms["benchmark_http_client.response_header_size"]["raw_max"]), 97) + asserts.assertEqual( + int(global_histograms["benchmark_http_client.response_body_size"]["raw_pstdev"]), 0) + asserts.assertEqual( + int(global_histograms["benchmark_http_client.response_header_size"]["raw_pstdev"]), 0) + + asserts.assertEqual(len(counters), 12) + + +def _mini_stress_test(fixture, args): # run a test with more rps then we can handle, and a very small client-side queue. 
# we should observe both lots of successfull requests as well as time spend in blocking mode., parsed_json, _ = fixture.runNighthawkClient(args) @@ -62,19 +77,19 @@ def mini_stress_test(fixture, args): # We set a reasonably low expectation of 100 requests. We set it low, because we want this # test to succeed on a reasonable share of setups (hopefully practically all). MIN_EXPECTED_REQUESTS = 100 - assertCounterEqual(counters, "benchmark.http_2xx", MIN_EXPECTED_REQUESTS) + asserts.assertCounterEqual(counters, "benchmark.http_2xx", MIN_EXPECTED_REQUESTS) if "--h2" in args: - assertCounterEqual(counters, "upstream_cx_http2_total", 1) + asserts.assertCounterEqual(counters, "upstream_cx_http2_total", 1) else: - assertCounterEqual(counters, "upstream_cx_http1_total", 1) + asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 1) global_histograms = fixture.getNighthawkGlobalHistogramsbyIdFromJson(parsed_json) if "--open-loop" in args: - assertEqual(int(global_histograms["sequencer.blocking"]["count"]), 0) + asserts.assertEqual(int(global_histograms["sequencer.blocking"]["count"]), 0) else: - assertGreaterEqual(int(global_histograms["sequencer.blocking"]["count"]), 1) + asserts.assertGreaterEqual(int(global_histograms["sequencer.blocking"]["count"]), 1) - assertGreaterEqual( + asserts.assertGreaterEqual( int(global_histograms["benchmark_http_client.request_to_response"]["count"]), 1) return counters @@ -83,89 +98,75 @@ def mini_stress_test(fixture, args): # overflows, we can set fixed expectations with respect to overflows and anticipated pending # totals. 
def test_http_h1_mini_stress_test_with_client_side_queueing(http_test_server_fixture): - """ - Run a max rps test with the h1 pool against our test server, using a small client-side - queue.""" - counters = mini_stress_test(http_test_server_fixture, [ + """Run a max rps test with the h1 pool against our test server, using a small client-side queue.""" + counters = _mini_stress_test(http_test_server_fixture, [ http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--max-pending-requests", "10", "--connections", "1", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:99", "--simple-warmup" ]) - assertCounterGreaterEqual(counters, "upstream_rq_pending_total", 11) - assertCounterGreaterEqual(counters, "upstream_cx_overflow", 10) + asserts.assertCounterGreaterEqual(counters, "upstream_rq_pending_total", 11) + asserts.assertCounterGreaterEqual(counters, "upstream_cx_overflow", 10) def test_http_h1_mini_stress_test_without_client_side_queueing(http_test_server_fixture): - """ - Run a max rps test with the h1 pool against our test server, with no client-side - queueing. - """ - counters = mini_stress_test(http_test_server_fixture, [ + """Run a max rps test with the h1 pool against our test server, with no client-side queueing.""" + counters = _mini_stress_test(http_test_server_fixture, [ http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--connections", "1", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:99" ]) - assertCounterEqual(counters, "upstream_rq_pending_total", 1) - assertNotIn("upstream_cx_overflow", counters) + asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1) + asserts.assertNotIn("upstream_cx_overflow", counters) def test_http_h2_mini_stress_test_with_client_side_queueing(http_test_server_fixture): - """ - Run a max rps test with the h2 pool against our test server, using a small client-side - queue. 
- """ - counters = mini_stress_test(http_test_server_fixture, [ + """Run a max rps test with the h2 pool against our test server, using a small client-side queue.""" + counters = _mini_stress_test(http_test_server_fixture, [ http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--max-pending-requests", "10", "--h2", "--max-active-requests", "1", "--connections", "1", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:99", "--simple-warmup" ]) - assertCounterEqual(counters, "upstream_rq_pending_total", 1) - assertCounterGreaterEqual(counters, "upstream_rq_pending_overflow", 10) + asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1) + asserts.assertCounterGreaterEqual(counters, "upstream_rq_pending_overflow", 10) def test_http_h2_mini_stress_test_without_client_side_queueing(http_test_server_fixture): - """ - Run a max rps test with the h2 pool against our test server, with no client-side - queueing. - """ - counters = mini_stress_test(http_test_server_fixture, [ + """Run a max rps test with the h2 pool against our test server, with no client-side queueing.""" + counters = _mini_stress_test(http_test_server_fixture, [ http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--h2", "--max-active-requests", "1", "--connections", "1", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:99" ]) - assertCounterEqual(counters, "upstream_rq_pending_total", 1) - assertNotIn("upstream_rq_pending_overflow", counters) + asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1) + asserts.assertNotIn("upstream_rq_pending_overflow", counters) -@pytest.mark.skipif(isSanitizerRun(), reason="Unstable and very slow in sanitizer runs") +@pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable and very slow in sanitizer runs") def test_http_h1_mini_stress_test_open_loop(http_test_server_fixture): - """ - H1 open loop stress test. 
We expect higher pending and overflow counts - """ - counters = mini_stress_test(http_test_server_fixture, [ + """Run an H1 open loop stress test. We expect higher pending and overflow counts.""" + counters = _mini_stress_test(http_test_server_fixture, [ http_test_server_fixture.getTestServerRootUri(), "--rps", "10000", "--max-pending-requests", "1", "--open-loop", "--max-active-requests", "1", "--connections", "1", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:99", "--simple-warmup" ]) # we expect pool overflows - assertCounterGreater(counters, "benchmark.pool_overflow", 10) + asserts.assertCounterGreater(counters, "benchmark.pool_overflow", 10) -@pytest.mark.skipif(isSanitizerRun(), reason="Unstable and very slow in sanitizer runs") +@pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable and very slow in sanitizer runs") def test_http_h2_mini_stress_test_open_loop(http_test_server_fixture): - """ - H2 open loop stress test. We expect higher overflow counts - """ - counters = mini_stress_test(http_test_server_fixture, [ + """Run an H2 open loop stress test. We expect higher overflow counts.""" + counters = _mini_stress_test(http_test_server_fixture, [ http_test_server_fixture.getTestServerRootUri(), "--rps", "10000", "--max-pending-requests", "1", "--h2", "--open-loop", "--max-active-requests", "1", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:99", "--simple-warmup" ]) # we expect pool overflows - assertCounterGreater(counters, "benchmark.pool_overflow", 10) + asserts.assertCounterGreater(counters, "benchmark.pool_overflow", 10) def test_http_h2(http_test_server_fixture): - """ + """Test h2 over plain http. + Runs the CLI configured to use h2c against our test server, and sanity checks statistics from both client and server. 
""" @@ -175,22 +176,19 @@ def test_http_h2(http_test_server_fixture): "100", "--termination-predicate", "benchmark.http_2xx:24", "--rps", "100" ]) counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertCounterEqual(counters, "benchmark.http_2xx", 25) - assertCounterEqual(counters, "upstream_cx_http2_total", 1) - assertCounterGreaterEqual(counters, "upstream_cx_rx_bytes_total", 1030) - assertCounterEqual(counters, "upstream_cx_total", 1) - assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", 403) - assertCounterEqual(counters, "upstream_rq_pending_total", 1) - assertCounterEqual(counters, "upstream_rq_total", 25) - assertCounterEqual(counters, "default.total_match_count", 1) - assertEqual(len(counters), 12) + asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25) + asserts.assertCounterEqual(counters, "upstream_cx_http2_total", 1) + asserts.assertCounterGreaterEqual(counters, "upstream_cx_rx_bytes_total", 1030) + asserts.assertCounterEqual(counters, "upstream_cx_total", 1) + asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", 403) + asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1) + asserts.assertCounterEqual(counters, "upstream_rq_total", 25) + asserts.assertCounterEqual(counters, "default.total_match_count", 1) + asserts.assertEqual(len(counters), 12) def test_http_concurrency(http_test_server_fixture): - """ - Concurrency should act like a multiplier. - """ - + """Test that concurrency acts like a multiplier.""" parsed_json, _ = http_test_server_fixture.runNighthawkClient([ "--concurrency 4 --rps 100 --connections 1", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:24", @@ -200,14 +198,15 @@ def test_http_concurrency(http_test_server_fixture): # Quite a loose expectation, but this may fluctuate depending on server load. 
# Ideally we'd see 4 workers * 5 rps * 5s = 100 requests total - assertCounterEqual(counters, "benchmark.http_2xx", 100) - assertCounterEqual(counters, "upstream_cx_http1_total", 4) + asserts.assertCounterEqual(counters, "benchmark.http_2xx", 100) + asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 4) @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_h1(https_test_server_fixture): - """ + """Test h1 over https. + Runs the CLI configured to use HTTP/1 over https against our test server, and sanity checks statistics from both client and server. """ @@ -216,24 +215,25 @@ def test_https_h1(https_test_server_fixture): "--duration", "100", "--termination-predicate", "benchmark.http_2xx:24" ]) counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertCounterEqual(counters, "benchmark.http_2xx", 25) - assertCounterEqual(counters, "upstream_cx_http1_total", 1) - assertCounterEqual(counters, "upstream_cx_rx_bytes_total", 3400) - assertCounterEqual(counters, "upstream_cx_total", 1) - assertCounterEqual(counters, "upstream_cx_tx_bytes_total", - 1400 if https_test_server_fixture.ip_version == IpVersion.IPV6 else 1450) - assertCounterEqual(counters, "upstream_rq_pending_total", 1) - assertCounterEqual(counters, "upstream_rq_total", 25) - assertCounterEqual(counters, "ssl.ciphers.ECDHE-RSA-AES128-GCM-SHA256", 1) - assertCounterEqual(counters, "ssl.curves.X25519", 1) - assertCounterEqual(counters, "ssl.handshake", 1) - assertCounterEqual(counters, "ssl.sigalgs.rsa_pss_rsae_sha256", 1) - assertCounterEqual(counters, "ssl.versions.TLSv1.2", 1) - assertCounterEqual(counters, "default.total_match_count", 1) - assertEqual(len(counters), 17) + asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25) + asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 1) + asserts.assertCounterEqual(counters, "upstream_cx_rx_bytes_total", 3400) + 
asserts.assertCounterEqual(counters, "upstream_cx_total", 1) + asserts.assertCounterEqual( + counters, "upstream_cx_tx_bytes_total", + 1400 if https_test_server_fixture.ip_version == IpVersion.IPV6 else 1450) + asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1) + asserts.assertCounterEqual(counters, "upstream_rq_total", 25) + asserts.assertCounterEqual(counters, "ssl.ciphers.ECDHE-RSA-AES128-GCM-SHA256", 1) + asserts.assertCounterEqual(counters, "ssl.curves.X25519", 1) + asserts.assertCounterEqual(counters, "ssl.handshake", 1) + asserts.assertCounterEqual(counters, "ssl.sigalgs.rsa_pss_rsae_sha256", 1) + asserts.assertCounterEqual(counters, "ssl.versions.TLSv1.2", 1) + asserts.assertCounterEqual(counters, "default.total_match_count", 1) + asserts.assertEqual(len(counters), 17) server_stats = https_test_server_fixture.getTestServerStatisticsJson() - assertEqual( + asserts.assertEqual( https_test_server_fixture.getServerStatFromJson(server_stats, "http.ingress_http.downstream_rq_2xx"), 25) @@ -241,7 +241,8 @@ def test_https_h1(https_test_server_fixture): @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_h2(https_test_server_fixture): - """ + """Test http2 over https. + Runs the CLI configured to use HTTP/2 (using https) against our test server, and sanity checks statistics from both client and server. 
""" @@ -251,30 +252,30 @@ def test_https_h2(https_test_server_fixture): "--termination-predicate", "benchmark.http_2xx:24", "--max-active-requests", "1" ]) counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertCounterEqual(counters, "benchmark.http_2xx", 25) - assertCounterEqual(counters, "upstream_cx_http2_total", 1) + asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25) + asserts.assertCounterEqual(counters, "upstream_cx_http2_total", 1) # Through emperical observation, 1030 has been determined to be the minimum of bytes # we can expect to have received when execution has stopped. - assertCounterGreaterEqual(counters, "upstream_cx_rx_bytes_total", 1030) - assertCounterEqual(counters, "upstream_cx_total", 1) - assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", 403) - assertCounterEqual(counters, "upstream_rq_pending_total", 1) - assertCounterEqual(counters, "upstream_rq_total", 25) - assertCounterEqual(counters, "ssl.ciphers.ECDHE-RSA-AES128-GCM-SHA256", 1) - assertCounterEqual(counters, "ssl.curves.X25519", 1) - assertCounterEqual(counters, "ssl.handshake", 1) - assertCounterEqual(counters, "ssl.sigalgs.rsa_pss_rsae_sha256", 1) - assertCounterEqual(counters, "ssl.versions.TLSv1.2", 1) - assertCounterEqual(counters, "default.total_match_count", 1) - assertEqual(len(counters), 17) + asserts.assertCounterGreaterEqual(counters, "upstream_cx_rx_bytes_total", 1030) + asserts.assertCounterEqual(counters, "upstream_cx_total", 1) + asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", 403) + asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1) + asserts.assertCounterEqual(counters, "upstream_rq_total", 25) + asserts.assertCounterEqual(counters, "ssl.ciphers.ECDHE-RSA-AES128-GCM-SHA256", 1) + asserts.assertCounterEqual(counters, "ssl.curves.X25519", 1) + asserts.assertCounterEqual(counters, "ssl.handshake", 1) + asserts.assertCounterEqual(counters, 
"ssl.sigalgs.rsa_pss_rsae_sha256", 1) + asserts.assertCounterEqual(counters, "ssl.versions.TLSv1.2", 1) + asserts.assertCounterEqual(counters, "default.total_match_count", 1) + asserts.assertEqual(len(counters), 17) @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_h2_multiple_connections(https_test_server_fixture): - """ - Test that the experimental h2 pool uses multiple connections. - The burst we send ensures we will need 10 connections right away, as we + """Test that the experimental h2 pool uses multiple connections. + + The burst we send ensures we will need 10 connections right away, as we limit max active streams per connection to 1 by setting the experimental flag to use multiple h2 connections. """ @@ -286,14 +287,14 @@ def test_https_h2_multiple_connections(https_test_server_fixture): "10" ]) counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertCounterGreaterEqual(counters, "benchmark.http_2xx", 100) + asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 100) # Empirical observation shows we may end up creating more then 10 connections. # This is stock Envoy h/2 pool behavior. - assertCounterGreaterEqual(counters, "upstream_cx_http2_total", 10) + asserts.assertCounterGreaterEqual(counters, "upstream_cx_http2_total", 10) def _do_tls_configuration_test(https_test_server_fixture, cli_parameter, use_h2): - """Runs tests for different ciphers. + """Test with different ciphers. For a given choice of (--tls-context, --transport-socket) x (H1, H2), run a series of traffic tests with different ciphers. 
@@ -303,13 +304,13 @@ def _do_tls_configuration_test(https_test_server_fixture, cli_parameter, use_h2) cli_parameter: string, --tls-context or --transport-socket use_h2: boolean, whether to pass --h2 """ - if cli_parameter == "--tls-context": json_template = "{common_tls_context:{tls_params:{cipher_suites:[\"-ALL:%s\"]}}}" else: - json_template = ("{name:\"envoy.transport_sockets.tls\",typed_config:{" + - "\"@type\":\"type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\"," + - "common_tls_context:{tls_params:{cipher_suites:[\"-ALL:%s\"]}}}}") + json_template = "%s%s%s" % ( + "{name:\"envoy.transport_sockets.tls\",typed_config:{", + "\"@type\":\"type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\",", + "common_tls_context:{tls_params:{cipher_suites:[\"-ALL:%s\"]}}}}") for cipher in [ "ECDHE-RSA-AES128-SHA", @@ -320,51 +321,43 @@ def _do_tls_configuration_test(https_test_server_fixture, cli_parameter, use_h2) https_test_server_fixture.getTestServerRootUri() ]) counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertCounterGreaterEqual(counters, "ssl.ciphers.%s" % cipher, 1) + asserts.assertCounterGreaterEqual(counters, "ssl.ciphers.%s" % cipher, 1) @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_h1_tls_context_configuration(https_test_server_fixture): - """ - Verifies specifying tls cipher suites works with the h1 pool - """ + """Test that specifying tls cipher suites works with the h1 pool.""" _do_tls_configuration_test(https_test_server_fixture, "--tls-context", use_h2=False) @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_h1_transport_socket_configuration(https_test_server_fixture): - """ - Verifies specifying tls cipher suites via transport socket works with the h1 pool - """ - + """Test that specifying tls cipher suites via transport socket works with 
the h1 pool.""" _do_tls_configuration_test(https_test_server_fixture, "--transport-socket", use_h2=False) @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_h2_tls_context_configuration(https_test_server_fixture): - """ - Verifies specifying tls cipher suites works with the h2 pool - """ + """Test that specifying tls cipher suites works with the h2 pool.""" _do_tls_configuration_test(https_test_server_fixture, "--tls-context", use_h2=True) @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_h2_transport_socket_configuration(https_test_server_fixture): - """ - Verifies specifying tls cipher suites via transport socket works with the h2 pool - """ + """Test that specifying tls cipher suites via transport socket works with the h2 pool.""" _do_tls_configuration_test(https_test_server_fixture, "--transport-socket", use_h2=True) @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_prefetching(https_test_server_fixture): - """ - Test we prefetch connections. We test for 1 second at 1 rps, which should + """Test we prefetch connections. + + We test for 1 second at 1 rps, which should result in 1 connection max without prefetching. However, we specify 50 connections and the prefetching flag, so we ought to see 50 http1 connections created. 
""" @@ -373,14 +366,14 @@ def test_https_prefetching(https_test_server_fixture): https_test_server_fixture.getTestServerRootUri() ]) counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertCounterEqual(counters, "upstream_cx_http1_total", 50) + asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 50) @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_log_verbosity(https_test_server_fixture): - """ - Test that that the specified log verbosity level is respected. + """Test that the specified log verbosity level is respected. + This tests for a sentinel we know is only right when the level is set to 'trace'. """ @@ -389,53 +382,51 @@ def test_https_log_verbosity(https_test_server_fixture): _, logs = https_test_server_fixture.runNighthawkClient( ["--duration 1", "--rps 1", "-v debug", https_test_server_fixture.getTestServerRootUri()]) - assertNotIn(trace_level_sentinel, logs) + asserts.assertNotIn(trace_level_sentinel, logs) _, logs = https_test_server_fixture.runNighthawkClient( ["--duration 1", "--rps 1", "-v trace", https_test_server_fixture.getTestServerRootUri()]) - assertIn(trace_level_sentinel, logs) + asserts.assertIn(trace_level_sentinel, logs) def test_dotted_output_format(http_test_server_fixture): - """ - Ensure we get the dotted string output format when requested. - and ensure we get latency percentiles. 
- """ + """Test that we get the dotted string output format when requested, and ensure we get latency percentiles.""" output, _ = http_test_server_fixture.runNighthawkClient([ "--duration 1", "--rps 10", "--output-format dotted", http_test_server_fixture.getTestServerRootUri() ], as_json=False) - assertIn("global.benchmark_http_client.request_to_response.permilles-500.microseconds", output) + asserts.assertIn("global.benchmark_http_client.request_to_response.permilles-500.microseconds", + output) # TODO(oschaaf): add percentiles to the gold testing in the C++ output formatter # once the fortio formatter has landed (https://github.com/envoyproxy/nighthawk/pull/168) def test_cli_output_format(http_test_server_fixture): - """ - Ensure we observe latency percentiles with CLI output. - """ + """Test that we observe latency percentiles with CLI output.""" output, _ = http_test_server_fixture.runNighthawkClient( ["--duration 1", "--rps 10", http_test_server_fixture.getTestServerRootUri()], as_json=False) - assertIn("Initiation to completion", output) - assertIn("Percentile", output) + asserts.assertIn("Initiation to completion", output) + asserts.assertIn("Percentile", output) def test_request_body_gets_transmitted(http_test_server_fixture): - """ - Test that the number of bytes we request for the request body gets reflected in the upstream + """Test request body transmission. + + Ensure that the number of bytes we request for the request body gets reflected in the upstream connection transmitted bytes counter for h1 and h2. 
""" def check_upload_expectations(fixture, parsed_json, expected_transmitted_bytes, expected_received_bytes): counters = fixture.getNighthawkCounterMapFromJson(parsed_json) - assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", expected_transmitted_bytes) + asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", + expected_transmitted_bytes) server_stats = fixture.getTestServerStatisticsJson() # Server side expectations start failing with larger upload sizes - assertGreaterEqual( + asserts.assertGreaterEqual( fixture.getServerStatFromJson(server_stats, "http.ingress_http.downstream_cx_rx_bytes_total"), expected_received_bytes) @@ -463,8 +454,9 @@ def check_upload_expectations(fixture, parsed_json, expected_transmitted_bytes, def test_http_h1_termination_predicate(http_test_server_fixture): - """ - Put in a termination predicate. Should result in successfull execution, with 10 successfull requests. + """Test with a termination predicate. + + Should result in successfull execution, with 10 successfull requests. We would expect 25 based on rps and duration. """ parsed_json, _ = http_test_server_fixture.runNighthawkClient([ @@ -472,12 +464,13 @@ def test_http_h1_termination_predicate(http_test_server_fixture): "--connections", "1", "--termination-predicate", "benchmark.http_2xx:9" ]) counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertCounterEqual(counters, "benchmark.http_2xx", 10) + asserts.assertCounterEqual(counters, "benchmark.http_2xx", 10) def test_http_h1_failure_predicate(http_test_server_fixture): - """ - Put in a termination predicate. Should result in failing execution, with 10 successfull requests. + """Test with a failure predicate. + + Should result in failing execution, with 10 successfull requests. 
""" parsed_json, _ = http_test_server_fixture.runNighthawkClient([ http_test_server_fixture.getTestServerRootUri(), "--duration", "5", "--rps", "500", @@ -485,14 +478,11 @@ def test_http_h1_failure_predicate(http_test_server_fixture): ], expect_failure=True) counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertCounterEqual(counters, "benchmark.http_2xx", 1) + asserts.assertCounterEqual(counters, "benchmark.http_2xx", 1) def test_bad_arg_error_messages(http_test_server_fixture): - """ - Test arguments that pass proto validation, but are found to be no good nonetheless, result in reasonable error - messages. - """ + """Test arguments that pass proto validation, but are found to be no good nonetheless, result in reasonable error messages.""" _, err = http_test_server_fixture.runNighthawkClient( [http_test_server_fixture.getTestServerRootUri(), "--termination-predicate ", "a:a"], expect_failure=True, @@ -501,7 +491,8 @@ def test_bad_arg_error_messages(http_test_server_fixture): def test_multiple_backends_http_h1(multi_http_test_server_fixture): - """ + """Test that we can load-test multiple backends on http. + Runs the CLI configured to use plain HTTP/1 against multiple test servers, and sanity checks statistics from both client and server. 
""" @@ -516,27 +507,28 @@ def test_multiple_backends_http_h1(multi_http_test_server_fixture): parsed_json, stderr = multi_http_test_server_fixture.runNighthawkClient(nighthawk_client_args) counters = multi_http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertCounterEqual(counters, "benchmark.http_2xx", 25) - assertCounterEqual(counters, "upstream_cx_http1_total", 3) - assertCounterGreater(counters, "upstream_cx_rx_bytes_total", 0) - assertCounterEqual(counters, "upstream_cx_total", 3) - assertCounterGreater(counters, "upstream_cx_tx_bytes_total", 0) - assertCounterEqual(counters, "upstream_rq_pending_total", 3) - assertCounterEqual(counters, "upstream_rq_total", 25) - assertCounterEqual(counters, "default.total_match_count", 3) + asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25) + asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 3) + asserts.assertCounterGreater(counters, "upstream_cx_rx_bytes_total", 0) + asserts.assertCounterEqual(counters, "upstream_cx_total", 3) + asserts.assertCounterGreater(counters, "upstream_cx_tx_bytes_total", 0) + asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 3) + asserts.assertCounterEqual(counters, "upstream_rq_total", 25) + asserts.assertCounterEqual(counters, "default.total_match_count", 3) total_2xx = 0 for parsed_server_json in multi_http_test_server_fixture.getAllTestServerStatisticsJsons(): single_2xx = multi_http_test_server_fixture.getServerStatFromJson( parsed_server_json, "http.ingress_http.downstream_rq_2xx") - assertBetweenInclusive(single_2xx, 8, 9) + asserts.assertBetweenInclusive(single_2xx, 8, 9) total_2xx += single_2xx - assertBetweenInclusive(total_2xx, 24, 25) + asserts.assertBetweenInclusive(total_2xx, 24, 25) @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_multiple_backends_https_h1(multi_https_test_server_fixture): - """ + """Test that we can load-test multiple 
backends on https. + Runs the CLI configured to use HTTP/1 with TLS against multiple test servers, and sanity checks statistics from both client and server. """ @@ -551,38 +543,36 @@ def test_multiple_backends_https_h1(multi_https_test_server_fixture): parsed_json, stderr = multi_https_test_server_fixture.runNighthawkClient(nighthawk_client_args) counters = multi_https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertCounterEqual(counters, "benchmark.http_2xx", 25) - assertCounterEqual(counters, "upstream_cx_http1_total", 3) - assertCounterGreater(counters, "upstream_cx_rx_bytes_total", 0) - assertCounterEqual(counters, "upstream_cx_total", 3) - assertCounterGreater(counters, "upstream_cx_tx_bytes_total", 0) - assertCounterEqual(counters, "upstream_rq_pending_total", 3) - assertCounterEqual(counters, "upstream_rq_total", 25) - assertCounterEqual(counters, "default.total_match_count", 3) + asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25) + asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 3) + asserts.assertCounterGreater(counters, "upstream_cx_rx_bytes_total", 0) + asserts.assertCounterEqual(counters, "upstream_cx_total", 3) + asserts.assertCounterGreater(counters, "upstream_cx_tx_bytes_total", 0) + asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 3) + asserts.assertCounterEqual(counters, "upstream_rq_total", 25) + asserts.assertCounterEqual(counters, "default.total_match_count", 3) total_2xx = 0 for parsed_server_json in multi_https_test_server_fixture.getAllTestServerStatisticsJsons(): single_2xx = multi_https_test_server_fixture.getServerStatFromJson( parsed_server_json, "http.ingress_http.downstream_rq_2xx") - assertBetweenInclusive(single_2xx, 8, 9) + asserts.assertBetweenInclusive(single_2xx, 8, 9) total_2xx += single_2xx - assertBetweenInclusive(total_2xx, 24, 25) + asserts.assertBetweenInclusive(total_2xx, 24, 25) @pytest.mark.parametrize('server_config', 
["nighthawk/test/integration/configurations/sni_origin.yaml"]) def test_https_h1_sni(https_test_server_fixture): - """ - Tests SNI indication works on https/h1 - """ + """Test that SNI indication works on https/h1.""" # Verify success when we set the right host parsed_json, _ = https_test_server_fixture.runNighthawkClient([ https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:2", "--request-header", "host: sni.com" ]) counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1) - assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1) - assertCounterGreaterEqual(counters, "ssl.handshake", 1) + asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1) + asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1) + asserts.assertCounterGreaterEqual(counters, "ssl.handshake", 1) # Verify failure when we set no host (will get plain http) parsed_json, _ = https_test_server_fixture.runNighthawkClient( @@ -597,17 +587,15 @@ def test_https_h1_sni(https_test_server_fixture): expect_failure=False) counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1) - assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1) - assertNotIn("ssl.handshake", counters) + asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1) + asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1) + asserts.assertNotIn("ssl.handshake", counters) @pytest.mark.parametrize('server_config', ["nighthawk/test/integration/configurations/sni_origin.yaml"]) def test_https_h2_sni(https_test_server_fixture): - """ - Tests SNI indication works on https/h1 - """ + """Tests that SNI indication works on https/h1.""" # Verify success when we set the right host parsed_json, _ = 
https_test_server_fixture.runNighthawkClient([ https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", @@ -615,9 +603,9 @@ def test_https_h2_sni(https_test_server_fixture): "--h2" ]) counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1) - assertCounterGreaterEqual(counters, "upstream_cx_http2_total", 1) - assertCounterEqual(counters, "ssl.handshake", 1) + asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1) + asserts.assertCounterGreaterEqual(counters, "upstream_cx_http2_total", 1) + asserts.assertCounterEqual(counters, "ssl.handshake", 1) # Verify success when we set the right host parsed_json, _ = https_test_server_fixture.runNighthawkClient([ @@ -625,9 +613,9 @@ def test_https_h2_sni(https_test_server_fixture): "--termination-predicate", "benchmark.http_2xx:2", "--request-header", "host: sni.com", "--h2" ]) counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1) - assertCounterGreaterEqual(counters, "upstream_cx_http2_total", 1) - assertCounterEqual(counters, "ssl.handshake", 1) + asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1) + asserts.assertCounterGreaterEqual(counters, "upstream_cx_http2_total", 1) + asserts.assertCounterEqual(counters, "ssl.handshake", 1) # Verify failure when we set no host (will get plain http) parsed_json, _ = https_test_server_fixture.runNighthawkClient([ @@ -645,23 +633,22 @@ def test_https_h2_sni(https_test_server_fixture): @pytest.fixture(scope="function", params=[1, 25]) def qps_parameterization_fixture(request): + """Yield queries per second values to iterate test parameterization on.""" param = request.param yield param @pytest.fixture(scope="function", params=[1, 3]) def duration_parameterization_fixture(request): + """Yield duration values to iterate test parameterization on.""" param = 
request.param yield param -@pytest.mark.skipif(isSanitizerRun(), reason="Unstable in sanitizer runs") +@pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable in sanitizer runs") def test_http_request_release_timing(http_test_server_fixture, qps_parameterization_fixture, duration_parameterization_fixture): - ''' - Verify latency-sample-, query- and reply- counts in various configurations. - ''' - + """Test latency-sample-, query- and reply- counts in various configurations.""" for concurrency in [1, 2]: parsed_json, _ = http_test_server_fixture.runNighthawkClient([ http_test_server_fixture.getTestServerRootUri(), "--duration", @@ -674,13 +661,13 @@ def test_http_request_release_timing(http_test_server_fixture, qps_parameterizat global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson( parsed_json) counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertEqual( + asserts.assertEqual( int(global_histograms["benchmark_http_client.request_to_response"]["count"]), total_requests) - assertEqual( - int(global_histograms["benchmark_http_client.queue_to_connect"]["count"]), total_requests) + asserts.assertEqual(int(global_histograms["benchmark_http_client.queue_to_connect"]["count"]), + total_requests) - assertCounterEqual(counters, "benchmark.http_2xx", (total_requests)) + asserts.assertCounterEqual(counters, "benchmark.http_2xx", (total_requests)) def _send_sigterm(process): @@ -692,9 +679,7 @@ def _send_sigterm(process): def test_cancellation_with_infinite_duration(http_test_server_fixture): - """ - Make sure that we can use signals to cancel execution. 
- """ + """Test that we can use signals to cancel execution.""" args = [ http_test_server_fixture.nighthawk_client_path, "--concurrency", "2", http_test_server_fixture.getTestServerRootUri(), "--no-duration", "--output-format", "json" @@ -704,24 +689,26 @@ def test_cancellation_with_infinite_duration(http_test_server_fixture): stdout, stderr = client_process.communicate() client_process.wait() output = stdout.decode('utf-8') - assertEqual(client_process.returncode, 0) + asserts.assertEqual(client_process.returncode, 0) parsed_json = json.loads(output) counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertCounterEqual(counters, "graceful_stop_requested", 2) - assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1) + asserts.assertCounterEqual(counters, "graceful_stop_requested", 2) + asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1) def _run_client_with_args(args): - return run_binary_with_args("nighthawk_client", args) + return utility.run_binary_with_args("nighthawk_client", args) def test_client_help(): + """Test that passing --help behaves as expected.""" (exit_code, output) = _run_client_with_args("--help") - assertEqual(exit_code, 0) - assertIn("USAGE", output) + asserts.assertEqual(exit_code, 0) + asserts.assertIn("USAGE", output) def test_client_bad_arg(): + """Test that passing bad arguments behaves as expected.""" (exit_code, output) = _run_client_with_args("127.0.0.1 --foo") - assertEqual(exit_code, 1) - assertIn("PARSE ERROR: Argument: --foo", output) + asserts.assertEqual(exit_code, 1) + asserts.assertIn("PARSE ERROR: Argument: --foo", output) diff --git a/test/integration/test_integration_zipkin.py b/test/integration/test_integration_zipkin.py index 420861507..2b815d2d1 100644 --- a/test/integration/test_integration_zipkin.py +++ b/test/integration/test_integration_zipkin.py @@ -1,14 +1,16 @@ -#!/usr/bin/env python3 +"""Test the Zipkin tracing feature of Nighthawk's load generator.""" + import 
pytest # server_config needs to be explicitly imported to avoid an error, as http_test_server_fixture # relies on it. from integration_test_fixtures import (http_test_server_fixture, server_config) -from utility import * +from test.integration import asserts def test_tracing_zipkin(http_test_server_fixture): - """ + """Test zipkin tracing. + Test that we send spans when our zipkin tracing feature is enabled. Note there's no actual zipkin server started, so traffic will get (hopefully) get send into the void. @@ -21,6 +23,6 @@ def test_tracing_zipkin(http_test_server_fixture): http_test_server_fixture.getTestServerRootUri() ]) counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertGreaterEqual(counters["benchmark.http_2xx"], 50) - assertGreaterEqual(counters["tracing.zipkin.reports_dropped"], 9) - assertGreaterEqual(counters["tracing.zipkin.spans_sent"], 45) + asserts.assertGreaterEqual(counters["benchmark.http_2xx"], 50) + asserts.assertGreaterEqual(counters["tracing.zipkin.reports_dropped"], 9) + asserts.assertGreaterEqual(counters["tracing.zipkin.spans_sent"], 45) diff --git a/test/integration/test_output_transform.py b/test/integration/test_output_transform.py index 3889fcdb0..bda42ab5b 100644 --- a/test/integration/test_output_transform.py +++ b/test/integration/test_output_transform.py @@ -1,7 +1,9 @@ -#!/usr/bin/env python3 +"""Tests for nighthawk_output_transform.""" + import pytest from test.integration import utility +from test.integration import asserts import os import subprocess @@ -11,23 +13,24 @@ def _run_output_transform_with_args(args): def test_output_transform_help(): + """Test that the output transform binary behaves as expected when --help is passed.""" (exit_code, output) = _run_output_transform_with_args("--help") - utility.assertEqual(exit_code, 0) - utility.assertIn("USAGE", output) + asserts.assertEqual(exit_code, 0) + asserts.assertIn("USAGE", output) def test_output_transform_bad_arguments(): + """Test 
that the output transform binary behaves as expected when bad arguments are passed.""" (exit_code, output) = _run_output_transform_with_args("--foo") - utility.assertEqual(exit_code, 1) - utility.assertIn("PARSE ERROR: Argument: --foo", output) + asserts.assertEqual(exit_code, 1) + asserts.assertIn("PARSE ERROR: Argument: --foo", output) def test_output_transform_101(): - """ - Runs an arbitrary load test, which outputs to json. + """Run an arbitrary load test, which outputs to json. + This json output is then transformed to human readable output. """ - test_rundir = os.path.join(os.environ["TEST_SRCDIR"], os.environ["TEST_WORKSPACE"]) process = subprocess.run([ os.path.join(test_rundir, "nighthawk_client"), "--duration", "1", "--rps", "1", "127.0.0.1", @@ -39,6 +42,6 @@ def test_output_transform_101(): [os.path.join(test_rundir, "nighthawk_output_transform"), "--output-format", "human"], stdout=subprocess.PIPE, input=output) - utility.assertEqual(process.returncode, 0) - utility.assertIn("Nighthawk - A layer 7 protocol benchmarking tool", + asserts.assertEqual(process.returncode, 0) + asserts.assertIn("Nighthawk - A layer 7 protocol benchmarking tool", process.stdout.decode("utf-8")) diff --git a/test/integration/test_remote_execution.py b/test/integration/test_remote_execution.py index b4937a9c7..33189d4c0 100644 --- a/test/integration/test_remote_execution.py +++ b/test/integration/test_remote_execution.py @@ -1,15 +1,15 @@ -#!/usr/bin/env python3 +"""Tests for the remote execution feature.""" import pytest from test.integration.integration_test_fixtures import (http_test_server_fixture, server_config) -from test.integration.utility import * +from test.integration import asserts def test_remote_execution_basics(http_test_server_fixture): - """ - Verify remote execution via gRPC works as intended. We do that by running - nighthawk_service and configuring nighthawk_client to request execution via that. + """Verify remote execution via gRPC works as intended. 
+ + We do that by running nighthawk_service and configuring nighthawk_client to request execution via that. """ http_test_server_fixture.startNighthawkGrpcService() args = [ @@ -22,12 +22,12 @@ def test_remote_execution_basics(http_test_server_fixture): for i in range(repeats): parsed_json, _ = http_test_server_fixture.runNighthawkClient(args) counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertCounterGreaterEqual(counters, "benchmark.http_2xx", 25) + asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 25) http_test_server_fixture.grpc_service.stop() # Ensure the gRPC service logs looks right. Specifically these logs ought to have sentinels # indicative of the right number of executions. (Avoids regression of #289). - assertEqual( + asserts.assertEqual( repeats, sum("Starting 1 threads / event loops" in line for line in http_test_server_fixture.grpc_service.log_lines)) @@ -38,10 +38,9 @@ def test_remote_execution_basics(http_test_server_fixture): def test_bad_service_uri(http_test_server_fixture): - """ - Test configuring a bad service uri. 
- """ + """Test configuring a bad service uri.""" args = [http_test_server_fixture.getTestServerRootUri(), "--nighthawk-service", "a:-1"] - parsed_json, err = http_test_server_fixture.runNighthawkClient( - args, expect_failure=True, as_json=False) + parsed_json, err = http_test_server_fixture.runNighthawkClient(args, + expect_failure=True, + as_json=False) assert "Bad service uri" in err diff --git a/test/integration/utility.py b/test/integration/utility.py index 907ffab6f..45fbd9e95 100644 --- a/test/integration/utility.py +++ b/test/integration/utility.py @@ -1,67 +1,20 @@ +"""Packages utility methods for tests.""" + import os import subprocess -def assertEqual(a, b): - assert a == b - - -def assertGreater(a, b): - assert a > b - - -def assertGreaterEqual(a, b): - assert a >= b - - -def assertLessEqual(a, b): - assert a <= b - - -def assertNotIn(a, b): - assert a not in b - - -def assertIn(a, b): - assert a in b - - -def assertBetweenInclusive(a, min_value, max_value): - assertGreaterEqual(a, min_value) - assertLessEqual(a, max_value) - - -def assertCounterEqual(counters, name, value): - assertIn(name, counters) - assertEqual(counters[name], value) - - -def assertCounterGreater(counters, name, value): - assertIn(name, counters) - assertGreater(counters[name], value) - - -def assertCounterGreaterEqual(counters, name, value): - assertIn(name, counters) - assertGreaterEqual(counters[name], value) - - -def assertCounterLessEqual(counters, name, value): - assertIn(name, counters) - assertLessEqual(counters[name], value) - - -def assertCounterBetweenInclusive(counters, name, min_value, max_value): - assertIn(name, counters) - assertBetweenInclusive(counters[name], min_value, max_value) - - def isSanitizerRun(): + """Determine if the current execution is a tsan/asan/ubsan run. + + Returns: + bool: True iff the current execution is determined to be a sanitizer run. 
+ """ return True if os.environ.get("NH_INTEGRATION_TEST_SANITIZER_RUN", 0) == "1" else False def run_binary_with_args(binary, args): - """Executes a Nighthawk binary with the provided arguments. + """Execute a Nighthawk binary with the provided arguments. Args: binary: A string, the name of the to-be-called binary, e.g. "nighthawk_client". diff --git a/test/run_nighthawk_bazel_coverage.sh b/test/run_nighthawk_bazel_coverage.sh index 8b48f4eec..0512e3b23 100755 --- a/test/run_nighthawk_bazel_coverage.sh +++ b/test/run_nighthawk_bazel_coverage.sh @@ -43,7 +43,7 @@ COVERAGE_VALUE=${COVERAGE_VALUE%?} if [ "$VALIDATE_COVERAGE" == "true" ] then - COVERAGE_THRESHOLD=98.6 + COVERAGE_THRESHOLD=98.4 COVERAGE_FAILED=$(echo "${COVERAGE_VALUE}<${COVERAGE_THRESHOLD}" | bc) if test ${COVERAGE_FAILED} -eq 1; then echo Code coverage ${COVERAGE_VALUE} is lower than limit of ${COVERAGE_THRESHOLD} @@ -52,4 +52,4 @@ then echo Code coverage ${COVERAGE_VALUE} is good and higher than limit of ${COVERAGE_THRESHOLD} fi fi -echo "HTML coverage report is in ${COVERAGE_DIR}/index.html" \ No newline at end of file +echo "HTML coverage report is in ${COVERAGE_DIR}/index.html" diff --git a/test/server/BUILD b/test/server/BUILD index e1cec75f4..23723c3ac 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -19,3 +19,14 @@ envoy_cc_test( "@envoy//test/integration:http_integration_lib", ], ) + +envoy_cc_test( + name = "http_dynamic_delay_filter_integration_test", + srcs = ["http_dynamic_delay_filter_integration_test.cc"], + repository = "@envoy", + deps = [ + "//source/server:http_dynamic_delay_filter_config", + "@envoy//source/common/api:api_lib_with_external_headers", + "@envoy//test/integration:http_integration_lib", + ], +) diff --git a/test/server/http_dynamic_delay_filter_integration_test.cc b/test/server/http_dynamic_delay_filter_integration_test.cc new file mode 100644 index 000000000..c4a38e429 --- /dev/null +++ b/test/server/http_dynamic_delay_filter_integration_test.cc @@ -0,0 
+1,184 @@ +#include + +#include "external/envoy/test/integration/http_integration.h" + +#include "api/server/response_options.pb.h" + +#include "server/configuration.h" +#include "server/http_dynamic_delay_filter.h" + +#include "gtest/gtest.h" + +namespace Nighthawk { + +/** + * Support class for testing the dynamic delay filter. We rely on the fault filter for + * inducing the actual delay, so this aims to prove that: + * - The computations are correct. + * - Static/file-based configuration is handled as expected. + * - Request level configuration is handled as expected. + * - Failure modes work. + * - TODO(#393): An end to end test which proves that the interaction between this filter + * and the fault filter work as expected. + */ +class HttpDynamicDelayIntegrationTest + : public Envoy::HttpIntegrationTest, + public testing::TestWithParam { +protected: + HttpDynamicDelayIntegrationTest() + : HttpIntegrationTest(Envoy::Http::CodecClient::Type::HTTP1, GetParam()), + request_headers_({{":method", "GET"}, {":path", "/"}, {":authority", "host"}}), + delay_header_string_(Envoy::Http::LowerCaseString("x-envoy-fault-delay-request")) {} + + // We don't override SetUp(): tests in this file will call setup() instead to avoid having to + // create a fixture per filter configuration. + void setup(const std::string& config) { + config_helper_.addFilter(config); + HttpIntegrationTest::initialize(); + } + + // Fetches a response with request-level configuration set in the request header. 
+ Envoy::IntegrationStreamDecoderPtr getResponse(absl::string_view request_level_config, + bool setup_for_upstream_request = true) { + const Envoy::Http::LowerCaseString key("x-nighthawk-test-server-config"); + Envoy::Http::TestRequestHeaderMapImpl request_headers = request_headers_; + request_headers.setCopy(key, request_level_config); + return getResponse(request_headers, setup_for_upstream_request); + } + + // Fetches a response with the default request headers, expecting the fake upstream to supply + // the response. + Envoy::IntegrationStreamDecoderPtr getResponse() { return getResponse(request_headers_); } + + // Fetches a response using the provided request headers. When setup_for_upstream_request + // is true, the expectation will be that an upstream request will be needed to provide a + // response. If it is set to false, the extension is expected to supply the response, and + // no upstream request ought to occur. + Envoy::IntegrationStreamDecoderPtr + getResponse(const Envoy::Http::TestRequestHeaderMapImpl& request_headers, + bool setup_for_upstream_request = true) { + cleanupUpstreamAndDownstream(); + codec_client_ = makeHttpConnection(lookupPort("http")); + Envoy::IntegrationStreamDecoderPtr response; + if (setup_for_upstream_request) { + response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); + } else { + response = codec_client_->makeHeaderOnlyRequest(request_headers); + response->waitForEndStream(); + } + return response; + } + + const Envoy::Http::TestRequestHeaderMapImpl request_headers_; + const Envoy::Http::LowerCaseString delay_header_string_; +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, HttpDynamicDelayIntegrationTest, + testing::ValuesIn(Envoy::TestEnvironment::getIpVersionsForTest())); + +// Verify expectations with an empty dynamic-delay configuration. 
+TEST_P(HttpDynamicDelayIntegrationTest, NoStaticConfiguration) { + setup(R"( +name: dynamic-delay +typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions +)"); + // Don't send any config request header + getResponse(); + EXPECT_EQ(upstream_request_->headers().get(delay_header_string_), nullptr); + // Send a config request header with an empty / default config. Should be a no-op. + getResponse("{}"); + EXPECT_EQ(upstream_request_->headers().get(delay_header_string_), nullptr); + // Send a config request header, this should become effective. + getResponse("{static_delay: \"1.6s\"}"); + EXPECT_EQ(upstream_request_->headers().get(delay_header_string_)->value().getStringView(), + "1600"); + + // Send a malformed config request header. This ought to shortcut and directly reply, + // hence we don't expect an upstream request. + auto response = getResponse("bad_json", false); + EXPECT_EQ(Envoy::Http::Utility::getResponseStatus(response->headers()), 500); + EXPECT_EQ( + response->body(), + "dynamic-delay didn't understand the request: Error merging json config: Unable to parse " + "JSON as proto (INVALID_ARGUMENT:Unexpected token.\nbad_json\n^): bad_json"); + // Send an empty config header, which ought to trigger failure mode as well. + response = getResponse("", false); + EXPECT_EQ(Envoy::Http::Utility::getResponseStatus(response->headers()), 500); + EXPECT_EQ( + response->body(), + "dynamic-delay didn't understand the request: Error merging json config: Unable to " + "parse JSON as proto (INVALID_ARGUMENT:Unexpected end of string. Expected a value.\n\n^): "); +} + +// Verify expectations with static/file-based static_delay configuration. 
+TEST_P(HttpDynamicDelayIntegrationTest, StaticConfigurationStaticDelay) { + setup(R"EOF( +name: dynamic-delay +typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions + static_delay: 1.33s +)EOF"); + getResponse(); + EXPECT_EQ(upstream_request_->headers().get(delay_header_string_)->value().getStringView(), + "1330"); + getResponse("{}"); + EXPECT_EQ(upstream_request_->headers().get(delay_header_string_)->value().getStringView(), + "1330"); + getResponse("{static_delay: \"0.2s\"}"); + // TODO(#392): This fails, because the duration is a two-field message: it would make here to see + // both the number of seconds and nanoseconds to be overridden. + // However, the seconds part is set to '0', which equates to the default of the underlying int + // type, and the fact that we are using proto3, which doesn't merge default values. + // Hence the following expectation will fail, as it yields 1200 instead of the expected 200. + // EXPECT_EQ(upstream_request_->headers().get(delay_header_string_)->value().getStringView(), + // "200"); + getResponse("{static_delay: \"2.2s\"}"); + EXPECT_EQ(upstream_request_->headers().get(delay_header_string_)->value().getStringView(), + "2200"); +} + +// Verify expectations with static/file-based concurrency_based_linear_delay configuration. 
+TEST_P(HttpDynamicDelayIntegrationTest, StaticConfigurationConcurrentDelay) { + setup(R"EOF( +name: dynamic-delay +typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions + concurrency_based_linear_delay: + minimal_delay: 0.05s + concurrency_delay_factor: 0.01s +)EOF"); + getResponse(); + EXPECT_EQ(upstream_request_->headers().get(delay_header_string_)->value().getStringView(), "60"); +} + +class ComputeTest : public testing::Test { +public: + int64_t compute(uint64_t concurrency, uint64_t minimal_delay_seconds, + uint64_t minimal_delay_nanos, uint64_t delay_factor_seconds, + uint64_t delay_factor_nanos) { + Envoy::ProtobufWkt::Duration minimal_delay; + Envoy::ProtobufWkt::Duration delay_factor; + minimal_delay.set_seconds(minimal_delay_seconds); + minimal_delay.set_nanos(minimal_delay_nanos); + delay_factor.set_seconds(delay_factor_seconds); + delay_factor.set_nanos(delay_factor_nanos); + return Server::HttpDynamicDelayDecoderFilter::computeConcurrencyBasedLinearDelayMs( + concurrency, minimal_delay, delay_factor); + } +}; + +// Test that the delay looks as expected with various parameterizations. 
+TEST_F(ComputeTest, ComputeConcurrencyBasedLinearDelayMs) { + EXPECT_EQ(compute(1, 1, 0, 0, 0), 1000); + EXPECT_EQ(compute(2, 1, 0, 0, 0), 1000); + EXPECT_EQ(compute(1, 2, 0, 0, 0), 2000); + EXPECT_EQ(compute(2, 2, 0, 0, 0), 2000); + EXPECT_EQ(compute(1, 0, 500000, 0, 500000), 1); + EXPECT_EQ(compute(2, 0, 500000, 0, 500000), 2); + EXPECT_EQ(compute(3, 0, 500000, 0, 500000), 2); + EXPECT_EQ(compute(4, 0, 500000, 0, 500000), 3); + EXPECT_EQ(compute(4, 1, 500000, 1, 500000), 5003); +} + +} // namespace Nighthawk diff --git a/test/server/http_test_server_filter_integration_test.cc b/test/server/http_test_server_filter_integration_test.cc index 1946f3abc..702b77c3c 100644 --- a/test/server/http_test_server_filter_integration_test.cc +++ b/test/server/http_test_server_filter_integration_test.cc @@ -7,7 +7,9 @@ #include "api/server/response_options.pb.h" #include "api/server/response_options.pb.validate.h" +#include "server/configuration.h" #include "server/http_test_server_filter.h" +#include "server/well_known_headers.h" #include "gtest/gtest.h" @@ -310,7 +312,7 @@ TEST_F(HttpTestServerDecoderFilterTest, HeaderMerge) { Server::HttpTestServerDecoderFilterConfigSharedPtr config = std::make_shared(initial_options); Server::HttpTestServerDecoderFilter f(config); - absl::optional error_message; + std::string error_message; nighthawk::server::ResponseOptions options = config->server_config(); EXPECT_EQ(1, options.response_headers_size()); @@ -320,40 +322,40 @@ TEST_F(HttpTestServerDecoderFilterTest, HeaderMerge) { EXPECT_EQ(false, options.response_headers(0).append().value()); Envoy::Http::TestResponseHeaderMapImpl header_map{{":status", "200"}, {"foo", "bar"}}; - f.applyConfigToResponseHeaders(header_map, options); + Server::Configuration::applyConfigToResponseHeaders(header_map, options); EXPECT_TRUE(Envoy::TestUtility::headerMapEqualIgnoreOrder( header_map, Envoy::Http::TestResponseHeaderMapImpl{{":status", "200"}, {"foo", "bar1"}})); - EXPECT_TRUE(f.mergeJsonConfig( + 
EXPECT_TRUE(Server::Configuration::mergeJsonConfig( R"({response_headers: [ { header: { key: "foo", value: "bar2"}, append: false } ]})", options, error_message)); - EXPECT_EQ(absl::nullopt, error_message); + EXPECT_EQ("", error_message); EXPECT_EQ(2, options.response_headers_size()); EXPECT_EQ("foo", options.response_headers(1).header().key()); EXPECT_EQ("bar2", options.response_headers(1).header().value()); EXPECT_EQ(false, options.response_headers(1).append().value()); - f.applyConfigToResponseHeaders(header_map, options); + Server::Configuration::applyConfigToResponseHeaders(header_map, options); EXPECT_TRUE(Envoy::TestUtility::headerMapEqualIgnoreOrder( header_map, Envoy::Http::TestRequestHeaderMapImpl{{":status", "200"}, {"foo", "bar2"}})); - EXPECT_TRUE(f.mergeJsonConfig( + EXPECT_TRUE(Server::Configuration::mergeJsonConfig( R"({response_headers: [ { header: { key: "foo2", value: "bar3"}, append: true } ]})", options, error_message)); - EXPECT_EQ(absl::nullopt, error_message); + EXPECT_EQ("", error_message); EXPECT_EQ(3, options.response_headers_size()); EXPECT_EQ("foo2", options.response_headers(2).header().key()); EXPECT_EQ("bar3", options.response_headers(2).header().value()); EXPECT_EQ(true, options.response_headers(2).append().value()); - f.applyConfigToResponseHeaders(header_map, options); + Server::Configuration::applyConfigToResponseHeaders(header_map, options); EXPECT_TRUE(Envoy::TestUtility::headerMapEqualIgnoreOrder( header_map, Envoy::Http::TestResponseHeaderMapImpl{ {":status", "200"}, {"foo", "bar2"}, {"foo2", "bar3"}})); - EXPECT_FALSE(f.mergeJsonConfig(kBadJson, options, error_message)); + EXPECT_FALSE(Server::Configuration::mergeJsonConfig(kBadJson, options, error_message)); EXPECT_EQ("Error merging json config: Unable to parse JSON as proto (INVALID_ARGUMENT:Unexpected " "token.\nbad_json\n^): bad_json", error_message); diff --git a/test/statistic_test.cc b/test/statistic_test.cc index c73e1d6b8..b72d76043 100644 --- 
a/test/statistic_test.cc +++ b/test/statistic_test.cc @@ -2,10 +2,12 @@ #include #include +#include #include // std::bad_cast #include "external/envoy/source/common/protobuf/utility.h" #include "external/envoy/source/common/stats/isolated_store_impl.h" +#include "external/envoy/test/mocks/stats/mocks.h" #include "external/envoy/test/test_common/file_system_for_test.h" #include "external/envoy/test/test_common/utility.h" @@ -20,7 +22,8 @@ using namespace testing; namespace Nighthawk { -using MyTypes = Types; +using MyTypes = Types; template class TypedStatisticTest : public Test {}; @@ -116,15 +119,15 @@ TYPED_TEST(TypedStatisticTest, SingleAndDoubleValue) { a.addValue(1); EXPECT_EQ(1, a.count()); - EXPECT_DOUBLE_EQ(1, a.mean()); + Helper::expectNear(1.0, a.mean(), a.significantDigits()); EXPECT_DOUBLE_EQ(0, a.pvariance()); EXPECT_DOUBLE_EQ(0, a.pstdev()); a.addValue(2); EXPECT_EQ(2, a.count()); - EXPECT_DOUBLE_EQ(1.5, a.mean()); - EXPECT_DOUBLE_EQ(0.25, a.pvariance()); - EXPECT_DOUBLE_EQ(0.5, a.pstdev()); + Helper::expectNear(1.5, a.mean(), a.significantDigits()); + Helper::expectNear(0.25, a.pvariance(), a.significantDigits()); + Helper::expectNear(0.5, a.pstdev(), a.significantDigits()); } TYPED_TEST(TypedStatisticTest, CatastrophicalCancellation) { @@ -220,6 +223,15 @@ TYPED_TEST(TypedStatisticTest, StringOutput) { } } +TYPED_TEST(TypedStatisticTest, IdFieldWorks) { + TypeParam statistic; + std::string id = "fooid"; + + EXPECT_EQ("", statistic.id()); + statistic.setId(id); + EXPECT_EQ(id, statistic.id()); +} + class StatisticTest : public Test {}; // Note that we explicitly subject SimpleStatistic to the large @@ -269,6 +281,20 @@ TEST(StatisticTest, StreamingStatProtoOutputLargeValues) { EXPECT_EQ(proto.pstdev().nanos(), 0); } +TEST(StatisticTest, CircllhistStatisticProtoOutputLargeValues) { + CircllhistStatistic statistic; + uint64_t value = 100ul + 0xFFFFFFFF; + statistic.addValue(value); + statistic.addValue(value); + const nighthawk::client::Statistic proto 
= + statistic.toProto(Statistic::SerializationDomain::DURATION); + + EXPECT_EQ(proto.count(), 2); + Helper::expectNear(Envoy::Protobuf::util::TimeUtil::DurationToNanoseconds(proto.mean()), value, + statistic.significantDigits()); + EXPECT_EQ(Envoy::Protobuf::util::TimeUtil::DurationToNanoseconds(proto.pstdev()), 0); +} + TEST(StatisticTest, HdrStatisticPercentilesProto) { nighthawk::client::Statistic parsed_json_proto; HdrStatistic statistic; @@ -281,33 +307,51 @@ TEST(StatisticTest, HdrStatisticPercentilesProto) { util.loadFromJson(Envoy::Filesystem::fileSystemForTest().fileReadToEnd( TestEnvironment::runfilesPath("test/test_data/hdr_proto_json.gold")), parsed_json_proto, Envoy::ProtobufMessage::getStrictValidationVisitor()); - // Instead of comparing proto's, we perform a string-based comparison, because that emits a - // helpful diff when this fails. const std::string json = util.getJsonStringFromMessage( statistic.toProto(Statistic::SerializationDomain::DURATION), true, true); const std::string golden_json = util.getJsonStringFromMessage(parsed_json_proto, true, true); - EXPECT_EQ(json, golden_json); + EXPECT_THAT(statistic.toProto(Statistic::SerializationDomain::DURATION), + Envoy::ProtoEq(parsed_json_proto)) + << json << "\n" + << "is not equal to golden file:\n" + << golden_json; +} + +TEST(StatisticTest, CircllhistStatisticPercentilesProto) { + nighthawk::client::Statistic parsed_json_proto; + CircllhistStatistic statistic; + + for (int i = 1; i <= 10; i++) { + statistic.addValue(i); + } + + Envoy::MessageUtil util; + util.loadFromJson(Envoy::Filesystem::fileSystemForTest().fileReadToEnd( + TestEnvironment::runfilesPath("test/test_data/circllhist_proto_json.gold")), + parsed_json_proto, Envoy::ProtobufMessage::getStrictValidationVisitor()); + const std::string json = util.getJsonStringFromMessage( + statistic.toProto(Statistic::SerializationDomain::DURATION), true, true); + const std::string golden_json = util.getJsonStringFromMessage(parsed_json_proto, 
true, true); + EXPECT_THAT(statistic.toProto(Statistic::SerializationDomain::DURATION), + Envoy::ProtoEq(parsed_json_proto)) + << json << "\n" + << "is not equal to golden file:\n" + << golden_json; } TEST(StatisticTest, CombineAcrossTypesFails) { HdrStatistic a; InMemoryStatistic b; StreamingStatistic c; + CircllhistStatistic d; EXPECT_THROW(a.combine(b), std::bad_cast); EXPECT_THROW(a.combine(c), std::bad_cast); EXPECT_THROW(b.combine(a), std::bad_cast); EXPECT_THROW(b.combine(c), std::bad_cast); EXPECT_THROW(c.combine(a), std::bad_cast); EXPECT_THROW(c.combine(b), std::bad_cast); -} - -TEST(StatisticTest, IdFieldWorks) { - StreamingStatistic c; - std::string id = "fooid"; - - EXPECT_EQ("", c.id()); - c.setId(id); - EXPECT_EQ(id, c.id()); + EXPECT_THROW(c.combine(d), std::bad_cast); + EXPECT_THROW(d.combine(a), std::bad_cast); } TEST(StatisticTest, HdrStatisticOutOfRange) { @@ -319,13 +363,73 @@ TEST(StatisticTest, HdrStatisticOutOfRange) { TEST(StatisticTest, NullStatistic) { NullStatistic stat; EXPECT_EQ(0, stat.count()); + std::string id = "fooid"; + stat.setId(id); + EXPECT_EQ(id, stat.id()); stat.addValue(1); + EXPECT_EQ(0, stat.count()); + EXPECT_EQ(0, stat.max()); + EXPECT_EQ(UINT64_MAX, stat.min()); EXPECT_EQ(0, stat.mean()); EXPECT_EQ(0, stat.pvariance()); EXPECT_EQ(0, stat.pstdev()); EXPECT_NE(nullptr, stat.combine(stat)); EXPECT_EQ(0, stat.significantDigits()); EXPECT_NE(nullptr, stat.createNewInstanceOfSameType()); + const nighthawk::client::Statistic proto = stat.toProto(Statistic::SerializationDomain::RAW); + EXPECT_EQ(id, proto.id()); + EXPECT_EQ(0, proto.count()); + EXPECT_EQ(0, proto.raw_mean()); + EXPECT_EQ(0, proto.raw_pstdev()); + EXPECT_EQ(0, proto.raw_max()); + EXPECT_EQ(UINT64_MAX, proto.raw_min()); +} + +using SinkableTypes = Types; + +template class SinkableStatisticTest : public Test {}; + +TYPED_TEST_SUITE(SinkableStatisticTest, SinkableTypes); + +TYPED_TEST(SinkableStatisticTest, EmptySinkableStatistic) { + 
Envoy::Stats::MockIsolatedStatsStore mock_store; + TypeParam stat(mock_store); + EXPECT_EQ(0, stat.count()); + EXPECT_TRUE(std::isnan(stat.mean())); + EXPECT_TRUE(std::isnan(stat.pvariance())); + EXPECT_TRUE(std::isnan(stat.pstdev())); + EXPECT_EQ(stat.min(), UINT64_MAX); + EXPECT_EQ(stat.max(), 0); + EXPECT_EQ(Envoy::Stats::Histogram::Unit::Unspecified, stat.unit()); + EXPECT_FALSE(stat.used()); + EXPECT_EQ("", stat.name()); + EXPECT_DEATH(stat.tagExtractedName(), ".*"); + EXPECT_EQ(absl::nullopt, stat.worker_id()); +} + +TYPED_TEST(SinkableStatisticTest, SimpleSinkableStatistic) { + Envoy::Stats::MockIsolatedStatsStore mock_store; + const int worker_id = 0; + TypeParam stat(mock_store, worker_id); + const uint64_t sample_value = 123; + const std::string stat_name = "stat_name"; + + EXPECT_CALL(mock_store, deliverHistogramToSinks(_, sample_value)).Times(1); + stat.recordValue(sample_value); + stat.setId(stat_name); + + EXPECT_EQ(1, stat.count()); + Helper::expectNear(123.0, stat.mean(), stat.significantDigits()); + EXPECT_DOUBLE_EQ(0, stat.pvariance()); + EXPECT_DOUBLE_EQ(0, stat.pstdev()); + EXPECT_EQ(123, stat.min()); + EXPECT_EQ(123, stat.max()); + EXPECT_EQ(Envoy::Stats::Histogram::Unit::Unspecified, stat.unit()); + EXPECT_TRUE(stat.used()); + EXPECT_EQ(stat_name, stat.name()); + EXPECT_DEATH(stat.tagExtractedName(), ".*"); + EXPECT_TRUE(stat.worker_id().has_value()); + EXPECT_EQ(worker_id, stat.worker_id().value()); } } // namespace Nighthawk diff --git a/test/stream_decoder_test.cc b/test/stream_decoder_test.cc index 93cb5333f..a18cabc10 100644 --- a/test/stream_decoder_test.cc +++ b/test/stream_decoder_test.cc @@ -1,9 +1,9 @@ #include +#include "external/envoy/source/common/common/random_generator.h" #include "external/envoy/source/common/event/dispatcher_impl.h" #include "external/envoy/source/common/http/header_map_impl.h" #include "external/envoy/source/common/network/utility.h" -#include "external/envoy/source/common/runtime/runtime_impl.h" #include 
"external/envoy/source/common/stats/isolated_store_impl.h" #include "external/envoy/test/mocks/http/mocks.h" #include "external/envoy/test/mocks/stream_info/mocks.h" @@ -49,7 +49,7 @@ class StreamDecoderTest : public Test, public StreamDecoderCompletionCallback { HeaderMapPtr request_headers_; uint64_t stream_decoder_completion_callbacks_{0}; uint64_t pool_failures_{0}; - Envoy::Runtime::RandomGeneratorImpl random_generator_; + Envoy::Random::RandomGeneratorImpl random_generator_; Envoy::Tracing::HttpTracerSharedPtr http_tracer_; Envoy::Http::ResponseHeaderMapPtr test_header_; Envoy::Http::ResponseTrailerMapPtr test_trailer_; diff --git a/test/test_data/circllhist_proto_json.gold b/test/test_data/circllhist_proto_json.gold new file mode 100644 index 000000000..4262b4fd2 --- /dev/null +++ b/test/test_data/circllhist_proto_json.gold @@ -0,0 +1,130 @@ +{ + "count": "10", + "id": "", + "percentiles": [ + { + "percentile": 0, + "count": "0", + "duration": "0.000000001s" + }, + { + "percentile": 0.1, + "count": "1", + "duration": "0.000000001s" + }, + { + "percentile": 0.2, + "count": "2", + "duration": "0.000000002s" + }, + { + "percentile": 0.3, + "count": "3", + "duration": "0.000000003s" + }, + { + "percentile": 0.4, + "count": "4", + "duration": "0.000000004s" + }, + { + "percentile": 0.5, + "count": "5", + "duration": "0.000000005s" + }, + { + "percentile": 0.55, + "count": "5", + "duration": "0.000000006s" + }, + { + "percentile": 0.6, + "count": "6", + "duration": "0.000000006s" + }, + { + "percentile": 0.65, + "count": "6", + "duration": "0.000000007s" + }, + { + "percentile": 0.7, + "count": "7", + "duration": "0.000000007s" + }, + { + "percentile": 0.75, + "count": "7", + "duration": "0.000000008s" + }, + { + "percentile": 0.775, + "count": "7", + "duration": "0.000000008s" + }, + { + "percentile": 0.8, + "count": "8", + "duration": "0.000000008s" + }, + { + "percentile": 0.825, + "count": "8", + "duration": "0.000000009s" + }, + { + "percentile": 0.85, + 
"count": "8", + "duration": "0.000000009s" + }, + { + "percentile": 0.875, + "count": "8", + "duration": "0.000000009s" + }, + { + "percentile": 0.9, + "count": "9", + "duration": "0.000000009s" + }, + { + "percentile": 0.925, + "count": "9", + "duration": "0.000000010s" + }, + { + "percentile": 0.95, + "count": "9", + "duration": "0.000000010s" + }, + { + "percentile": 0.975, + "count": "9", + "duration": "0.000000010s" + }, + { + "percentile": 0.99, + "count": "9", + "duration": "0.000000010s" + }, + { + "percentile": 0.995, + "count": "9", + "duration": "0.000000010s" + }, + { + "percentile": 0.999, + "count": "9", + "duration": "0.000000010s" + }, + { + "percentile": 1, + "count": "10", + "duration": "0.000000011s" + } + ], + "mean": "0.000000006s", + "pstdev": "0.000000003s", + "min": "0.000000001s", + "max": "0.000000010s" +} diff --git a/test/worker_test.cc b/test/worker_test.cc index 30263f0af..c30a8b323 100644 --- a/test/worker_test.cc +++ b/test/worker_test.cc @@ -1,5 +1,6 @@ #include +#include "external/envoy/source/common/common/random_generator.h" #include "external/envoy/source/common/runtime/runtime_impl.h" #include "external/envoy/source/common/stats/isolated_store_impl.h" #include "external/envoy/test/mocks/local_info/mocks.h" @@ -36,7 +37,7 @@ class WorkerTest : public Test { Envoy::Api::ApiPtr api_; Envoy::ThreadLocal::MockInstance tls_; Envoy::Stats::IsolatedStoreImpl test_store_; - Envoy::Runtime::RandomGeneratorImpl rand_; + Envoy::Random::RandomGeneratorImpl rand_; NiceMock local_info_; NiceMock validation_visitor_; }; diff --git a/tools/check_envoy_includes.py b/tools/check_envoy_includes.py index a121485c4..c2ffdf90c 100755 --- a/tools/check_envoy_includes.py +++ b/tools/check_envoy_includes.py @@ -1,20 +1,21 @@ #!/usr/bin/env python3 +"""Ensure that includes are referenced via external/envoy.""" -# Ensure that includes are referenced via external/envoy - -import os, re, sys +import os +import re +import sys import subprocess from pathlib 
import Path -def get_inspection_targets_from_dir(dir): +def _get_inspection_targets_from_dir(dir): result = [] result.extend(Path(dir).rglob("*.cc")) result.extend(Path(dir).rglob("*.h")) return result -def inspect_line(bazel_output_base, file_path, line): +def _inspect_line(bazel_output_base, file_path, line): match = re.findall(r'#include "([^"]*)"', line) if len(match) == 1: path = match[0] @@ -32,20 +33,23 @@ def inspect_line(bazel_output_base, file_path, line): alternative_found = os.path.isfile(potential_envoy_path) if alternative_found: + # TODO(#399): remove after extension includes are available at the proper location. + if "extensions/filters/http/fault/fault_filter.h" == path: + return True sys.stderr.writelines("Bad include in file " + str(file_path) + ": " + path) - sys.stderr.writelines(" (Possible fixed path: " + + sys.stderr.writelines(" (Possible fixed path: %s" % potential_envoy_path[len(bazel_output_base) + 1:] + ")\n") return False return True -def inspect_file(bazel_output_base, file_path): +def _inspect_file(bazel_output_base, file_path): offending_lines = [] with open(str(file_path), encoding='utf-8') as f: lines = f.readlines() offending_lines.extend( - l for l in lines if not inspect_line(bazel_output_base, file_path, l.strip())) + line for line in lines if not _inspect_line(bazel_output_base, file_path, line.strip())) return offending_lines @@ -53,6 +57,6 @@ def inspect_file(bazel_output_base, file_path): bazel_output_base = subprocess.run(['bazel', 'info', 'output_base'], stdout=subprocess.PIPE).stdout.decode('utf-8').strip() workspace_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) - targets = get_inspection_targets_from_dir(workspace_root) - status_list = list(map(lambda x: inspect_file(bazel_output_base, x), targets)) + targets = _get_inspection_targets_from_dir(workspace_root) + status_list = list(map(lambda x: _inspect_file(bazel_output_base, x), targets)) sys.exit(-1 if any(status_list) else 0) diff --git 
a/tools/format_python_tools.py b/tools/format_python_tools.py index e81cb11a6..6f40c7501 100755 --- a/tools/format_python_tools.py +++ b/tools/format_python_tools.py @@ -37,8 +37,10 @@ def validateFormat(fix=False): failed_update_files = set() successful_update_files = set() for python_file in collectFiles(): - reformatted_source, encoding, changed = FormatFile( - python_file, style_config='.style.yapf', in_place=fix, print_diff=not fix) + reformatted_source, encoding, changed = FormatFile(python_file, + style_config='.style.yapf', + in_place=fix, + print_diff=not fix) if not fix: fixes_required = True if changed else fixes_required if reformatted_source: @@ -64,8 +66,10 @@ def displayFixResults(successful_files, failed_files): if __name__ == '__main__': parser = argparse.ArgumentParser(description='Tool to format python files.') - parser.add_argument( - 'action', choices=['check', 'fix'], default='check', help='Fix invalid syntax in files.') + parser.add_argument('action', + choices=['check', 'fix'], + default='check', + help='Fix invalid syntax in files.') args = parser.parse_args() is_valid = validateFormat(args.action == 'fix') sys.exit(0 if is_valid else 1) diff --git a/tools/format_python_tools.sh b/tools/format_python_tools.sh index 337a178cf..301d8653d 100755 --- a/tools/format_python_tools.sh +++ b/tools/format_python_tools.sh @@ -15,11 +15,23 @@ echo "Running Python format check..." python format_python_tools.py $1 echo "Running Python3 flake8 check..." -EXCLUDE="--exclude=../benchmarks/tmp/*,*/venv/*" -flake8 . ${EXCLUDE} --count --select=E901,E999,F821,F822,F823 --show-source --statistics -# We raise the bar higher for benchmarks/ overall, but especially when it comes to docstrings. -# Check everything, except indentation and line length for now. -# Also, we ignore unused imports and redefinitions of unused, as those seems to raise false flags in test definitions. 
-flake8 ../benchmarks/ ${EXCLUDE} --docstring-convention pep257 --ignore=E114,E111,E501,F401,F811 --count --show-source --statistics -# Additional docstring checking based on Google's convention. -flake8 ../benchmarks/ ${EXCLUDE} --docstring-convention google --select=D --count --show-source --statistics +cd .. +EXCLUDE="--exclude=benchmarks/tmp/*,*/venv/*,tools/format_python_tools.py,tools/gen_compilation_database.py,bazel-*" + + +# Because of conflict with the automatic fix format script, we ignore: +# E111 Indentation is not a multiple of four +# E114 Indentation is not a multiple of four (comment) +# E501 Line too long (82 > 79 characters) +# E124 Closing bracket does not match visual indentation +# E125 Continuation line with same indent as next logical line +# E126 Continuation line over-indented for hanging indent + +# We ignore false positives because of what look like pytest peculiarities +# F401 Module imported but unused +# F811 Redefinition of unused name from line n +flake8 . ${EXCLUDE} --ignore=E114,E111,E501,F401,F811,E124,E125,E126,D --count --show-source --statistics +# D = Doc comment related checks (We check both p257 AND google conventions). +flake8 . ${EXCLUDE} --docstring-convention pep257 --select=D --count --show-source --statistics +flake8 . 
${EXCLUDE} --docstring-convention google --select=D --count --show-source --statistics + diff --git a/tools/gen_compilation_database.py b/tools/gen_compilation_database.py index feba85957..73ec0ba57 100755 --- a/tools/gen_compilation_database.py +++ b/tools/gen_compilation_database.py @@ -39,8 +39,8 @@ def generateCompilationDatabase(args): compdb = [] for compdb_file in Path(execroot).glob("**/*.compile_commands.json"): - compdb.extend( - json.loads("[" + compdb_file.read_text().replace("__EXEC_ROOT__", execroot) + "]")) + compdb.extend(json.loads("[" + compdb_file.read_text().replace("__EXEC_ROOT__", execroot) + + "]")) return compdb @@ -103,12 +103,11 @@ def fixCompilationDatabase(args, db): parser.add_argument('--include_headers', action='store_true') parser.add_argument('--vscode', action='store_true') # @@@ - parser.add_argument( - 'bazel_targets', - nargs='*', - default=[ - "//source/...", "//test:*", "//test/integration/...", "//test/client/...", - "//test/server/...", "//tools/..." - ]) + parser.add_argument('bazel_targets', + nargs='*', + default=[ + "//source/...", "//test:*", "//test/integration/...", "//test/client/...", + "//test/server/...", "//tools/..." 
+ ]) args = parser.parse_args() fixCompilationDatabase(args, generateCompilationDatabase(args)) diff --git a/tools/requirements.txt b/tools/requirements.txt index c053a08de..f1d27c321 100644 --- a/tools/requirements.txt +++ b/tools/requirements.txt @@ -1,3 +1,3 @@ -flake8==3.6.0 -yapf==0.25.0 +flake8==3.8.3 +yapf==0.30.0 flake8-docstrings==1.5.0 diff --git a/tools/update_cli_readme_documentation.py b/tools/update_cli_readme_documentation.py index 4400e4440..02f460ed9 100644 --- a/tools/update_cli_readme_documentation.py +++ b/tools/update_cli_readme_documentation.py @@ -1,6 +1,5 @@ #!/usr/bin/env python3 - -# Tool to automatically update README.md files that contain --help output of c++ TCLAP binaries +"""Tool to automatically update README.md files that contain --help output of c++ TCLAP binaries.""" import logging import argparse @@ -20,12 +19,11 @@ "--readme", required=True, help="Relative path to the target documentation file, for example: \"README.md\"") - parser.add_argument( - "--mode", - default="check", - required=True, - choices={"check", "fix"}, - help="Either \"check\" or \"fix\"") + parser.add_argument("--mode", + default="check", + required=True, + choices={"check", "fix"}, + help="Either \"check\" or \"fix\"") args = parser.parse_args() logging.getLogger().setLevel(logging.INFO) @@ -42,20 +40,20 @@ target_path = pathlib.Path(readme_md_path) with target_path.open("r", encoding="utf-8") as f: original_contents = target_path.read_text(encoding="utf-8") - replaced = re.sub("\nUSAGE\:[^.]*.*%s[^```]*" % args.binary, str.join("\n", cli_help), + replaced = re.sub("\nUSAGE\\:[^.]*.*%s[^```]*" % args.binary, str.join("\n", cli_help), original_contents) # Avoid check_format flagging "over-enthousiastic" whitespace replaced = replaced.replace("... [", "... 
[") if replaced != original_contents: if args.mode == "check": - logging.info( - "CLI documentation in /%s needs to be updated for %s" % (args.readme, args.binary)) + logging.info("CLI documentation in /%s needs to be updated for %s" % + (args.readme, args.binary)) sys.exit(-1) elif args.mode == "fix": with target_path.open("w", encoding="utf-8") as f: - logging.error( - "CLI documentation in /%s needs to be updated for %s" % (args.readme, args.binary)) + logging.error("CLI documentation in /%s needs to be updated for %s" % + (args.readme, args.binary)) f.write("%s" % replaced) logging.info("Done") diff --git a/tools/valgrind-suppressions.txt b/tools/valgrind-suppressions.txt deleted file mode 100644 index dce24197c..000000000 --- a/tools/valgrind-suppressions.txt +++ /dev/null @@ -1,12 +0,0 @@ -{ - - Memcheck:Leak - match-leak-kinds: definite - fun:_Znam - fun:InitModule - fun:_ZN15MallocExtension8RegisterEPS_ - fun:__cxx_global_var_init.2 - fun:_GLOBAL__sub_I_tcmalloc.cc - fun:__libc_csu_init - fun:(below main) -} \ No newline at end of file diff --git a/tools/valgrind-tests.sh b/tools/valgrind-tests.sh deleted file mode 100755 index 0b00311eb..000000000 --- a/tools/valgrind-tests.sh +++ /dev/null @@ -1,4 +0,0 @@ -export TEST_WORKSPACE=. -export TEST_SRCDIR="$(pwd)" -export ENVOY_IP_TEST_VERSIONS="v4only" -valgrind --leak-check=full --track-origins=yes --gen-suppressions=all --suppressions=nighthawk/envoy/tools/valgrind-suppressions.txt --suppressions=nighthawk/tools/valgrind-suppressions.txt bazel-bin/test/nighthawk_test diff --git a/tools/valgrind.sh b/tools/valgrind.sh deleted file mode 100755 index 049b2dacb..000000000 --- a/tools/valgrind.sh +++ /dev/null @@ -1 +0,0 @@ -valgrind --leak-check=full --track-origins=yes --gen-suppressions=all --suppressions=nighthawk/envoy/tools/valgrind-suppressions.txt --suppressions=nighthawk/tools/valgrind-suppressions.txt bazel-bin/nighthawk_client --rps 2 http://127.0.0.1:10000/