diff --git a/.bazelrc b/.bazelrc index 256a9081a..0a920b70d 100644 --- a/.bazelrc +++ b/.bazelrc @@ -13,10 +13,10 @@ # Startup options cannot be selected via config. startup --host_jvm_args=-Xmx2g -build --workspace_status_command=bazel/get_workspace_status +build --workspace_status_command="bash bazel/get_workspace_status" build --experimental_local_memory_estimate build --experimental_strict_action_env=true -build --host_force_python=PY2 +build --host_force_python=PY3 build --action_env=BAZEL_LINKLIBS=-l%:libstdc++.a build --action_env=BAZEL_LINKOPTS=-lm build --host_javabase=@bazel_tools//tools/jdk:remote_jdk11 @@ -113,6 +113,28 @@ build:sizeopt -c opt --copt -Os # Test options build --test_env=HEAPCHECK=normal --test_env=PPROF_PATH +# Coverage options +coverage --config=coverage +build:coverage --action_env=BAZEL_USE_LLVM_NATIVE_COVERAGE=1 +build:coverage --action_env=GCOV=llvm-profdata +build:coverage --copt=-DNDEBUG +build:coverage --test_timeout=900 +build:coverage --define=ENVOY_CONFIG_COVERAGE=1 +build:coverage --cxxopt="-DENVOY_CONFIG_COVERAGE=1" +build:coverage --coverage_support=@envoy//bazel/coverage:coverage_support +build:coverage --test_env=CC_CODE_COVERAGE_SCRIPT=external/envoy/bazel/coverage/collect_cc_coverage.sh +build:coverage --test_env=HEAPCHECK= +build:coverage --combined_report=lcov +build:coverage --strategy=TestRunner=sandboxed,local +build:coverage --strategy=CoverageReport=sandboxed,local +build:coverage --experimental_use_llvm_covmap +build:coverage --collect_code_coverage +build:coverage --instrumentation_filter="//source(?!/common/chromium_url|/extensions/quic_listeners/quiche/platform)[/:],//include[/:]" +coverage:test-coverage --test_arg="--log-path /dev/null" +coverage:test-coverage --test_arg="-l trace" +coverage:fuzz-coverage --config=asan-fuzzer +coverage:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh + # Remote execution: https://docs.bazel.build/versions/master/remote-execution.html 
build:rbe-toolchain --host_platform=@envoy_build_tools//toolchains:rbe_ubuntu_clang_platform build:rbe-toolchain --platforms=@envoy_build_tools//toolchains:rbe_ubuntu_clang_platform @@ -161,8 +183,8 @@ build:remote-msan --config=rbe-toolchain-clang-libc++ build:remote-msan --config=rbe-toolchain-msan # Docker sandbox -# NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L7 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu@sha256:3788a87461f2b3dc8048ad0ce5df40438a56e0a8f1a4ab0f61b4ef0d8c11ff1f +# NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:04f06115b6ee7cfea74930353fb47a41149cbec3 build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker @@ -191,10 +213,64 @@ build:remote-ci --remote_executor=grpcs://remotebuildexecution.googleapis.com # Fuzz builds build:asan-fuzzer --config=clang-asan build:asan-fuzzer --define=FUZZING_ENGINE=libfuzzer -build:asan-fuzzer --copt=-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION build:asan-fuzzer --copt=-fsanitize=fuzzer-no-link +build:asan-fuzzer --copt=-fno-omit-frame-pointer +build:asan-fuzzer --copt=-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION # Remove UBSAN halt_on_error to avoid crashing on protobuf errors. build:asan-fuzzer --test_env=UBSAN_OPTIONS=print_stacktrace=1 +# Fuzzing without ASAN. This is useful for profiling fuzzers without any ASAN artifacts. +build:plain-fuzzer --define=FUZZING_ENGINE=libfuzzer +build:plain-fuzzer --copt=-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION +build:plain-fuzzer --copt=-fsanitize=fuzzer-no-link + +# Compile database generation config +# We don't care about built binaries so always strip and use fastbuild. 
+build:compdb -c fastbuild +build:compdb --strip=always +build:compdb --build_tag_filters=-nocompdb +build:compdb --define=ENVOY_CONFIG_COMPILATION_DATABASE=1 + +# Windows build quirks +build:windows --action_env=TMPDIR +build:windows --define signal_trace=disabled +build:windows --define hot_restart=disabled +build:windows --define tcmalloc=disabled +build:windows --define manual_stamp=manual_stamp + +# Should not be required after upstream fix to bazel, +# and already a no-op to linux/macos builds +# see issue https://github.com/bazelbuild/rules_foreign_cc/issues/301 +build:windows --copt="-DCARES_STATICLIB" +build:windows --copt="-DNGHTTP2_STATICLIB" +build:windows --copt="-DCURL_STATICLIB" + +# Required to work around build defects on Windows MSVC cl +# Unguarded gcc pragmas in quiche are not recognized by MSVC +build:msvc-cl --copt="/wd4068" +# Allows 'nodiscard' function return values to be discarded +build:msvc-cl --copt="/wd4834" +# Allows inline functions to be undefined +build:msvc-cl --copt="/wd4506" +build:msvc-cl --copt="-D_SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING" + +# Required to work around Windows clang-cl build defects +# Ignore conflicting definitions of _WIN32_WINNT +# Overriding __TIME__ etc is problematic (and is actually an invalid no-op) +build:clang-cl --copt="-Wno-macro-redefined" +build:clang-cl --copt="-Wno-builtin-macro-redefined" +build:clang-cl --action_env=USE_CLANG_CL=1 + +# Defaults to 'auto' - Off for windows, so override to linux behavior +build:windows --enable_runfiles=yes + +# This should become adopted by bazel as the default +build:windows --features=compiler_param_file + +# These options attempt to force a monolithic binary including the CRT +build:windows --features=fully_static_link +build:windows --features=static_link_msvcrt +build:windows --dynamic_mode=off + try-import %workspace%/clang.bazelrc -try-import %workspace%/user.bazelrc +try-import %workspace%/user.bazelrc \ No newline at end of file diff --git 
a/.bazelversion b/.bazelversion new file mode 100644 index 000000000..fd2a01863 --- /dev/null +++ b/.bazelversion @@ -0,0 +1 @@ +3.1.0 diff --git a/.circleci/config.yml b/.circleci/config.yml index ff9710745..fffd93077 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,7 +1,7 @@ references: envoy-build-image: &envoy-build-image # Jan 9th, 2020 - envoyproxy/envoy-build-ubuntu@sha256:3788a87461f2b3dc8048ad0ce5df40438a56e0a8f1a4ab0f61b4ef0d8c11ff1f + envoyproxy/envoy-build-ubuntu:04f06115b6ee7cfea74930353fb47a41149cbec3 version: 2 jobs: build: diff --git a/WORKSPACE b/WORKSPACE index 95a53aeb7..a13e6dad8 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -16,6 +16,10 @@ load("@envoy//bazel:repositories.bzl", "envoy_dependencies") envoy_dependencies() +load("@envoy//bazel:repositories_extra.bzl", "envoy_dependencies_extra") + +envoy_dependencies_extra() + load("@envoy//bazel:dependency_imports.bzl", "envoy_dependency_imports") envoy_dependency_imports() diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index c12bcb84a..3f9cfcfd7 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,10 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "3a2512d923e2eee1fce9f4f6a23cf93f2e7ed93f" # March 1st, 2020 -ENVOY_SHA = "6a643ae3141762c403aa031cc19e65c441759785fd7dda51dcf5ad5f149283d0" - -RULES_PYTHON_COMMIT = "dd7f9c5f01bafbfea08c44092b6b0c8fc8fcb77f" # Feb 22nd, 2020 -RULES_PYTHON_SHA = "0aa9ec790a58053e3ab5af397879b267a625955f8297c239b2d8559c6773397b" +ENVOY_COMMIT = "74290ef76a76fbbf50f072dc33438791f93f68c7" # May 25th, 2020 +ENVOY_SHA = "123ad63c7081f2575aa08a5038d2302d7def3ea5d86cc4dd274757b597212446" HDR_HISTOGRAM_C_VERSION = "0.9.13" # Feb 22nd, 2020 HDR_HISTOGRAM_C_SHA = "2bd4a4631b64f2f8cf968ef49dd03ff3c51b487c3c98a01217ae4cf4a35b8310" @@ -55,10 +52,3 @@ cc_library( strip_prefix = "HdrHistogram_c-%s" % HDR_HISTOGRAM_C_VERSION, url = 
"https://github.com/HdrHistogram/HdrHistogram_c/archive/%s.tar.gz" % HDR_HISTOGRAM_C_VERSION, ) - - http_archive( - name = "io_bazel_rules_python", - sha256 = RULES_PYTHON_SHA, - strip_prefix = "rules_python-%s" % RULES_PYTHON_COMMIT, - url = "https://github.com/bazelbuild/rules_python/archive/%s.tar.gz" % RULES_PYTHON_COMMIT, - ) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 8de24d3df..878a6efee 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -73,13 +73,20 @@ function do_asan() { echo "bazel ASAN/UBSAN debug build with tests" echo "Building and testing envoy tests..." cd "${SRCDIR}" - run_bazel test ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan //test/... + + # We build this in steps to avoid running out of memory in CI + run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan -- //source/exe/... && \ + run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan -- //source/server/... && \ + run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan -- //test/mocks/... && \ + run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan -- //test/... && \ + run_bazel test ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan -- //test/... } function do_tsan() { echo "bazel TSAN debug build with tests" echo "Building and testing envoy tests..." cd "${SRCDIR}" + [ -z "$CIRCLECI" ] || export BAZEL_BUILD_OPTIONS="${BAZEL_TEST_OPTIONS} --local_ram_resources=12288" run_bazel test ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-tsan //test/... } @@ -115,6 +122,12 @@ if [ -n "$CIRCLECI" ]; then fi # We constrain parallelism in CI to avoid running out of memory. 
NUM_CPUS=8 + if [[ "$1" == "asan" ]]; then + NUM_CPUS=5 + fi + if [[ "$1" == "coverage" ]]; then + NUM_CPUS=3 + fi fi if grep 'docker\|lxc' /proc/1/cgroup; then @@ -177,10 +190,6 @@ case "$1" in exit 0 ;; asan) - if [ -n "$CIRCLECI" ]; then - # Decrease parallelism to avoid running out of memory - NUM_CPUS=7 - fi do_asan exit 0 ;; diff --git a/ci/docker/docker_push.sh b/ci/docker/docker_push.sh index 4116590af..9fd065a87 100755 --- a/ci/docker/docker_push.sh +++ b/ci/docker/docker_push.sh @@ -13,11 +13,19 @@ fi DOCKER_IMAGE_PREFIX="${DOCKER_IMAGE_PREFIX:-envoyproxy/nighthawk}" # push the nighthawk image on tags or merge to master -if [[ -n "$CIRCLE_TAG" ]] || [[ "$CIRCLE_BRANCH" = 'master' ]]; then +if [[ "$CIRCLE_BRANCH" = 'master' ]]; then docker login -u "$DOCKERHUB_USERNAME" -p "$DOCKERHUB_PASSWORD" docker push "${DOCKER_IMAGE_PREFIX}-dev:latest" docker tag "${DOCKER_IMAGE_PREFIX}-dev:latest" "${DOCKER_IMAGE_PREFIX}-dev:${CIRCLE_SHA1}" docker push "${DOCKER_IMAGE_PREFIX}-dev:${CIRCLE_SHA1}" else - echo 'Ignoring non-master branch for docker push.' + if [[ -n "$CIRCLE_TAG" ]]; then + TAG="$CIRCLE_TAG" + docker login -u "$DOCKERHUB_USERNAME" -p "$DOCKERHUB_PASSWORD" + docker push "${DOCKER_IMAGE_PREFIX}:${TAG}" + docker tag "${DOCKER_IMAGE_PREFIX}:${TAG}" "${DOCKER_IMAGE_PREFIX}:${TAG}" + docker push "${DOCKER_IMAGE_PREFIX}:${TAG}" + else + echo 'Ignoring non-master branch for docker push.' + fi fi diff --git a/include/nighthawk/client/factories.h b/include/nighthawk/client/factories.h index d2e6b35a5..dc393d8a3 100644 --- a/include/nighthawk/client/factories.h +++ b/include/nighthawk/client/factories.h @@ -24,19 +24,44 @@ namespace Client { class BenchmarkClientFactory { public: virtual ~BenchmarkClientFactory() = default; + + /** + * Constructs a BenchmarkClient + * + * @param api reference to the Api object. + * @param dispatcher supplies the owning thread's dispatcher. + * @param scope stats scope for any stats tracked by the benchmark client. 
+ * @param cluster_manager Cluster manager preconfigured with our target cluster. + * @param http_tracer Shared pointer to an http tracer implementation (e.g. Zipkin). + * @param cluster_name Name of the cluster that this benchmark client will use. In conjunction + * with cluster_manager this will allow this BenchmarkClient to access the target connection + * pool. + * @param request_source Source of request-specifiers. Will be queried every time the + * BenchmarkClient is asked to issue a request. + * + * @return BenchmarkClientPtr pointer to a BenchmarkClient instance. + */ virtual BenchmarkClientPtr create(Envoy::Api::Api& api, Envoy::Event::Dispatcher& dispatcher, Envoy::Stats::Scope& scope, Envoy::Upstream::ClusterManagerPtr& cluster_manager, - Envoy::Tracing::HttpTracerPtr& http_tracer, + Envoy::Tracing::HttpTracerSharedPtr& http_tracer, absl::string_view cluster_name, - RequestSource& request_generator) const PURE; + RequestSource& request_source) const PURE; }; class OutputFormatterFactory { public: virtual ~OutputFormatterFactory() = default; + + /** + * Constructs an OutputFormatter instance according to the requested output format. + * + * @param options Proto configuration object indicating the desired output format. + * + * @return OutputFormatterPtr pointer to an OutputFormatter instance. + */ virtual OutputFormatterPtr - create(const nighthawk::client::OutputFormat_OutputFormatOptions) const PURE; + create(const nighthawk::client::OutputFormat_OutputFormatOptions options) const PURE; }; } // namespace Client diff --git a/include/nighthawk/client/output_collector.h b/include/nighthawk/client/output_collector.h index d55f2eb8f..1ff274821 100644 --- a/include/nighthawk/client/output_collector.h +++ b/include/nighthawk/client/output_collector.h @@ -15,6 +15,15 @@ namespace Client { class OutputCollector { public: virtual ~OutputCollector() = default; + + /** + * Adds a result to the structured output. + + * @param name unique name of the result.
E.g. worker_1. + * @param statistics Reference to a vector of statistics to add to the output. + * @param counters Reference to a map of counter values, keyed by name, to add to the output. + * @param execution_duration Execution duration associated to the to-be-added result. + */ virtual void addResult(absl::string_view name, const std::vector& statistics, const std::map& counters, const std::chrono::nanoseconds execution_duration) PURE; @@ -24,6 +33,10 @@ class OutputCollector { * @param output the output value to set. */ virtual void setOutput(const nighthawk::client::Output& output) PURE; + + /** + * @return nighthawk::client::Output proto output object. + */ virtual nighthawk::client::Output toProto() const PURE; }; diff --git a/include/nighthawk/client/output_formatter.h b/include/nighthawk/client/output_formatter.h index e3e15e44b..d28fffb0b 100644 --- a/include/nighthawk/client/output_formatter.h +++ b/include/nighthawk/client/output_formatter.h @@ -17,6 +17,11 @@ namespace Client { class OutputFormatter { public: virtual ~OutputFormatter() = default; + + /** + * @return std::string serialized representation of output. The specific format depends + * on the derived class, for example human-readable or json. + */ virtual std::string formatProto(const nighthawk::client::Output& output) const PURE; }; diff --git a/include/nighthawk/common/request.h b/include/nighthawk/common/request.h index d24fedeb3..d123b8736 100644 --- a/include/nighthawk/common/request.h +++ b/include/nighthawk/common/request.h @@ -15,6 +15,10 @@ using HeaderMapPtr = std::shared_ptr; class Request { public: virtual ~Request() = default; + + /** + * @return HeaderMapPtr shared pointer to a request header specification. 
+ */ virtual HeaderMapPtr header() const PURE; // TODO(oschaaf): expectations }; diff --git a/lorem_ipsum.txt b/lorem_ipsum.txt deleted file mode 100644 index 19c215558..000000000 --- a/lorem_ipsum.txt +++ /dev/null @@ -1 +0,0 @@ -hay diff --git a/source/client/BUILD b/source/client/BUILD index 4ae1e0cb8..7d4ede8f4 100644 --- a/source/client/BUILD +++ b/source/client/BUILD @@ -52,11 +52,12 @@ envoy_cc_library( "@envoy//source/common/config:utility_lib_with_external_headers", "@envoy//source/common/event:dispatcher_includes_with_external_headers", "@envoy//source/common/event:real_time_system_lib_with_external_headers", + "@envoy//source/common/grpc:context_lib_with_external_headers", "@envoy//source/common/http:context_lib_with_external_headers", "@envoy//source/common/http:header_map_lib_with_external_headers", "@envoy//source/common/http:headers_lib_with_external_headers", "@envoy//source/common/http/http1:codec_lib_with_external_headers", - "@envoy//source/common/http/http1:conn_pool_legacy_lib_with_external_headers", + "@envoy//source/common/http/http1:conn_pool_lib_with_external_headers", "@envoy//source/common/http/http2:conn_pool_lib_with_external_headers", "@envoy//source/common/init:manager_lib_with_external_headers", "@envoy//source/common/local_info:local_info_lib_with_external_headers", @@ -65,7 +66,6 @@ envoy_cc_library( "@envoy//source/common/protobuf:message_validator_lib_with_external_headers", "@envoy//source/common/protobuf:utility_lib_with_external_headers", "@envoy//source/common/runtime:runtime_lib_with_external_headers", - "@envoy//source/common/runtime:uuid_util_lib_with_external_headers", "@envoy//source/common/secret:secret_manager_impl_lib_with_external_headers", "@envoy//source/common/singleton:manager_impl_lib_with_external_headers", "@envoy//source/common/stats:allocator_lib_with_external_headers", @@ -79,6 +79,7 @@ envoy_cc_library( "@envoy//source/exe:platform_header_lib_with_external_headers", "@envoy//source/exe:platform_impl_lib", 
"@envoy//source/exe:process_wide_lib_with_external_headers", + "@envoy//source/common/http:request_id_extension_lib_with_external_headers", "@envoy//source/extensions/tracers:well_known_names_with_external_headers", "@envoy//source/extensions/transport_sockets:well_known_names_with_external_headers", "@envoy//source/extensions/transport_sockets/tls:context_lib_with_external_headers", diff --git a/source/client/benchmark_client_impl.cc b/source/client/benchmark_client_impl.cc index 8a1bc425f..37902e58f 100644 --- a/source/client/benchmark_client_impl.cc +++ b/source/client/benchmark_client_impl.cc @@ -9,7 +9,6 @@ #include "external/envoy/source/common/http/headers.h" #include "external/envoy/source/common/http/utility.h" #include "external/envoy/source/common/network/utility.h" -#include "external/envoy/source/common/runtime/uuid_util.h" #include "client/stream_decoder.h" @@ -27,7 +26,11 @@ Http1PoolImpl::newStream(Envoy::Http::ResponseDecoder& response_decoder, // In prefetch mode we try to keep the amount of connections at the configured limit. if (prefetch_connections_) { while (host_->cluster().resourceManager(priority_).connections().canCreate()) { - createNewConnection(); + // We cannot rely on ::tryCreateConnection here, because that might decline without + // updating connections().canCreate() above. We would risk an infinite loop. + ActiveClientPtr client = instantiateActiveClient(); + connecting_request_capacity_ += client->effectiveConcurrentRequestLimit(); + client->moveIntoList(std::move(client), owningList(client->state_)); } } @@ -35,8 +38,7 @@ Http1PoolImpl::newStream(Envoy::Http::ResponseDecoder& response_decoder, // of ready_clients_, which will pick the oldest one instead. This makes us cycle through // all the available connections. 
if (!ready_clients_.empty() && connection_reuse_strategy_ == ConnectionReuseStrategy::LRU) { - ready_clients_.back()->moveBetweenLists(ready_clients_, busy_clients_); - attachRequestToClient(*busy_clients_.front(), response_decoder, callbacks); + attachRequestToClient(*ready_clients_.back(), response_decoder, callbacks); return nullptr; } @@ -44,58 +46,12 @@ Http1PoolImpl::newStream(Envoy::Http::ResponseDecoder& response_decoder, return ConnPoolImpl::newStream(response_decoder, callbacks); } -Http2PoolImpl::Http2PoolImpl( - Envoy::Event::Dispatcher& dispatcher, Envoy::Upstream::HostConstSharedPtr host, - Envoy::Upstream::ResourcePriority priority, - const Envoy::Network::ConnectionSocket::OptionsSharedPtr& options, // NOLINT - const Envoy::Network::TransportSocketOptionsSharedPtr& transport_socket_options) // NOLINT - : Envoy::Http::Legacy::ConnPoolImplBase(std::move(host), priority), dispatcher_(dispatcher), - socket_options_(options), transport_socket_options_(transport_socket_options) { - for (uint32_t i = 0; i < host_->cluster().resourceManager(priority_).connections().max(); i++) { - pools_.push_back(std::make_unique( - dispatcher_, host_, priority_, socket_options_, transport_socket_options_)); - } -} - -void Http2PoolImpl::addDrainedCallback(Envoy::Http::ConnectionPool::Instance::DrainedCb cb) { - for (auto& pool : pools_) { - pool->addDrainedCallback(cb); - } -} - -void Http2PoolImpl::drainConnections() { - for (auto& pool : pools_) { - pool->drainConnections(); - } -} - -bool Http2PoolImpl::hasActiveConnections() const { - for (auto& pool : pools_) { - if (pool->hasActiveConnections()) { - return true; - } - } - return false; -} - -Envoy::Http::ConnectionPool::Cancellable* -Http2PoolImpl::newStream(Envoy::Http::ResponseDecoder& response_decoder, - Envoy::Http::ConnectionPool::Callbacks& callbacks) { - // Use the simplest but probably naive approach of rotating over the available pool instances - // / connections to distribute requests accross them. 
- return pools_[pool_round_robin_index_++ % pools_.size()]->newStream(response_decoder, callbacks); -}; - -void Http2PoolImpl::checkForDrained() { - // TODO(oschaaf): this one is protected, can't forward it. -} - BenchmarkClientHttpImpl::BenchmarkClientHttpImpl( Envoy::Api::Api& api, Envoy::Event::Dispatcher& dispatcher, Envoy::Stats::Scope& scope, StatisticPtr&& connect_statistic, StatisticPtr&& response_statistic, StatisticPtr&& response_header_size_statistic, StatisticPtr&& response_body_size_statistic, bool use_h2, Envoy::Upstream::ClusterManagerPtr& cluster_manager, - Envoy::Tracing::HttpTracerPtr& http_tracer, absl::string_view cluster_name, + Envoy::Tracing::HttpTracerSharedPtr& http_tracer, absl::string_view cluster_name, RequestGenerator request_generator, const bool provide_resource_backpressure) : api_(api), dispatcher_(dispatcher), scope_(scope.createScope("benchmark.")), connect_statistic_(std::move(connect_statistic)), @@ -161,12 +117,11 @@ bool BenchmarkClientHttpImpl::tryStartRequest(CompletionCallback caller_completi } } - std::string x_request_id = generator_.uuid(); auto stream_decoder = new StreamDecoder( dispatcher_, api_.timeSource(), *this, std::move(caller_completion_callback), *connect_statistic_, *response_statistic_, *response_header_size_statistic_, *response_body_size_statistic_, request->header(), shouldMeasureLatencies(), content_length, - x_request_id, http_tracer_); + generator_, http_tracer_); requests_initiated_++; pool_ptr->newStream(*stream_decoder, *stream_decoder); return true; @@ -202,9 +157,12 @@ void BenchmarkClientHttpImpl::onPoolFailure(Envoy::Http::ConnectionPool::PoolFai case Envoy::Http::ConnectionPool::PoolFailureReason::Overflow: benchmark_client_stats_.pool_overflow_.inc(); break; - case Envoy::Http::ConnectionPool::PoolFailureReason::ConnectionFailure: + case Envoy::Http::ConnectionPool::PoolFailureReason::LocalConnectionFailure: + case Envoy::Http::ConnectionPool::PoolFailureReason::RemoteConnectionFailure: 
benchmark_client_stats_.pool_connection_failure_.inc(); break; + case Envoy::Http::ConnectionPool::PoolFailureReason::Timeout: + break; default: NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/source/client/benchmark_client_impl.h b/source/client/benchmark_client_impl.h index 69c580eb6..a40e90a19 100644 --- a/source/client/benchmark_client_impl.h +++ b/source/client/benchmark_client_impl.h @@ -15,7 +15,7 @@ #include "nighthawk/common/statistic.h" #include "external/envoy/source/common/common/logger.h" -#include "external/envoy/source/common/http/http1/conn_pool_legacy.h" +#include "external/envoy/source/common/http/http1/conn_pool.h" #include "external/envoy/source/common/http/http2/conn_pool.h" #include "external/envoy/source/common/runtime/runtime_impl.h" @@ -45,13 +45,13 @@ struct BenchmarkClientStats { ALL_BENCHMARK_CLIENT_STATS(GENERATE_COUNTER_STRUCT) }; -class Http1PoolImpl : public Envoy::Http::Legacy::Http1::ProdConnPoolImpl { +class Http1PoolImpl : public Envoy::Http::Http1::ProdConnPoolImpl { public: enum class ConnectionReuseStrategy { MRU, LRU, }; - using Envoy::Http::Legacy::Http1::ProdConnPoolImpl::ProdConnPoolImpl; + using Envoy::Http::Http1::ProdConnPoolImpl::ProdConnPoolImpl; Envoy::Http::ConnectionPool::Cancellable* newStream(Envoy::Http::ResponseDecoder& response_decoder, Envoy::Http::ConnectionPool::Callbacks& callbacks) override; @@ -67,49 +67,6 @@ class Http1PoolImpl : public Envoy::Http::Legacy::Http1::ProdConnPoolImpl { bool prefetch_connections_{}; }; -// Vanilla Envoy's HTTP/2 pool is single connection only (or actually sometimes dual in connection -// drainage scenarios). Http2PoolImpl is an experimental pool, which uses multiple vanilla Envoy -// HTTP/2 pools under the hood. Using multiple connections is useful when testing backends that need -// multiple connections to distribute load internally. 
Combining multiple connections with -// --max-requests-per-connection may help as well, as doing periodically initiating new connections -// may help the benchmark target by giving it an opportunity to rebalance. -class Http2PoolImpl : public Envoy::Http::ConnectionPool::Instance, - public Envoy::Http::Legacy::ConnPoolImplBase { -public: - // For doc comments, see Envoy::Http::ConnectionPool::Instance & Envoy::Http::ConnPoolImplBase - Http2PoolImpl( - Envoy::Event::Dispatcher& dispatcher, Envoy::Upstream::HostConstSharedPtr host, - Envoy::Upstream::ResourcePriority priority, - const Envoy::Network::ConnectionSocket::OptionsSharedPtr& options, // NOLINT - const Envoy::Network::TransportSocketOptionsSharedPtr& transport_socket_options); // NOLINT - - // Envoy::Http::ConnectionPool::Instance - Envoy::Http::Protocol protocol() const override { return Envoy::Http::Protocol::Http2; } - - void addDrainedCallback(Envoy::Http::ConnectionPool::Instance::DrainedCb cb) override; - - void drainConnections() override; - - bool hasActiveConnections() const override; - - Envoy::Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }; - - Envoy::Http::ConnectionPool::Cancellable* - newStream(Envoy::Http::ResponseDecoder& response_decoder, - Envoy::Http::ConnectionPool::Callbacks& callbacks) override; - -protected: - // Envoy::Http::ConnPoolImplBase - void checkForDrained() override; - -private: - Envoy::Event::Dispatcher& dispatcher_; - const Envoy::Network::ConnectionSocket::OptionsSharedPtr socket_options_; - const Envoy::Network::TransportSocketOptionsSharedPtr transport_socket_options_; - std::vector> pools_; - uint64_t pool_round_robin_index_{0}; -}; - class BenchmarkClientHttpImpl : public BenchmarkClient, public StreamDecoderCompletionCallback, public Envoy::Logger::Loggable { @@ -120,7 +77,7 @@ class BenchmarkClientHttpImpl : public BenchmarkClient, StatisticPtr&& response_header_size_statistic, StatisticPtr&& response_body_size_statistic, bool 
use_h2, Envoy::Upstream::ClusterManagerPtr& cluster_manager, - Envoy::Tracing::HttpTracerPtr& http_tracer, + Envoy::Tracing::HttpTracerSharedPtr& http_tracer, absl::string_view cluster_name, RequestGenerator request_generator, const bool provide_resource_backpressure); void setConnectionLimit(uint32_t connection_limit) { connection_limit_ = connection_limit; } @@ -178,7 +135,7 @@ class BenchmarkClientHttpImpl : public BenchmarkClient, bool measure_latencies_{}; BenchmarkClientStats benchmark_client_stats_; Envoy::Upstream::ClusterManagerPtr& cluster_manager_; - Envoy::Tracing::HttpTracerPtr& http_tracer_; + Envoy::Tracing::HttpTracerSharedPtr& http_tracer_; std::string cluster_name_; const RequestGenerator request_generator_; const bool provide_resource_backpressure_; diff --git a/source/client/client_worker_impl.cc b/source/client/client_worker_impl.cc index e1eae9c08..e4231bc87 100644 --- a/source/client/client_worker_impl.cc +++ b/source/client/client_worker_impl.cc @@ -20,7 +20,7 @@ ClientWorkerImpl::ClientWorkerImpl(Envoy::Api::Api& api, Envoy::ThreadLocal::Ins const RequestSourceFactory& request_generator_factory, Envoy::Stats::Store& store, const int worker_number, const Envoy::MonotonicTime starting_time, - Envoy::Tracing::HttpTracerPtr& http_tracer, + Envoy::Tracing::HttpTracerSharedPtr& http_tracer, const HardCodedWarmupStyle hardcoded_warmup_style) : WorkerImpl(api, tls, store), time_source_(std::make_unique(*dispatcher_)), diff --git a/source/client/client_worker_impl.h b/source/client/client_worker_impl.h index ded0a9d9b..05f2fcb35 100644 --- a/source/client/client_worker_impl.h +++ b/source/client/client_worker_impl.h @@ -34,7 +34,7 @@ class ClientWorkerImpl : public WorkerImpl, virtual public ClientWorker { const RequestSourceFactory& request_generator_factory, Envoy::Stats::Store& store, const int worker_number, const Envoy::MonotonicTime starting_time, - Envoy::Tracing::HttpTracerPtr& http_tracer, + Envoy::Tracing::HttpTracerSharedPtr& http_tracer, 
const HardCodedWarmupStyle hardcoded_warmup_style); StatisticPtrMap statistics() const override; @@ -58,7 +58,7 @@ class ClientWorkerImpl : public WorkerImpl, virtual public ClientWorker { Envoy::Stats::ScopePtr worker_scope_; Envoy::Stats::ScopePtr worker_number_scope_; const int worker_number_; - Envoy::Tracing::HttpTracerPtr& http_tracer_; + Envoy::Tracing::HttpTracerSharedPtr& http_tracer_; RequestSourcePtr request_generator_; BenchmarkClientPtr benchmark_client_; PhasePtr phase_; diff --git a/source/client/factories_impl.cc b/source/client/factories_impl.cc index 2c7ac533b..e65111f01 100644 --- a/source/client/factories_impl.cc +++ b/source/client/factories_impl.cc @@ -27,8 +27,9 @@ BenchmarkClientFactoryImpl::BenchmarkClientFactoryImpl(const Options& options) BenchmarkClientPtr BenchmarkClientFactoryImpl::create( Envoy::Api::Api& api, Envoy::Event::Dispatcher& dispatcher, Envoy::Stats::Scope& scope, - Envoy::Upstream::ClusterManagerPtr& cluster_manager, Envoy::Tracing::HttpTracerPtr& http_tracer, - absl::string_view cluster_name, RequestSource& request_generator) const { + Envoy::Upstream::ClusterManagerPtr& cluster_manager, + Envoy::Tracing::HttpTracerSharedPtr& http_tracer, absl::string_view cluster_name, + RequestSource& request_generator) const { StatisticFactoryImpl statistic_factory(options_); // While we lack options to configure which statistic backend goes where, we directly pass // StreamingStatistic for the stats that track response sizes. 
Ideally we would have options @@ -195,7 +196,7 @@ TerminationPredicate* TerminationPredicateFactoryImpl::linkConfiguredPredicates( predicate.first, predicate.second); current_predicate = ¤t_predicate->link( std::make_unique( - scope.counter(predicate.first), predicate.second, termination_status)); + scope.counterFromString(predicate.first), predicate.second, termination_status)); } return current_predicate; } diff --git a/source/client/factories_impl.h b/source/client/factories_impl.h index 3605739f5..928b63f99 100644 --- a/source/client/factories_impl.h +++ b/source/client/factories_impl.h @@ -30,7 +30,7 @@ class BenchmarkClientFactoryImpl : public OptionBasedFactoryImpl, public Benchma BenchmarkClientPtr create(Envoy::Api::Api& api, Envoy::Event::Dispatcher& dispatcher, Envoy::Stats::Scope& scope, Envoy::Upstream::ClusterManagerPtr& cluster_manager, - Envoy::Tracing::HttpTracerPtr& http_tracer, + Envoy::Tracing::HttpTracerSharedPtr& http_tracer, absl::string_view cluster_name, RequestSource& request_generator) const override; }; diff --git a/source/client/output_formatter_impl.cc b/source/client/output_formatter_impl.cc index e5bc44cdc..c438179a3 100644 --- a/source/client/output_formatter_impl.cc +++ b/source/client/output_formatter_impl.cc @@ -91,8 +91,9 @@ std::string ConsoleOutputFormatterImpl::formatProto(const nighthawk::client::Out header_written = true; } ss << fmt::format(" {:<{}}{:<{}}{:<{}}", p, 12, percentile.count(), 12, - percentile.has_duration() ? formatProtoDuration(percentile.duration()) - : fmt::format("{}", percentile.raw_value()), + percentile.has_duration() + ? 
formatProtoDuration(percentile.duration()) + : fmt::format("{}", static_cast(percentile.raw_value())), 15) << std::endl; } @@ -189,7 +190,8 @@ DottedStringOutputFormatterImpl::formatProto(const nighthawk::client::Output& ou "{}.microseconds: {}", percentile_prefix, Envoy::Protobuf::util::TimeUtil::DurationToMicroseconds(percentile.duration())); } else { - ss << fmt::format("{}.value: {}", percentile_prefix, percentile.raw_value()); + ss << fmt::format("{}.value: {}", percentile_prefix, + static_cast(percentile.raw_value())); } ss << std::endl; }); diff --git a/source/client/process_impl.cc b/source/client/process_impl.cc index a5da471a3..135e40706 100644 --- a/source/client/process_impl.cc +++ b/source/client/process_impl.cc @@ -74,9 +74,6 @@ class ClusterManagerFactory : public Envoy::Upstream::ProdClusterManagerFactory h1_pool->setConnectionReuseStrategy(connection_reuse_strategy_); h1_pool->setPrefetchConnections(prefetch_connections_); return Envoy::Http::ConnectionPool::InstancePtr{h1_pool}; - } else if (use_multi_conn_h2_pool_ && protocol == Envoy::Http::Protocol::Http2) { - return Envoy::Http::ConnectionPool::InstancePtr{ - new Http2PoolImpl(dispatcher, host, priority, options, transport_socket_options)}; } return Envoy::Upstream::ProdClusterManagerFactory::allocateConnPool( dispatcher, host, priority, protocol, options, transport_socket_options); @@ -89,29 +86,28 @@ class ClusterManagerFactory : public Envoy::Upstream::ProdClusterManagerFactory void setPrefetchConnections(const bool prefetch_connections) { prefetch_connections_ = prefetch_connections; } - void enableMultiConnectionH2Pool() { use_multi_conn_h2_pool_ = true; } private: Http1PoolImpl::ConnectionReuseStrategy connection_reuse_strategy_{}; bool prefetch_connections_{}; - bool use_multi_conn_h2_pool_{}; }; ProcessImpl::ProcessImpl(const Options& options, Envoy::Event::TimeSystem& time_system) : time_system_(time_system), stats_allocator_(symbol_table_), store_root_(stats_allocator_), 
api_(std::make_unique(platform_impl_.threadFactory(), store_root_, time_system_, platform_impl_.fileSystem())), - dispatcher_(api_->allocateDispatcher()), benchmark_client_factory_(options), + dispatcher_(api_->allocateDispatcher("main_thread")), benchmark_client_factory_(options), termination_predicate_factory_(options), sequencer_factory_(options), request_generator_factory_(options), options_(options), init_manager_("nh_init_manager"), local_info_(new Envoy::LocalInfo::LocalInfoImpl( {}, Envoy::Network::Utility::getLocalAddress(Envoy::Network::Address::IpVersion::v4), "nighthawk_service_zone", "nighthawk_service_cluster", "nighthawk_service_node")), secret_manager_(config_tracker_), http_context_(store_root_.symbolTable()), + grpc_context_(store_root_.symbolTable()), singleton_manager_(std::make_unique(api_->threadFactory())), access_log_manager_(std::chrono::milliseconds(1000), *api_, *dispatcher_, access_log_lock_, store_root_), - init_watcher_("Nighthawk", []() {}), validation_context_(false, false) { + init_watcher_("Nighthawk", []() {}), validation_context_(false, false, false) { // Any dispatchers created after the following call will use hr timers. setupForHRTimers(); std::string lower = absl::AsciiStrToLower( @@ -277,6 +273,9 @@ void ProcessImpl::createBootstrapConfiguration(envoy::config::bootstrap::v3::Boo cluster->set_name(fmt::format("{}", i)); cluster->mutable_connect_timeout()->set_seconds(options_.timeout().count()); cluster->mutable_max_requests_per_connection()->set_value(options_.maxRequestsPerConnection()); + if (options_.h2() && options_.h2UseMultipleConnections()) { + cluster->mutable_http2_protocol_options()->mutable_max_concurrent_streams()->set_value(1); + } auto thresholds = cluster->mutable_circuit_breakers()->add_thresholds(); // We do not support any retrying. 
@@ -356,7 +355,6 @@ void ProcessImpl::maybeCreateTracingDriver(const envoy::config::trace::v3::Traci Envoy::Runtime::LoaderSingleton::get(), *local_info_, generator_, time_system_); http_tracer_ = std::make_unique(std::move(zipkin_driver), *local_info_); - http_context_.setTracer(*http_tracer_); #else ENVOY_LOG(error, "Not build with any tracing support"); #endif @@ -377,41 +375,8 @@ void ProcessImpl::addRequestSourceCluster( socket_address->set_port_value(uri.port()); } -bool ProcessImpl::run(OutputCollector& collector) { - std::vector uris; - UriPtr request_source_uri; - UriPtr tracing_uri; - - try { - // TODO(oschaaf): See if we can rid of resolving here. - // We now only do it to validate. - if (options_.uri().has_value()) { - uris.push_back(std::make_unique(options_.uri().value())); - } else { - for (const nighthawk::client::MultiTarget::Endpoint& endpoint : - options_.multiTargetEndpoints()) { - uris.push_back(std::make_unique(fmt::format( - "{}://{}:{}{}", options_.multiTargetUseHttps() ? 
"https" : "http", - endpoint.address().value(), endpoint.port().value(), options_.multiTargetPath()))); - } - } - for (const UriPtr& uri : uris) { - uri->resolve(*dispatcher_, Utility::translateFamilyOptionString(options_.addressFamily())); - } - if (options_.requestSource() != "") { - request_source_uri = std::make_unique(options_.requestSource()); - request_source_uri->resolve(*dispatcher_, - Utility::translateFamilyOptionString(options_.addressFamily())); - } - if (options_.trace() != "") { - tracing_uri = std::make_unique(options_.trace()); - tracing_uri->resolve(*dispatcher_, - Utility::translateFamilyOptionString(options_.addressFamily())); - } - } catch (const UriException&) { - return false; - } - +bool ProcessImpl::runInternal(OutputCollector& collector, const std::vector& uris, + const UriPtr& request_source_uri, const UriPtr& tracing_uri) { int number_of_workers = determineConcurrency(); shutdown_ = false; const std::vector& workers = createWorkers(number_of_workers); @@ -419,23 +384,20 @@ bool ProcessImpl::run(OutputCollector& collector) { store_root_.initializeThreading(*dispatcher_, tls_); runtime_singleton_ = std::make_unique( Envoy::Runtime::LoaderPtr{new Envoy::Runtime::LoaderImpl( - *dispatcher_, tls_, {}, *local_info_, init_manager_, store_root_, generator_, + *dispatcher_, tls_, {}, *local_info_, store_root_, generator_, Envoy::ProtobufMessage::getStrictValidationVisitor(), *api_)}); ssl_context_manager_ = std::make_unique(time_system_); cluster_manager_factory_ = std::make_unique( admin_, Envoy::Runtime::LoaderSingleton::get(), store_root_, tls_, generator_, dispatcher_->createDnsResolver({}, false), *ssl_context_manager_, *dispatcher_, *local_info_, - secret_manager_, validation_context_, *api_, http_context_, access_log_manager_, - *singleton_manager_); + secret_manager_, validation_context_, *api_, http_context_, grpc_context_, + access_log_manager_, *singleton_manager_); cluster_manager_factory_->setConnectionReuseStrategy( 
options_.h1ConnectionReuseStrategy() == nighthawk::client::H1ConnectionReuseStrategy::LRU ? Http1PoolImpl::ConnectionReuseStrategy::LRU : Http1PoolImpl::ConnectionReuseStrategy::MRU); cluster_manager_factory_->setPrefetchConnections(options_.prefetchConnections()); - if (options_.h2UseMultipleConnections()) { - cluster_manager_factory_->enableMultiConnectionH2Pool(); - } envoy::config::bootstrap::v3::Bootstrap bootstrap; createBootstrapConfiguration(bootstrap, uris, request_source_uri, number_of_workers); if (tracing_uri != nullptr) { @@ -461,8 +423,8 @@ bool ProcessImpl::run(OutputCollector& collector) { std::chrono::nanoseconds total_execution_duration = 0ns; for (auto& worker : workers_) { auto sequencer_execution_duration = worker->phase().sequencer().executionDuration(); - // We don't write per-worker results if we only have a single worker, because the global results - // will be precisely the same. + // We don't write per-worker results if we only have a single worker, because the global + // results will be precisely the same. if (workers_.size() > 1) { StatisticFactoryImpl statistic_factory(options_); collector.addResult(fmt::format("worker_{}", i), @@ -473,10 +435,11 @@ bool ProcessImpl::run(OutputCollector& collector) { i++; } - // Note that above we use use counter values snapshotted by the workers right after its execution - // completes. Here we query the live counters to get to the global numbers. To make sure the - // global aggregated numbers line up, we must take care not to shut down the benchmark client - // before we do this, as that will increment certain counters like connections closed, etc. + // Note that above we use use counter values snapshotted by the workers right after its + // execution completes. Here we query the live counters to get to the global numbers. 
To make + // sure the global aggregated numbers line up, we must take care not to shut down the benchmark + // client before we do this, as that will increment certain counters like connections closed, + // etc. const auto& counters = Utility().mapCountersFromStore( store_root_, [](absl::string_view, uint64_t value) { return value > 0; }); StatisticFactoryImpl statistic_factory(options_); @@ -485,6 +448,49 @@ bool ProcessImpl::run(OutputCollector& collector) { return counters.find("sequencer.failed_terminations") == counters.end(); } +bool ProcessImpl::run(OutputCollector& collector) { + std::vector uris; + UriPtr request_source_uri; + UriPtr tracing_uri; + + try { + // TODO(oschaaf): See if we can rid of resolving here. + // We now only do it to validate. + if (options_.uri().has_value()) { + uris.push_back(std::make_unique(options_.uri().value())); + } else { + for (const nighthawk::client::MultiTarget::Endpoint& endpoint : + options_.multiTargetEndpoints()) { + uris.push_back(std::make_unique(fmt::format( + "{}://{}:{}{}", options_.multiTargetUseHttps() ? 
"https" : "http", + endpoint.address().value(), endpoint.port().value(), options_.multiTargetPath()))); + } + } + for (const UriPtr& uri : uris) { + uri->resolve(*dispatcher_, Utility::translateFamilyOptionString(options_.addressFamily())); + } + if (options_.requestSource() != "") { + request_source_uri = std::make_unique(options_.requestSource()); + request_source_uri->resolve(*dispatcher_, + Utility::translateFamilyOptionString(options_.addressFamily())); + } + if (options_.trace() != "") { + tracing_uri = std::make_unique(options_.trace()); + tracing_uri->resolve(*dispatcher_, + Utility::translateFamilyOptionString(options_.addressFamily())); + } + } catch (const UriException&) { + return false; + } + + try { + return runInternal(collector, uris, request_source_uri, tracing_uri); + } catch (Envoy::EnvoyException& ex) { + ENVOY_LOG(error, "Fatal exception: {}", ex.what()); + throw; + } +} + void ProcessImpl::setupForHRTimers() { // We override the local environment to indicate to libevent that we favor precision over // efficiency. Note that it is also possible to do this at setup time via libevent's api's. 
diff --git a/source/client/process_impl.h b/source/client/process_impl.h index dfba53f6a..967ee7c59 100644 --- a/source/client/process_impl.h +++ b/source/client/process_impl.h @@ -17,6 +17,7 @@ #include "external/envoy/source/common/access_log/access_log_manager_impl.h" #include "external/envoy/source/common/common/logger.h" #include "external/envoy/source/common/event/real_time_system.h" +#include "external/envoy/source/common/grpc/context_impl.h" #include "external/envoy/source/common/http/context_impl.h" #include "external/envoy/source/common/protobuf/message_validator_impl.h" #include "external/envoy/source/common/secret/secret_manager_impl.h" @@ -92,6 +93,9 @@ class ProcessImpl : public Process, public Envoy::Logger::Loggable mergeWorkerStatistics(const std::vector& workers) const; void setupForHRTimers(); + bool runInternal(OutputCollector& collector, const std::vector& uris, + const UriPtr& request_source_uri, const UriPtr& tracing_uri); + Envoy::ProcessWide process_wide_; Envoy::PlatformImpl platform_impl_; Envoy::Event::TimeSystem& time_system_; @@ -114,6 +118,7 @@ class ProcessImpl : public Process, public Envoy::Logger::Loggable runtime_singleton_; Envoy::Init::WatcherImpl init_watcher_; - Tracing::HttpTracerPtr http_tracer_; + Tracing::HttpTracerSharedPtr http_tracer_; Envoy::Server::ValidationAdmin admin_; Envoy::ProtobufMessage::ProdValidationContextImpl validation_context_; bool shutdown_{true}; diff --git a/source/client/stream_decoder.cc b/source/client/stream_decoder.cc index b9220a0c3..afdfd367d 100644 --- a/source/client/stream_decoder.cc +++ b/source/client/stream_decoder.cc @@ -3,6 +3,7 @@ #include #include "external/envoy/source/common/http/http1/codec_impl.h" +#include "external/envoy/source/common/http/request_id_extension_uuid_impl.h" #include "external/envoy/source/common/http/utility.h" #include "external/envoy/source/common/network/address_impl.h" #include "external/envoy/source/common/stream_info/stream_info_impl.h" @@ -134,13 +135,13 
@@ void StreamDecoder::finalizeActiveSpan() { } } -void StreamDecoder::setupForTracing(std::string& x_request_id) { +void StreamDecoder::setupForTracing() { auto headers_copy = std::make_unique(); Envoy::Http::HeaderMapImpl::copyFrom(*headers_copy, *request_headers_); Envoy::Tracing::Decision tracing_decision = {Envoy::Tracing::Reason::ClientForced, true}; - RELEASE_ASSERT(Envoy::UuidUtils::setTraceableUuid(x_request_id, Envoy::UuidTraceStatus::Client), - "setTraceableUuid failed"); - headers_copy->setClientTraceId(x_request_id); + Envoy::Http::UUIDRequestIDExtension uuid_generator(random_generator_); + uuid_generator.set(*headers_copy, true); + uuid_generator.setTraceStatus(*headers_copy, Envoy::Http::TraceStatus::Client); active_span_ = http_tracer_->startSpan(config_, *headers_copy, stream_info_, tracing_decision); active_span_->injectContext(*headers_copy); request_headers_.reset(headers_copy.release()); diff --git a/source/client/stream_decoder.h b/source/client/stream_decoder.h index e0a178227..869a8d57e 100644 --- a/source/client/stream_decoder.h +++ b/source/client/stream_decoder.h @@ -13,7 +13,6 @@ #include "nighthawk/common/statistic.h" #include "external/envoy/source/common/http/header_map_impl.h" -#include "external/envoy/source/common/runtime/uuid_util.h" #include "external/envoy/source/common/stream_info/stream_info_impl.h" #include "external/envoy/source/common/tracing/http_tracer_impl.h" @@ -42,8 +41,9 @@ class StreamDecoder : public Envoy::Http::ResponseDecoder, OperationCallback caller_completion_callback, Statistic& connect_statistic, Statistic& latency_statistic, Statistic& response_header_sizes_statistic, Statistic& response_body_sizes_statistic, HeaderMapPtr request_headers, - bool measure_latencies, uint32_t request_body_size, std::string x_request_id, - Envoy::Tracing::HttpTracerPtr& http_tracer) + bool measure_latencies, uint32_t request_body_size, + Envoy::Runtime::RandomGenerator& random_generator, + Envoy::Tracing::HttpTracerSharedPtr& 
http_tracer) : dispatcher_(dispatcher), time_source_(time_source), decoder_completion_callback_(decoder_completion_callback), caller_completion_callback_(std::move(caller_completion_callback)), @@ -53,9 +53,9 @@ class StreamDecoder : public Envoy::Http::ResponseDecoder, request_headers_(std::move(request_headers)), connect_start_(time_source_.monotonicTime()), complete_(false), measure_latencies_(measure_latencies), request_body_size_(request_body_size), stream_info_(time_source_), - http_tracer_(http_tracer) { + random_generator_(random_generator), http_tracer_(http_tracer) { if (measure_latencies_ && http_tracer_ != nullptr) { - setupForTracing(x_request_id); + setupForTracing(); } } @@ -83,7 +83,7 @@ class StreamDecoder : public Envoy::Http::ResponseDecoder, static Envoy::StreamInfo::ResponseFlag streamResetReasonToResponseFlag(Envoy::Http::StreamResetReason reset_reason); void finalizeActiveSpan(); - void setupForTracing(std::string& x_request_id); + void setupForTracing(); private: void onComplete(bool success); @@ -110,7 +110,8 @@ class StreamDecoder : public Envoy::Http::ResponseDecoder, const uint32_t request_body_size_; Envoy::Tracing::EgressConfigImpl config_; Envoy::StreamInfo::StreamInfoImpl stream_info_; - Envoy::Tracing::HttpTracerPtr& http_tracer_; + Envoy::Runtime::RandomGenerator& random_generator_; + Envoy::Tracing::HttpTracerSharedPtr& http_tracer_; Envoy::Tracing::SpanPtr active_span_; Envoy::StreamInfo::UpstreamTiming upstream_timing_; }; diff --git a/source/common/worker_impl.cc b/source/common/worker_impl.cc index 5e59280c0..021a65f35 100644 --- a/source/common/worker_impl.cc +++ b/source/common/worker_impl.cc @@ -7,8 +7,8 @@ namespace Nighthawk { WorkerImpl::WorkerImpl(Envoy::Api::Api& api, Envoy::ThreadLocal::Instance& tls, Envoy::Stats::Store& store) - : thread_factory_(api.threadFactory()), dispatcher_(api.allocateDispatcher()), tls_(tls), - store_(store), time_source_(api.timeSource()) { + : thread_factory_(api.threadFactory()), 
dispatcher_(api.allocateDispatcher("worker_thread")), + tls_(tls), store_(store), time_source_(api.timeSource()) { tls.registerThread(*dispatcher_, false); } diff --git a/source/server/README.md b/source/server/README.md index 010023690..bdc72cb26 100644 --- a/source/server/README.md +++ b/source/server/README.md @@ -100,17 +100,21 @@ bazel-bin/nighthawk_test_server [--disable-extensions ] [--hot-restart-version] [--restart-epoch ] [--log-path ] -[--log-format-escaped] [--log-format -] [--component-log-level -] [-l ] -[--local-address-ip-version ] -[--admin-address-path ] +[--log-format-prefix-with-location +] [--log-format-escaped] +[--log-format ] +[--component-log-level ] [-l +] [--local-address-ip-version +] [--admin-address-path +] +[--ignore-unknown-dynamic-fields] [--reject-unknown-dynamic-fields] [--allow-unknown-static-fields] -[--allow-unknown-fields] [--config-yaml -] [-c ] [--concurrency -] [--base-id ] [--] -[--version] [-h] +[--allow-unknown-fields] +[--bootstrap-version ] +[--config-yaml ] [-c ] +[--concurrency ] [--base-id +] [--] [--version] [-h] Where: @@ -167,6 +171,10 @@ hot restart epoch # --log-path Path to logfile +--log-format-prefix-with-location +Prefix all occurrences of '%v' in log format with with '[%g:%#] ' +('[path/to/file.cc:99] '). + --log-format-escaped Escape c-style escape sequences in the application logs @@ -192,6 +200,9 @@ The local IP address version (v4 or v6). --admin-address-path Admin address path +--ignore-unknown-dynamic-fields +ignore unknown fields in dynamic configuration + --reject-unknown-dynamic-fields reject unknown fields in dynamic configuration @@ -201,6 +212,10 @@ allow unknown fields in static configuration --allow-unknown-fields allow unknown fields in static configuration (DEPRECATED) +--bootstrap-version +API version to parse the bootstrap config as (e.g. 3). 
If unset, all +known versions will be attempted + --config-yaml Inline YAML configuration, merges with the contents of --config-path diff --git a/test/BUILD b/test/BUILD index 652198bab..29cce7730 100644 --- a/test/BUILD +++ b/test/BUILD @@ -2,7 +2,6 @@ licenses(["notice"]) # Apache 2 load( "@envoy//bazel:envoy_build_system.bzl", - "envoy_cc_mock", "envoy_cc_test", "envoy_package", ) @@ -56,10 +55,10 @@ envoy_cc_test( "//test/mocks/common:mock_termination_predicate_factory", "@envoy//source/common/api:api_lib", "@envoy//source/common/stats:isolated_store_lib_with_external_headers", - "@envoy//test/mocks/init:init_mocks", "@envoy//test/mocks/local_info:local_info_mocks", "@envoy//test/mocks/protobuf:protobuf_mocks", "@envoy//test/mocks/thread_local:thread_local_mocks", + "@envoy//test/test_common:simulated_time_system_lib", ], ) @@ -238,7 +237,6 @@ envoy_cc_test( deps = [ "//source/common:nighthawk_common_lib", "@envoy//source/common/stats:isolated_store_lib_with_external_headers", - "@envoy//test/mocks/init:init_mocks", "@envoy//test/mocks/local_info:local_info_mocks", "@envoy//test/mocks/protobuf:protobuf_mocks", "@envoy//test/mocks/thread_local:thread_local_mocks", diff --git a/test/benchmark_http_client_test.cc b/test/benchmark_http_client_test.cc index d7b7073c7..983995df5 100644 --- a/test/benchmark_http_client_test.cc +++ b/test/benchmark_http_client_test.cc @@ -28,7 +28,8 @@ namespace Nighthawk { class BenchmarkClientHttpTest : public Test { public: BenchmarkClientHttpTest() - : api_(Envoy::Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher()), + : api_(Envoy::Api::createApiForTest(time_system_)), + dispatcher_(api_->allocateDispatcher("test_thread")), cluster_manager_(std::make_unique()), cluster_info_(std::make_unique()), http_tracer_(std::make_unique()), response_code_("200") { @@ -128,7 +129,7 @@ class BenchmarkClientHttpTest : public Test { } uint64_t getCounter(absl::string_view name) { - return 
client_->scope().counter(std::string(name)).value(); + return client_->scope().counterFromString(std::string(name)).value(); } Envoy::Upstream::MockClusterManager& cluster_manager() { @@ -154,7 +155,7 @@ class BenchmarkClientHttpTest : public Test { NiceMock stream_encoder_; Envoy::Upstream::MockThreadLocalCluster thread_local_cluster_; Envoy::Upstream::ClusterInfoConstSharedPtr cluster_info_; - Envoy::Tracing::HttpTracerPtr http_tracer_; + Envoy::Tracing::HttpTracerSharedPtr http_tracer_; std::string response_code_; RequestGenerator request_generator_; }; @@ -222,10 +223,12 @@ TEST_F(BenchmarkClientHttpTest, StatusTrackingInOnComplete) { TEST_F(BenchmarkClientHttpTest, PoolFailures) { setupBenchmarkClient(); - client_->onPoolFailure(Envoy::Http::ConnectionPool::PoolFailureReason::ConnectionFailure); + client_->onPoolFailure(Envoy::Http::ConnectionPool::PoolFailureReason::LocalConnectionFailure); + client_->onPoolFailure(Envoy::Http::ConnectionPool::PoolFailureReason::RemoteConnectionFailure); client_->onPoolFailure(Envoy::Http::ConnectionPool::PoolFailureReason::Overflow); + client_->onPoolFailure(Envoy::Http::ConnectionPool::PoolFailureReason::Timeout); EXPECT_EQ(1, getCounter("pool_overflow")); - EXPECT_EQ(1, getCounter("pool_connection_failure")); + EXPECT_EQ(2, getCounter("pool_connection_failure")); } TEST_F(BenchmarkClientHttpTest, RequestMethodPost) { diff --git a/test/client_worker_test.cc b/test/client_worker_test.cc index 6867ceb82..4a7b59517 100644 --- a/test/client_worker_test.cc +++ b/test/client_worker_test.cc @@ -5,10 +5,10 @@ #include "external/envoy/source/common/runtime/runtime_impl.h" #include "external/envoy/source/common/stats/isolated_store_impl.h" -#include "external/envoy/test/mocks/init/mocks.h" #include "external/envoy/test/mocks/local_info/mocks.h" #include "external/envoy/test/mocks/protobuf/mocks.h" #include "external/envoy/test/mocks/thread_local/mocks.h" +#include "external/envoy/test/test_common/simulated_time_system.h" #include 
"common/statistic_impl.h" @@ -35,9 +35,9 @@ class ClientWorkerTest : public Test { public: ClientWorkerTest() : api_(Envoy::Api::createApiForTest()), thread_id_(std::this_thread::get_id()) { - loader_ = std::make_unique(Envoy::Runtime::LoaderPtr{ - new Envoy::Runtime::LoaderImpl(dispatcher_, tls_, {}, local_info_, init_manager_, store_, - rand_, validation_visitor_, *api_)}); + loader_ = std::make_unique( + Envoy::Runtime::LoaderPtr{new Envoy::Runtime::LoaderImpl( + dispatcher_, tls_, {}, local_info_, store_, rand_, validation_visitor_, *api_)}); benchmark_client_ = new MockBenchmarkClient(); sequencer_ = new MockSequencer(); request_generator_ = new MockRequestSource(); @@ -74,11 +74,6 @@ class ClientWorkerTest : public Test { TerminationPredicatePtr createMockTerminationPredicate() { auto predicate = std::make_unique>(); ON_CALL(*predicate, appendToChain(_)).WillByDefault(ReturnRef(*predicate)); - EXPECT_CALL(*predicate, evaluateChain()) - .Times(AtLeast(0)) - .WillOnce(Return(TerminationPredicate::Status::PROCEED)) - .WillOnce(Return(TerminationPredicate::Status::TERMINATE)); - return predicate; } @@ -91,7 +86,7 @@ class ClientWorkerTest : public Test { MockRequestSourceFactory request_generator_factory_; Envoy::Stats::IsolatedStoreImpl store_; NiceMock tls_; - Envoy::Event::TestRealTimeSystem time_system_; + Envoy::Event::SimulatedTimeSystem time_system_; MockBenchmarkClient* benchmark_client_; MockSequencer* sequencer_; MockRequestSource* request_generator_; @@ -99,10 +94,9 @@ class ClientWorkerTest : public Test { NiceMock dispatcher_; std::unique_ptr loader_; NiceMock local_info_; - Envoy::Init::MockManager init_manager_; NiceMock validation_visitor_; Envoy::Upstream::ClusterManagerPtr cluster_manager_ptr_; - Envoy::Tracing::HttpTracerPtr http_tracer_; + Envoy::Tracing::HttpTracerSharedPtr http_tracer_; }; TEST_F(ClientWorkerTest, BasicTest) { @@ -112,8 +106,7 @@ TEST_F(ClientWorkerTest, BasicTest) { InSequence dummy; EXPECT_CALL(*benchmark_client_, 
setShouldMeasureLatencies(false)).Times(1); EXPECT_CALL(*benchmark_client_, tryStartRequest(_)) - .Times(1) - .WillRepeatedly(Invoke(this, &ClientWorkerTest::CheckThreadChanged)); + .WillOnce(Invoke(this, &ClientWorkerTest::CheckThreadChanged)); EXPECT_CALL(*benchmark_client_, setShouldMeasureLatencies(true)).Times(1); EXPECT_CALL(*sequencer_, start).Times(1); EXPECT_CALL(*sequencer_, waitForCompletion).Times(1); @@ -124,8 +117,7 @@ TEST_F(ClientWorkerTest, BasicTest) { auto worker = std::make_unique( *api_, tls_, cluster_manager_ptr_, benchmark_client_factory_, termination_predicate_factory_, sequencer_factory_, request_generator_factory_, store_, worker_number, - time_system_.monotonicTime() + 10ms, http_tracer_, - ClientWorkerImpl::HardCodedWarmupStyle::ON); + time_system_.monotonicTime(), http_tracer_, ClientWorkerImpl::HardCodedWarmupStyle::ON); worker->start(); worker->waitForCompletion(); diff --git a/test/coverage/gen_build.sh b/test/coverage/gen_build.sh deleted file mode 100755 index 80d29af08..000000000 --- a/test/coverage/gen_build.sh +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/bash - -# Generate test/coverage/BUILD, which contains a single envoy_cc_test target -# that contains all C++ based tests suitable for performing the coverage run. A -# single binary (as opposed to multiple test targets) is require to work around -# the crazy in https://github.com/bazelbuild/bazel/issues/1118. This is used by -# the coverage runner script. - -set -e -set -x - -[ -z "${BAZEL_BIN}" ] && BAZEL_BIN=bazel -[ -z "${BUILDIFIER_BIN}" ] && BUILDIFIER_BIN=buildifier - -# Path to the generated BUILD file for the coverage target. -[ -z "${BUILD_PATH}" ] && BUILD_PATH="$(dirname "$0")"/BUILD - -# Extra repository information to include when generating coverage targets. This is useful for -# consuming projects. E.g., "@envoy". -[ -z "${REPOSITORY}" ] && REPOSITORY="" - -# This is an extra bazel path to query for additional targets. 
This is useful for consuming projects -# that want to run coverage over the public envoy code as well as private extensions. -# E.g., "//envoy-lyft/test/..." -[ -z "${EXTRA_QUERY_PATHS}" ] && EXTRA_QUERY_PATHS="" - -rm -f "${BUILD_PATH}" - -if [[ $# -gt 0 ]]; then - COVERAGE_TARGETS=$* -else - COVERAGE_TARGETS=//test/... -fi - -for target in ${COVERAGE_TARGETS}; do - TARGETS="$TARGETS $("${BAZEL_BIN}" query ${BAZEL_QUERY_OPTIONS} "attr('tags', 'coverage_test_lib', ${REPOSITORY}${target})" | grep "^//")" -done - -( - cat << EOF -# This file is generated by test/coverage/gen_build.sh automatically prior to -# coverage runs. It is under .gitignore. DO NOT EDIT, DO NOT CHECK IN. -load( - "@envoy//bazel:envoy_build_system.bzl", - "envoy_cc_test", - "envoy_package", -) - -envoy_package() - -envoy_cc_test( - name = "coverage_tests", - repository = "@envoy", - deps = [ -EOF - for t in ${TARGETS} - do - echo " \"$t\"," - done - cat << EOF - ], - # no-remote due to https://github.com/bazelbuild/bazel/issues/4685 - tags = ["manual", "no-remote"], - coverage = False, - # Due to the nature of coverage_tests, the shard of coverage_tests are very uneven, some of - # shard can take 100s and some takes only 10s, so we use the maximum sharding to here to let - # Bazel scheduling them across CPU cores. - # Sharding can be disabled by --test_sharding_strategy=disabled. 
- shard_count = 50, -) -EOF - -) > "${BUILD_PATH}" - -echo "Generated coverage BUILD file at: ${BUILD_PATH}" -"${BUILDIFIER_BIN}" "${BUILD_PATH}" \ No newline at end of file diff --git a/test/factories_test.cc b/test/factories_test.cc index 966e472f5..2f82f96e5 100644 --- a/test/factories_test.cc +++ b/test/factories_test.cc @@ -29,7 +29,7 @@ class FactoriesTest : public Test { Envoy::Stats::MockIsolatedStatsStore stats_store_; Envoy::Event::MockDispatcher dispatcher_; MockOptions options_; - Envoy::Tracing::HttpTracerPtr http_tracer_; + Envoy::Tracing::HttpTracerSharedPtr http_tracer_; }; TEST_F(FactoriesTest, CreateBenchmarkClient) { diff --git a/test/integration/integration_test.py b/test/integration/integration_test.py index 915b3cc6d..951d56b32 100644 --- a/test/integration/integration_test.py +++ b/test/integration/integration_test.py @@ -7,6 +7,8 @@ import sys import pytest +from utility import isSanitizerRun + if __name__ == '__main__': path = os.path.dirname(os.path.realpath(__file__)) test_selection_arg = sys.argv[1] if len(sys.argv) > 1 else "" @@ -22,7 +24,7 @@ "-x", path, "-n", - "20" # Number of tests to run in parallel + "4" if isSanitizerRun() else "20" # Number of tests to run in parallel ], plugins=["xdist"]) exit(r) diff --git a/test/integration/test_integration_basics.py b/test/integration/test_integration_basics.py index 8a0153714..a34ef52fb 100644 --- a/test/integration/test_integration_basics.py +++ b/test/integration/test_integration_basics.py @@ -264,16 +264,22 @@ def test_https_h2(https_test_server_fixture): def test_https_h2_multiple_connections(https_test_server_fixture): """ Test that the experimental h2 pool uses multiple connections. + The burst we send ensures we will need 10 connections right away, as we + limit max active streams per connection to 1 by setting the experimental + flag to use multiple h2 connections. 
""" parsed_json, _ = https_test_server_fixture.runNighthawkClient([ "--h2", https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", - "--termination-predicate", "benchmark.http_2xx:9", "--max-active-requests", "1", - "--experimental-h2-use-multiple-connections" + "--termination-predicate", "benchmark.http_2xx:99", "--max-active-requests", "10", + "--max-pending-requests", "10", "--experimental-h2-use-multiple-connections", "--burst-size", + "10" ]) counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertCounterEqual(counters, "benchmark.http_2xx", 10) - assertCounterEqual(counters, "upstream_cx_http2_total", 10) + assertCounterEqual(counters, "benchmark.http_2xx", 100) + # Empirical observation shows we may end up creating more then 10 connections. + # This is stock Envoy h/2 pool behavior. + assertCounterGreaterEqual(counters, "upstream_cx_http2_total", 10) def _do_tls_configuration_test(https_test_server_fixture, cli_parameter, use_h2): @@ -554,19 +560,16 @@ def test_https_h1_sni(sni_test_server_fixture): # Verify failure when we set no host (will get plain http) parsed_json, _ = sni_test_server_fixture.runNighthawkClient( - [sni_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100"], + [sni_test_server_fixture.getTestServerRootUri(), "--rps", "20", "--duration", "100"], expect_failure=True) # Verify success when we use plain http and don't request the sni host - parsed_json, _ = sni_test_server_fixture.runNighthawkClient( - [sni_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100"], - expect_failure=True) - parsed_json, _ = sni_test_server_fixture.runNighthawkClient([ sni_test_server_fixture.getTestServerRootUri().replace("https://", "http://"), "--rps", "100", - "--duration", "100", "--termination-predicate", "benchmark.http_2xx:2" + "--duration", "20", "--termination-predicate", "benchmark.http_2xx:2" ], expect_failure=False) + counters = 
sni_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1) assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1) diff --git a/test/mocks/client/mock_benchmark_client_factory.h b/test/mocks/client/mock_benchmark_client_factory.h index b3b69b708..d67db595c 100644 --- a/test/mocks/client/mock_benchmark_client_factory.h +++ b/test/mocks/client/mock_benchmark_client_factory.h @@ -17,7 +17,7 @@ class MockBenchmarkClientFactory : public BenchmarkClientFactory { MOCK_CONST_METHOD7(create, BenchmarkClientPtr(Envoy::Api::Api&, Envoy::Event::Dispatcher&, Envoy::Stats::Scope&, Envoy::Upstream::ClusterManagerPtr&, - Envoy::Tracing::HttpTracerPtr&, absl::string_view, + Envoy::Tracing::HttpTracerSharedPtr&, absl::string_view, RequestSource& request_generator)); }; diff --git a/test/output_formatter_test.cc b/test/output_formatter_test.cc index 71d85b4b4..4878c9b89 100644 --- a/test/output_formatter_test.cc +++ b/test/output_formatter_test.cc @@ -122,7 +122,7 @@ TEST_F(OutputCollectorTest, DottedFormatter) { "test/test_data/output_formatter.dotted.gold"); } -TEST_F(OutputCollectorTest, getLowerCaseOutputFormats) { +TEST_F(OutputCollectorTest, GetLowerCaseOutputFormats) { auto output_formats = OutputFormatterImpl::getLowerCaseOutputFormats(); // When you're looking at this code you probably just added an output format. // This is to point out that you might want to update the list below and add a test above. 
diff --git a/test/rate_limiter_test.cc b/test/rate_limiter_test.cc index 99f659da1..9da8424cc 100644 --- a/test/rate_limiter_test.cc +++ b/test/rate_limiter_test.cc @@ -25,11 +25,11 @@ TEST_F(RateLimiterTest, LinearRateLimiterTest) { EXPECT_FALSE(rate_limiter.tryAcquireOne()); - time_system.sleep(100ms); + time_system.advanceTimeWait(100ms); EXPECT_TRUE(rate_limiter.tryAcquireOne()); EXPECT_FALSE(rate_limiter.tryAcquireOne()); - time_system.sleep(1s); + time_system.advanceTimeWait(1s); for (int i = 0; i < 10; i++) { EXPECT_TRUE(rate_limiter.tryAcquireOne()); } @@ -91,13 +91,13 @@ TEST_F(RateLimiterTest, ScheduledStartingRateLimiterTest) { .WillRepeatedly(Return(true)); if (starting_late) { - time_system.sleep(schedule_delay); + time_system.advanceTimeWait(schedule_delay); } // We should expect zero releases until it is time to start. while (time_system.monotonicTime() < scheduled_starting_time) { EXPECT_FALSE(rate_limiter->tryAcquireOne()); - time_system.sleep(1ms); + time_system.advanceTimeWait(1ms); } // Now that is time to start, the rate limiter should propagate to the mock rate limiter. @@ -140,7 +140,7 @@ class BurstingRateLimiterIntegrationTest : public Test { EXPECT_EQ(burst_acquired, burst_size); EXPECT_EQ(i % (burst_interval_ms.count() - first_burst), 0); } - time_system.sleep(1ms); + time_system.advanceTimeWait(1ms); } } }; @@ -213,16 +213,16 @@ TEST_F(RateLimiterTest, DistributionSamplingRateLimiterImplSchedulingTest) { // The distribution first yields a 1 ns offset. So we don't expect to be green lighted. EXPECT_FALSE(rate_limiter->tryAcquireOne()); - time_system.sleep(1ns); + time_system.advanceTimeWait(1ns); EXPECT_TRUE(rate_limiter->tryAcquireOne()); // We expect releaseOne to be propagated. rate_limiter->releaseOne(); // The distribution will yield an offset of 0ns, we expect success. EXPECT_TRUE(rate_limiter->tryAcquireOne()); - // We don't sleep, and the distribution will yield a 1ns offset. No green light. 
+ // We don't advanceTimeWait, and the distribution will yield a 1ns offset. No green light. EXPECT_FALSE(rate_limiter->tryAcquireOne()); - time_system.sleep(1ns); + time_system.advanceTimeWait(1ns); EXPECT_TRUE(rate_limiter->tryAcquireOne()); } @@ -259,7 +259,7 @@ class LinearRampingRateLimiterImplTest : public Test { if (expected_count > control_timings.size()) { control_timings.push_back(total_us_elapsed.count()); } - time_system.sleep(clock_tick); + time_system.advanceTimeWait(clock_tick); total_us_elapsed += clock_tick; } while (total_us_elapsed <= duration); @@ -368,12 +368,12 @@ class GraduallyOpeningRateLimiterFilterTest : public Test { acquisition_timings.push_back(total_ms_elapsed.count()); EXPECT_FALSE(rate_limiter->tryAcquireOne()); } - time_system.sleep(clock_tick); + time_system.advanceTimeWait(clock_tick); total_ms_elapsed += clock_tick; } while (total_ms_elapsed <= duration); EXPECT_FALSE(rate_limiter->tryAcquireOne()); - time_system.sleep(1s); + time_system.advanceTimeWait(1s); // Verify that after the rampup the expected constant pacing is maintained. // Calls should be forwarded to the regular linear rate limiter algorithm with its // corrective behavior so we can expect to acquire a series with that. @@ -410,7 +410,7 @@ TEST_F(ZipfRateLimiterImplTest, TimingVerificationTest) { if (rate_limiter->tryAcquireOne()) { aquisition_timings.push_back(total_ms_elapsed.count()); } - time_system.sleep(clock_tick); + time_system.advanceTimeWait(clock_tick); total_ms_elapsed += clock_tick; } while (total_ms_elapsed <= duration); EXPECT_EQ(aquisition_timings, diff --git a/test/run_nighthawk_bazel_coverage.sh b/test/run_nighthawk_bazel_coverage.sh index fada02da0..227294bc9 100755 --- a/test/run_nighthawk_bazel_coverage.sh +++ b/test/run_nighthawk_bazel_coverage.sh @@ -24,39 +24,23 @@ else COVERAGE_TARGETS=//test/... fi -# Make sure //test/coverage:coverage_tests is up-to-date. 
-SCRIPT_DIR="$(realpath "$(dirname "$0")")" -"${SCRIPT_DIR}"/coverage/gen_build.sh ${COVERAGE_TARGETS} - -BAZEL_USE_LLVM_NATIVE_COVERAGE=1 GCOV=llvm-profdata bazel coverage ${BAZEL_BUILD_OPTIONS} \ - -c fastbuild --copt=-DNDEBUG --instrumentation_filter=//source/...,//include/... \ - --test_timeout=2000 --cxxopt="-DENVOY_CONFIG_COVERAGE=1" --test_output=errors \ - --test_arg="--log-path /dev/null" --test_arg="-l trace" --test_env=HEAPCHECK= \ - --test_env=ENVOY_IP_TEST_VERSIONS=v4only //test/coverage:coverage_tests +BAZEL_BUILD_OPTIONS+=" --config=test-coverage --test_tag_filters=-nocoverage --test_env=ENVOY_IP_TEST_VERSIONS=v4only" +bazel coverage ${BAZEL_BUILD_OPTIONS} --test_output=all ${COVERAGE_TARGETS} COVERAGE_DIR="${SRCDIR}"/generated/coverage mkdir -p "${COVERAGE_DIR}" -COVERAGE_IGNORE_REGEX="(/external/|pb\.(validate\.)?(h|cc)|/chromium_url/|/test/|/tmp)" -COVERAGE_BINARY="bazel-bin/test/coverage/coverage_tests" COVERAGE_DATA="${COVERAGE_DIR}/coverage.dat" -echo "Merging coverage data..." -llvm-profdata merge -sparse -o ${COVERAGE_DATA} $(find -L bazel-out/k8-fastbuild/testlogs/test/coverage/coverage_tests/ -name coverage.dat) +cp bazel-out/_coverage/_coverage_report.dat "${COVERAGE_DATA}" -echo "Generating report..." -llvm-cov show "${COVERAGE_BINARY}" -instr-profile="${COVERAGE_DATA}" -Xdemangler=c++filt \ - -ignore-filename-regex="${COVERAGE_IGNORE_REGEX}" -output-dir=${COVERAGE_DIR} -format=html -sed -i -e 's|>proc/self/cwd/|>|g' "${COVERAGE_DIR}/index.html" -sed -i -e 's|>bazel-out/[^/]*/bin/\([^/]*\)/[^<]*/_virtual_includes/[^/]*|>\1|g' "${COVERAGE_DIR}/index.html" +COVERAGE_VALUE=$(genhtml --prefix ${PWD} --output "${COVERAGE_DIR}" "${COVERAGE_DATA}" | tee /dev/stderr | grep lines... 
| cut -d ' ' -f 4) +COVERAGE_VALUE=${COVERAGE_VALUE%?} [[ -z "${ENVOY_COVERAGE_DIR}" ]] || rsync -av "${COVERAGE_DIR}"/ "${ENVOY_COVERAGE_DIR}" if [ "$VALIDATE_COVERAGE" == "true" ] then - COVERAGE_VALUE=$(llvm-cov export "${COVERAGE_BINARY}" -instr-profile="${COVERAGE_DATA}" \ - -ignore-filename-regex="${COVERAGE_IGNORE_REGEX}" -summary-only | \ - python3 -c "import sys, json; print(json.load(sys.stdin)['data'][0]['totals']['lines']['percent'])") COVERAGE_THRESHOLD=98.2 COVERAGE_FAILED=$(echo "${COVERAGE_VALUE}<${COVERAGE_THRESHOLD}" | bc) if test ${COVERAGE_FAILED} -eq 1; then diff --git a/test/server/http_test_server_filter_integration_test.cc b/test/server/http_test_server_filter_integration_test.cc index 768ccf9ae..8d73636f5 100644 --- a/test/server/http_test_server_filter_integration_test.cc +++ b/test/server/http_test_server_filter_integration_test.cc @@ -14,6 +14,7 @@ namespace Nighthawk { using namespace testing; +constexpr absl::string_view kBadJson = "bad_json"; class HttpTestServerIntegrationTestBase : public Envoy::HttpIntegrationTest, public TestWithParam { @@ -40,7 +41,7 @@ class HttpTestServerIntegrationTestBase : public Envoy::HttpIntegrationTest, absl::string_view host, absl::string_view content_type, const std::function& request_header_delegate) { Envoy::Api::ApiPtr api = Envoy::Api::createApiForTest(); - Envoy::Event::DispatcherPtr dispatcher(api->allocateDispatcher()); + Envoy::Event::DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); std::shared_ptr cluster{ new NiceMock()}; Envoy::Upstream::HostDescriptionConstSharedPtr host_description{ @@ -258,6 +259,20 @@ TEST_P(HttpTestServerIntegrationNoConfigTest, TestHeaderConfig) { EXPECT_EQ("", response->body()); } +TEST_P(HttpTestServerIntegrationNoConfigTest, BadTestHeaderConfig) { + Envoy::BufferingStreamDecoderPtr response = makeSingleRequest( + lookupPort("http"), "GET", "/", "", downstream_protocol_, version_, "foo.com", "", + [](Envoy::Http::RequestHeaderMapImpl& 
request_headers) { + request_headers.addCopy(Nighthawk::Server::TestServer::HeaderNames::get().TestServerConfig, + kBadJson); + }); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("500", response->headers().Status()->value().getStringView()); + EXPECT_EQ("test-server didn't understand the request: Error merging json config: Unable to parse " + "JSON as proto (INVALID_ARGUMENT:Unexpected token.\nbad_json\n^): bad_json", + response->body()); +} + class HttpTestServerDecoderFilterTest : public Test {}; // Here we test config-level merging as well as its application at the response-header level. @@ -314,7 +329,7 @@ TEST_F(HttpTestServerDecoderFilterTest, HeaderMerge) { header_map, Envoy::Http::TestResponseHeaderMapImpl{ {":status", "200"}, {"foo", "bar2"}, {"foo2", "bar3"}})); - EXPECT_FALSE(f.mergeJsonConfig(R"(bad_json)", options, error_message)); + EXPECT_FALSE(f.mergeJsonConfig(kBadJson, options, error_message)); EXPECT_EQ("Error merging json config: Unable to parse JSON as proto (INVALID_ARGUMENT:Unexpected " "token.\nbad_json\n^): bad_json", error_message); diff --git a/test/stream_decoder_test.cc b/test/stream_decoder_test.cc index 8a80e232b..3cba8b2c7 100644 --- a/test/stream_decoder_test.cc +++ b/test/stream_decoder_test.cc @@ -20,15 +20,11 @@ using namespace testing; namespace Nighthawk { namespace Client { -namespace { -static const std::string TEST_TRACER_UID = "f4dca0a9-12c7-4307-8002-969403baf480"; -static const std::string TEST_TRACER_UID_BIT_SET = "f4dca0a9-12c7-b307-8002-969403baf480"; -} // namespace - class StreamDecoderTest : public Test, public StreamDecoderCompletionCallback { public: StreamDecoderTest() - : api_(Envoy::Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher()), + : api_(Envoy::Api::createApiForTest(time_system_)), + dispatcher_(api_->allocateDispatcher("test_thread")), request_headers_(std::make_shared( std::initializer_list>({{":method", "GET"}}))), http_tracer_(std::make_unique()), @@ -53,7 +49,8 @@ class 
StreamDecoderTest : public Test, public StreamDecoderCompletionCallback { HeaderMapPtr request_headers_; uint64_t stream_decoder_completion_callbacks_{0}; uint64_t pool_failures_{0}; - Envoy::Tracing::HttpTracerPtr http_tracer_; + Envoy::Runtime::RandomGeneratorImpl random_generator_; + Envoy::Tracing::HttpTracerSharedPtr http_tracer_; Envoy::Http::ResponseHeaderMapPtr test_header_; Envoy::Http::ResponseTrailerMapPtr test_trailer_; }; @@ -63,7 +60,7 @@ TEST_F(StreamDecoderTest, HeaderOnlyTest) { auto decoder = new StreamDecoder( *dispatcher_, time_system_, *this, [&is_complete](bool, bool) { is_complete = true; }, connect_statistic_, latency_statistic_, response_header_size_statistic_, - response_body_size_statistic_, request_headers_, false, 0, TEST_TRACER_UID, http_tracer_); + response_body_size_statistic_, request_headers_, false, 0, random_generator_, http_tracer_); decoder->decodeHeaders(std::move(test_header_), true); EXPECT_TRUE(is_complete); EXPECT_EQ(1, stream_decoder_completion_callbacks_); @@ -74,7 +71,7 @@ TEST_F(StreamDecoderTest, HeaderWithBodyTest) { auto decoder = new StreamDecoder( *dispatcher_, time_system_, *this, [&is_complete](bool, bool) { is_complete = true; }, connect_statistic_, latency_statistic_, response_header_size_statistic_, - response_body_size_statistic_, request_headers_, false, 0, TEST_TRACER_UID, http_tracer_); + response_body_size_statistic_, request_headers_, false, 0, random_generator_, http_tracer_); decoder->decodeHeaders(std::move(test_header_), false); EXPECT_FALSE(is_complete); Envoy::Buffer::OwnedImpl buf(std::string(1, 'a')); @@ -90,7 +87,7 @@ TEST_F(StreamDecoderTest, TrailerTest) { auto decoder = new StreamDecoder( *dispatcher_, time_system_, *this, [&is_complete](bool, bool) { is_complete = true; }, connect_statistic_, latency_statistic_, response_header_size_statistic_, - response_body_size_statistic_, request_headers_, false, 0, TEST_TRACER_UID, http_tracer_); + response_body_size_statistic_, request_headers_, 
false, 0, random_generator_, http_tracer_); Envoy::Http::ResponseHeaderMapPtr headers{ new Envoy::Http::TestResponseHeaderMapImpl{{":status", "200"}}}; decoder->decodeHeaders(std::move(headers), false); @@ -104,7 +101,7 @@ TEST_F(StreamDecoderTest, LatencyIsNotMeasured) { auto decoder = new StreamDecoder( *dispatcher_, time_system_, *this, [](bool, bool) {}, connect_statistic_, latency_statistic_, response_header_size_statistic_, response_body_size_statistic_, request_headers_, false, 0, - TEST_TRACER_UID, http_tracer_); + random_generator_, http_tracer_); Envoy::Http::MockRequestEncoder stream_encoder; EXPECT_CALL(stream_encoder, getStream()); Envoy::Upstream::HostDescriptionConstSharedPtr ptr; @@ -136,20 +133,16 @@ TEST_F(StreamDecoderTest, LatencyIsMeasured) { auto request_header = std::make_shared( std::initializer_list>( {{":method", "GET"}, {":path", "/"}})); - auto expected_request_header = std::make_shared( - std::initializer_list>( - {{":method", "GET"}, {":path", "/"}, {"x-client-trace-id", TEST_TRACER_UID_BIT_SET}})); auto decoder = new StreamDecoder( *dispatcher_, time_system_, *this, [](bool, bool) {}, connect_statistic_, latency_statistic_, response_header_size_statistic_, response_body_size_statistic_, request_header, true, 0, - TEST_TRACER_UID, http_tracer_); + random_generator_, http_tracer_); Envoy::Http::MockRequestEncoder stream_encoder; EXPECT_CALL(stream_encoder, getStream()); Envoy::Upstream::HostDescriptionConstSharedPtr ptr; NiceMock stream_info; - EXPECT_CALL(stream_encoder, - encodeHeaders(Envoy::HeaderMapEqualRef(expected_request_header.get()), true)); + EXPECT_CALL(stream_encoder, encodeHeaders(_, true)); decoder->onPoolReady(stream_encoder, ptr, stream_info); EXPECT_EQ(1, connect_statistic_.count()); decoder->decodeHeaders(std::move(test_header_), false); @@ -163,7 +156,7 @@ TEST_F(StreamDecoderTest, StreamResetTest) { auto decoder = new StreamDecoder( *dispatcher_, time_system_, *this, [&is_complete](bool, bool) { is_complete = true; 
}, connect_statistic_, latency_statistic_, response_header_size_statistic_, - response_body_size_statistic_, request_headers_, false, 0, TEST_TRACER_UID, http_tracer_); + response_body_size_statistic_, request_headers_, false, 0, random_generator_, http_tracer_); decoder->decodeHeaders(std::move(test_header_), false); decoder->onResetStream(Envoy::Http::StreamResetReason::LocalReset, "fooreason"); EXPECT_TRUE(is_complete); // these do get reported. @@ -175,7 +168,7 @@ TEST_F(StreamDecoderTest, PoolFailureTest) { auto decoder = new StreamDecoder( *dispatcher_, time_system_, *this, [&is_complete](bool, bool) { is_complete = true; }, connect_statistic_, latency_statistic_, response_header_size_statistic_, - response_body_size_statistic_, request_headers_, false, 0, TEST_TRACER_UID, http_tracer_); + response_body_size_statistic_, request_headers_, false, 0, random_generator_, http_tracer_); Envoy::Upstream::HostDescriptionConstSharedPtr ptr; decoder->onPoolFailure(Envoy::Http::ConnectionPool::PoolFailureReason::Overflow, "fooreason", ptr); diff --git a/test/termination_predicate_test.cc b/test/termination_predicate_test.cc index 687e62f76..79bff05b0 100644 --- a/test/termination_predicate_test.cc +++ b/test/termination_predicate_test.cc @@ -28,10 +28,10 @@ TEST_F(TerminationPredicateTest, DurationTerminationPredicateImplTest) { DurationTerminationPredicateImpl pred(time_system, duration, time_system.monotonicTime()); EXPECT_EQ(pred.evaluate(), TerminationPredicate::Status::PROCEED); // move to the edge. - time_system.sleep(duration); + time_system.advanceTimeWait(duration); EXPECT_EQ(pred.evaluate(), TerminationPredicate::Status::PROCEED); // move past the edge, we expect the predicate to return TERMINATE. 
- time_system.sleep(1us); + time_system.advanceTimeWait(1us); EXPECT_EQ(pred.evaluate(), TerminationPredicate::Status::TERMINATE); } diff --git a/test/test_data/output_formatter.dotted.gold b/test/test_data/output_formatter.dotted.gold index 43152655a..f3beb91f5 100644 --- a/test/test_data/output_formatter.dotted.gold +++ b/test/test_data/output_formatter.dotted.gold @@ -10,7 +10,7 @@ worker_0..min: 0 worker_0..max: 0 worker_0.foo_size.samples: 4 worker_0.foo_size.mean: 15.5 -worker_0.foo_size.pstdev: 1.11803 +worker_0.foo_size.pstdev: 1.118033988749895 worker_0.foo_size.min: 14 worker_0.foo_size.max: 17 worker_0.foo_size.permilles-0.count: 1 @@ -48,7 +48,7 @@ worker_1..min: 0 worker_1..max: 0 worker_1.foo_size.samples: 4 worker_1.foo_size.mean: 15.5 -worker_1.foo_size.pstdev: 1.11803 +worker_1.foo_size.pstdev: 1.118033988749895 worker_1.foo_size.min: 14 worker_1.foo_size.max: 17 worker_1.foo_size.permilles-0.count: 1 @@ -86,7 +86,7 @@ global..min: 0 global..max: 0 global.foo_size.samples: 4 global.foo_size.mean: 15.5 -global.foo_size.pstdev: 1.11803 +global.foo_size.pstdev: 1.118033988749895 global.foo_size.min: 14 global.foo_size.max: 17 global.foo_size.permilles-0.count: 1 diff --git a/test/test_data/output_formatter.txt.gold b/test/test_data/output_formatter.txt.gold index b9b99f8f9..2929fd19b 100644 --- a/test/test_data/output_formatter.txt.gold +++ b/test/test_data/output_formatter.txt.gold @@ -4,7 +4,7 @@ stat_id (3 samples) min: 0s 001ms 000us | mean: 0s 002ms 000us | max: 0s 003ms 000us | pstdev: 0s 000ms 816us foo_size (4 samples) - min: 14 | mean: 15.5 | max: 17 | pstdev: 1.11803 + min: 14 | mean: 15.5 | max: 17 | pstdev: 1.118033988749895 Percentile Count Value 0.5 2 15 diff --git a/test/utility_test.cc b/test/utility_test.cc index f8058fa58..4d292d136 100644 --- a/test/utility_test.cc +++ b/test/utility_test.cc @@ -93,7 +93,7 @@ class UtilityAddressResolutionTest : public TestWithParamallocateDispatcher(); + auto dispatcher = 
api->allocateDispatcher("uri_resolution_thread"); auto u = UriImpl(uri); return u.resolve(*dispatcher, address_family); } @@ -142,14 +142,14 @@ TEST_P(UtilityAddressResolutionTest, ResolveTwiceReturnsCached) { : Envoy::Network::DnsLookupFamily::V4Only; Envoy::Api::ApiPtr api = Envoy::Api::createApiForTest(); - auto dispatcher = api->allocateDispatcher(); + auto dispatcher = api->allocateDispatcher("test_thread"); auto u = UriImpl("localhost"); EXPECT_EQ(u.resolve(*dispatcher, address_family).get(), u.resolve(*dispatcher, address_family).get()); } -TEST_F(UtilityTest, translateAddressFamilyGoodValues) { +TEST_F(UtilityTest, TranslateAddressFamilyGoodValues) { EXPECT_EQ(Envoy::Network::DnsLookupFamily::V6Only, Utility::translateFamilyOptionString( nighthawk::client::AddressFamily_AddressFamilyOptions_V6)); @@ -161,11 +161,11 @@ TEST_F(UtilityTest, translateAddressFamilyGoodValues) { nighthawk::client::AddressFamily_AddressFamilyOptions_AUTO)); } -TEST_F(UtilityTest, mapCountersFromStore) { +TEST_F(UtilityTest, MapCountersFromStore) { Envoy::Stats::IsolatedStoreImpl store; - store.counter("foo").inc(); - store.counter("worker.2.bar").inc(); - store.counter("worker.1.bar").inc(); + store.counterFromString("foo").inc(); + store.counterFromString("worker.2.bar").inc(); + store.counterFromString("worker.1.bar").inc(); uint64_t filter_delegate_hit_count = 0; const auto& counters = Utility().mapCountersFromStore( store, [&filter_delegate_hit_count](absl::string_view name, uint64_t value) { diff --git a/test/worker_test.cc b/test/worker_test.cc index 8cdad7aff..30263f0af 100644 --- a/test/worker_test.cc +++ b/test/worker_test.cc @@ -2,7 +2,6 @@ #include "external/envoy/source/common/runtime/runtime_impl.h" #include "external/envoy/source/common/stats/isolated_store_impl.h" -#include "external/envoy/test/mocks/init/mocks.h" #include "external/envoy/test/mocks/local_info/mocks.h" #include "external/envoy/test/mocks/protobuf/mocks.h" #include 
"external/envoy/test/mocks/thread_local/mocks.h" @@ -39,7 +38,6 @@ class WorkerTest : public Test { Envoy::Stats::IsolatedStoreImpl test_store_; Envoy::Runtime::RandomGeneratorImpl rand_; NiceMock local_info_; - Envoy::Init::MockManager init_manager_; NiceMock validation_visitor_; }; @@ -52,9 +50,9 @@ TEST_F(WorkerTest, WorkerExecutesOnThread) { TestWorker worker(*api_, tls_); NiceMock dispatcher; std::unique_ptr loader = - std::make_unique(Envoy::Runtime::LoaderPtr{ - new Envoy::Runtime::LoaderImpl(dispatcher, tls_, {}, local_info_, init_manager_, - test_store_, rand_, validation_visitor_, *api_)}); + std::make_unique( + Envoy::Runtime::LoaderPtr{new Envoy::Runtime::LoaderImpl( + dispatcher, tls_, {}, local_info_, test_store_, rand_, validation_visitor_, *api_)}); worker.start(); worker.waitForCompletion(); diff --git a/tools/shell_utils.sh b/tools/shell_utils.sh index 8aa65eed8..ab18006a6 100755 --- a/tools/shell_utils.sh +++ b/tools/shell_utils.sh @@ -1,11 +1,25 @@ source_venv() { VENV_DIR=$1 - if [[ "$VIRTUAL_ENV" == "" ]]; then + if [[ "${VIRTUAL_ENV}" == "" ]]; then if [[ ! -d "${VENV_DIR}"/venv ]]; then - virtualenv "${VENV_DIR}"/venv --no-site-packages --python=python2.7 + virtualenv "${VENV_DIR}"/venv --python=python3 fi source "${VENV_DIR}"/venv/bin/activate else echo "Found existing virtualenv" fi } + +python_venv() { + SCRIPT_DIR=$(realpath "$(dirname "$0")") + + BUILD_DIR=build_tools + PY_NAME="$1" + VENV_DIR="${BUILD_DIR}/${PY_NAME}" + + source_venv "${VENV_DIR}" + pip install -r "${SCRIPT_DIR}"/requirements.txt + + shift + python3 "${SCRIPT_DIR}/${PY_NAME}.py" $* +} \ No newline at end of file