From b631a17bfbba1c1b46ac7b3b1bf5adb4b05719d7 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Mon, 9 Mar 2020 21:50:11 +0100 Subject: [PATCH 01/16] CI/Asan: Perform asan build in multiple steps to reduce memory usage (#315) - Perform the build in incremental steps, to reduce memory footprint of the build. - Additionally, separate test and build, to ensure they won't run concurrently. This helps stabilize the python integration test. These two things combined seem to help stabilize ASAN in CI. I could confirm a couple of consecutive succeeding CI runs: https://app.circleci.com/pipelines/github/envoyproxy/nighthawk?branch=pull%2F315 Associated issue: https://github.com/envoyproxy/nighthawk/issues/272 Leaving that open for now, let's close it later if we're confident the problem is resolved. Signed-off-by: Otto van der Schaaf --- ci/do_ci.sh | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 8de24d3df..6670fb9f5 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -73,6 +73,11 @@ function do_asan() { echo "bazel ASAN/UBSAN debug build with tests" echo "Building and testing envoy tests..." cd "${SRCDIR}" + # We build this in steps to avoid running out of memory in CI + run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan -- //source/exe/... + run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan -- //source/server/... + run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan -- //test/mocks/... + run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan -- //test/... run_bazel test ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan //test/... 
} @@ -177,10 +182,6 @@ case "$1" in exit 0 ;; asan) - if [ -n "$CIRCLECI" ]; then - # Decrease parallelism to avoid running out of memory - NUM_CPUS=7 - fi do_asan exit 0 ;; From 5652d351364ce31e819b8824e748b3c1741ed6a1 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Wed, 11 Mar 2020 02:23:05 +0100 Subject: [PATCH 02/16] Update Envoy to 3a2512d923e2eee1fce9f4f6a23cf93f2e7ed93f (#316) Update to the latest + fixes to unbreak the build Signed-off-by: Otto van der Schaaf --- bazel/repositories.bzl | 4 ++-- source/client/BUILD | 1 + source/client/process_impl.cc | 5 +++-- source/client/process_impl.h | 2 ++ test/output_formatter_test.cc | 2 +- test/utility_test.cc | 4 ++-- 6 files changed, 11 insertions(+), 7 deletions(-) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index c12bcb84a..5a383cc73 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "3a2512d923e2eee1fce9f4f6a23cf93f2e7ed93f" # March 1st, 2020 -ENVOY_SHA = "6a643ae3141762c403aa031cc19e65c441759785fd7dda51dcf5ad5f149283d0" +ENVOY_COMMIT = "9be85fbc561c373875a0102dddf9baf5a86543c1" # March 10th, 2020 +ENVOY_SHA = "111e34dd9f97700d6030dbac7fc826cb7c5fe4a1cccef3cc05c83508c84c6692" RULES_PYTHON_COMMIT = "dd7f9c5f01bafbfea08c44092b6b0c8fc8fcb77f" # Feb 22nd, 2020 RULES_PYTHON_SHA = "0aa9ec790a58053e3ab5af397879b267a625955f8297c239b2d8559c6773397b" diff --git a/source/client/BUILD b/source/client/BUILD index 4ae1e0cb8..b3235b9e1 100644 --- a/source/client/BUILD +++ b/source/client/BUILD @@ -52,6 +52,7 @@ envoy_cc_library( "@envoy//source/common/config:utility_lib_with_external_headers", "@envoy//source/common/event:dispatcher_includes_with_external_headers", "@envoy//source/common/event:real_time_system_lib_with_external_headers", + "@envoy//source/common/grpc:context_lib_with_external_headers", "@envoy//source/common/http:context_lib_with_external_headers", 
"@envoy//source/common/http:header_map_lib_with_external_headers", "@envoy//source/common/http:headers_lib_with_external_headers", diff --git a/source/client/process_impl.cc b/source/client/process_impl.cc index a5da471a3..a4aeb9684 100644 --- a/source/client/process_impl.cc +++ b/source/client/process_impl.cc @@ -108,6 +108,7 @@ ProcessImpl::ProcessImpl(const Options& options, Envoy::Event::TimeSystem& time_ {}, Envoy::Network::Utility::getLocalAddress(Envoy::Network::Address::IpVersion::v4), "nighthawk_service_zone", "nighthawk_service_cluster", "nighthawk_service_node")), secret_manager_(config_tracker_), http_context_(store_root_.symbolTable()), + grpc_context_(store_root_.symbolTable()), singleton_manager_(std::make_unique(api_->threadFactory())), access_log_manager_(std::chrono::milliseconds(1000), *api_, *dispatcher_, access_log_lock_, store_root_), @@ -426,8 +427,8 @@ bool ProcessImpl::run(OutputCollector& collector) { cluster_manager_factory_ = std::make_unique( admin_, Envoy::Runtime::LoaderSingleton::get(), store_root_, tls_, generator_, dispatcher_->createDnsResolver({}, false), *ssl_context_manager_, *dispatcher_, *local_info_, - secret_manager_, validation_context_, *api_, http_context_, access_log_manager_, - *singleton_manager_); + secret_manager_, validation_context_, *api_, http_context_, grpc_context_, + access_log_manager_, *singleton_manager_); cluster_manager_factory_->setConnectionReuseStrategy( options_.h1ConnectionReuseStrategy() == nighthawk::client::H1ConnectionReuseStrategy::LRU ? 
Http1PoolImpl::ConnectionReuseStrategy::LRU diff --git a/source/client/process_impl.h b/source/client/process_impl.h index dfba53f6a..9d0420990 100644 --- a/source/client/process_impl.h +++ b/source/client/process_impl.h @@ -17,6 +17,7 @@ #include "external/envoy/source/common/access_log/access_log_manager_impl.h" #include "external/envoy/source/common/common/logger.h" #include "external/envoy/source/common/event/real_time_system.h" +#include "external/envoy/source/common/grpc/context_impl.h" #include "external/envoy/source/common/http/context_impl.h" #include "external/envoy/source/common/protobuf/message_validator_impl.h" #include "external/envoy/source/common/secret/secret_manager_impl.h" @@ -114,6 +115,7 @@ class ProcessImpl : public Process, public Envoy::Logger::Loggable Date: Thu, 19 Mar 2020 18:39:47 +0100 Subject: [PATCH 03/16] Update Envoy to c34b82f261be45f60a4df8e40937f4066d20797b (#319) Signed-off-by: Otto van der Schaaf --- .bazelrc | 7 ++++++- bazel/repositories.bzl | 4 ++-- source/client/process_impl.cc | 1 - 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/.bazelrc b/.bazelrc index 256a9081a..2b2fce664 100644 --- a/.bazelrc +++ b/.bazelrc @@ -191,10 +191,15 @@ build:remote-ci --remote_executor=grpcs://remotebuildexecution.googleapis.com # Fuzz builds build:asan-fuzzer --config=clang-asan build:asan-fuzzer --define=FUZZING_ENGINE=libfuzzer -build:asan-fuzzer --copt=-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION build:asan-fuzzer --copt=-fsanitize=fuzzer-no-link +build:asan-fuzzer --copt=-fno-omit-frame-pointer # Remove UBSAN halt_on_error to avoid crashing on protobuf errors. build:asan-fuzzer --test_env=UBSAN_OPTIONS=print_stacktrace=1 +# Fuzzing without ASAN. This is useful for profiling fuzzers without any ASAN artifacts. 
+build:plain-fuzzer --define=FUZZING_ENGINE=libfuzzer +build:plain-fuzzer --copt=-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION +build:plain-fuzzer --copt=-fsanitize=fuzzer-no-link + try-import %workspace%/clang.bazelrc try-import %workspace%/user.bazelrc diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 5a383cc73..f9ea4bc9f 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "9be85fbc561c373875a0102dddf9baf5a86543c1" # March 10th, 2020 -ENVOY_SHA = "111e34dd9f97700d6030dbac7fc826cb7c5fe4a1cccef3cc05c83508c84c6692" +ENVOY_COMMIT = "c34b82f261be45f60a4df8e40937f4066d20797b" # March 18th, 2020 +ENVOY_SHA = "e64eeffce4d29cde2ca6b74956c0142cbe3b80259a754810001e19d841b64b99" RULES_PYTHON_COMMIT = "dd7f9c5f01bafbfea08c44092b6b0c8fc8fcb77f" # Feb 22nd, 2020 RULES_PYTHON_SHA = "0aa9ec790a58053e3ab5af397879b267a625955f8297c239b2d8559c6773397b" diff --git a/source/client/process_impl.cc b/source/client/process_impl.cc index a4aeb9684..32adb91af 100644 --- a/source/client/process_impl.cc +++ b/source/client/process_impl.cc @@ -357,7 +357,6 @@ void ProcessImpl::maybeCreateTracingDriver(const envoy::config::trace::v3::Traci Envoy::Runtime::LoaderSingleton::get(), *local_info_, generator_, time_system_); http_tracer_ = std::make_unique(std::move(zipkin_driver), *local_info_); - http_context_.setTracer(*http_tracer_); #else ENVOY_LOG(error, "Not build with any tracing support"); #endif From 0e0ba0bd1af4c8da1df4241916d1fa8437fa69bf Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Fri, 27 Mar 2020 14:30:10 +0100 Subject: [PATCH 04/16] Update Envoy to 631a009406c15c778a5571cde99eb8a3039ba7b1 (#320) Signed-off-by: Otto van der Schaaf --- bazel/repositories.bzl | 4 ++-- include/nighthawk/client/factories.h | 2 +- source/client/benchmark_client_impl.cc | 2 +- source/client/benchmark_client_impl.h | 4 ++-- source/client/client_worker_impl.cc | 2 +- 
source/client/client_worker_impl.h | 4 ++-- source/client/factories_impl.cc | 5 +++-- source/client/factories_impl.h | 2 +- source/client/process_impl.h | 2 +- source/client/stream_decoder.h | 4 ++-- test/benchmark_http_client_test.cc | 2 +- test/client_worker_test.cc | 2 +- test/factories_test.cc | 2 +- test/mocks/client/mock_benchmark_client_factory.h | 2 +- test/stream_decoder_test.cc | 2 +- 15 files changed, 21 insertions(+), 20 deletions(-) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index f9ea4bc9f..bc3952b06 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "c34b82f261be45f60a4df8e40937f4066d20797b" # March 18th, 2020 -ENVOY_SHA = "e64eeffce4d29cde2ca6b74956c0142cbe3b80259a754810001e19d841b64b99" +ENVOY_COMMIT = "631a009406c15c778a5571cde99eb8a3039ba7b1" # March 26th, 2020 +ENVOY_SHA = "926fb5875b88a82b58653b6511129aaa5665ab04e1deb70d3afc9ad99866744d" RULES_PYTHON_COMMIT = "dd7f9c5f01bafbfea08c44092b6b0c8fc8fcb77f" # Feb 22nd, 2020 RULES_PYTHON_SHA = "0aa9ec790a58053e3ab5af397879b267a625955f8297c239b2d8559c6773397b" diff --git a/include/nighthawk/client/factories.h b/include/nighthawk/client/factories.h index d2e6b35a5..56d619029 100644 --- a/include/nighthawk/client/factories.h +++ b/include/nighthawk/client/factories.h @@ -27,7 +27,7 @@ class BenchmarkClientFactory { virtual BenchmarkClientPtr create(Envoy::Api::Api& api, Envoy::Event::Dispatcher& dispatcher, Envoy::Stats::Scope& scope, Envoy::Upstream::ClusterManagerPtr& cluster_manager, - Envoy::Tracing::HttpTracerPtr& http_tracer, + Envoy::Tracing::HttpTracerSharedPtr& http_tracer, absl::string_view cluster_name, RequestSource& request_generator) const PURE; }; diff --git a/source/client/benchmark_client_impl.cc b/source/client/benchmark_client_impl.cc index 8a1bc425f..6b816bc00 100644 --- a/source/client/benchmark_client_impl.cc +++ 
b/source/client/benchmark_client_impl.cc @@ -95,7 +95,7 @@ BenchmarkClientHttpImpl::BenchmarkClientHttpImpl( StatisticPtr&& connect_statistic, StatisticPtr&& response_statistic, StatisticPtr&& response_header_size_statistic, StatisticPtr&& response_body_size_statistic, bool use_h2, Envoy::Upstream::ClusterManagerPtr& cluster_manager, - Envoy::Tracing::HttpTracerPtr& http_tracer, absl::string_view cluster_name, + Envoy::Tracing::HttpTracerSharedPtr& http_tracer, absl::string_view cluster_name, RequestGenerator request_generator, const bool provide_resource_backpressure) : api_(api), dispatcher_(dispatcher), scope_(scope.createScope("benchmark.")), connect_statistic_(std::move(connect_statistic)), diff --git a/source/client/benchmark_client_impl.h b/source/client/benchmark_client_impl.h index 69c580eb6..2a42aebdf 100644 --- a/source/client/benchmark_client_impl.h +++ b/source/client/benchmark_client_impl.h @@ -120,7 +120,7 @@ class BenchmarkClientHttpImpl : public BenchmarkClient, StatisticPtr&& response_header_size_statistic, StatisticPtr&& response_body_size_statistic, bool use_h2, Envoy::Upstream::ClusterManagerPtr& cluster_manager, - Envoy::Tracing::HttpTracerPtr& http_tracer, + Envoy::Tracing::HttpTracerSharedPtr& http_tracer, absl::string_view cluster_name, RequestGenerator request_generator, const bool provide_resource_backpressure); void setConnectionLimit(uint32_t connection_limit) { connection_limit_ = connection_limit; } @@ -178,7 +178,7 @@ class BenchmarkClientHttpImpl : public BenchmarkClient, bool measure_latencies_{}; BenchmarkClientStats benchmark_client_stats_; Envoy::Upstream::ClusterManagerPtr& cluster_manager_; - Envoy::Tracing::HttpTracerPtr& http_tracer_; + Envoy::Tracing::HttpTracerSharedPtr& http_tracer_; std::string cluster_name_; const RequestGenerator request_generator_; const bool provide_resource_backpressure_; diff --git a/source/client/client_worker_impl.cc b/source/client/client_worker_impl.cc index e1eae9c08..e4231bc87 100644 --- 
a/source/client/client_worker_impl.cc +++ b/source/client/client_worker_impl.cc @@ -20,7 +20,7 @@ ClientWorkerImpl::ClientWorkerImpl(Envoy::Api::Api& api, Envoy::ThreadLocal::Ins const RequestSourceFactory& request_generator_factory, Envoy::Stats::Store& store, const int worker_number, const Envoy::MonotonicTime starting_time, - Envoy::Tracing::HttpTracerPtr& http_tracer, + Envoy::Tracing::HttpTracerSharedPtr& http_tracer, const HardCodedWarmupStyle hardcoded_warmup_style) : WorkerImpl(api, tls, store), time_source_(std::make_unique(*dispatcher_)), diff --git a/source/client/client_worker_impl.h b/source/client/client_worker_impl.h index ded0a9d9b..05f2fcb35 100644 --- a/source/client/client_worker_impl.h +++ b/source/client/client_worker_impl.h @@ -34,7 +34,7 @@ class ClientWorkerImpl : public WorkerImpl, virtual public ClientWorker { const RequestSourceFactory& request_generator_factory, Envoy::Stats::Store& store, const int worker_number, const Envoy::MonotonicTime starting_time, - Envoy::Tracing::HttpTracerPtr& http_tracer, + Envoy::Tracing::HttpTracerSharedPtr& http_tracer, const HardCodedWarmupStyle hardcoded_warmup_style); StatisticPtrMap statistics() const override; @@ -58,7 +58,7 @@ class ClientWorkerImpl : public WorkerImpl, virtual public ClientWorker { Envoy::Stats::ScopePtr worker_scope_; Envoy::Stats::ScopePtr worker_number_scope_; const int worker_number_; - Envoy::Tracing::HttpTracerPtr& http_tracer_; + Envoy::Tracing::HttpTracerSharedPtr& http_tracer_; RequestSourcePtr request_generator_; BenchmarkClientPtr benchmark_client_; PhasePtr phase_; diff --git a/source/client/factories_impl.cc b/source/client/factories_impl.cc index 2c7ac533b..fe8516cc4 100644 --- a/source/client/factories_impl.cc +++ b/source/client/factories_impl.cc @@ -27,8 +27,9 @@ BenchmarkClientFactoryImpl::BenchmarkClientFactoryImpl(const Options& options) BenchmarkClientPtr BenchmarkClientFactoryImpl::create( Envoy::Api::Api& api, Envoy::Event::Dispatcher& dispatcher, 
Envoy::Stats::Scope& scope, - Envoy::Upstream::ClusterManagerPtr& cluster_manager, Envoy::Tracing::HttpTracerPtr& http_tracer, - absl::string_view cluster_name, RequestSource& request_generator) const { + Envoy::Upstream::ClusterManagerPtr& cluster_manager, + Envoy::Tracing::HttpTracerSharedPtr& http_tracer, absl::string_view cluster_name, + RequestSource& request_generator) const { StatisticFactoryImpl statistic_factory(options_); // While we lack options to configure which statistic backend goes where, we directly pass // StreamingStatistic for the stats that track response sizes. Ideally we would have options diff --git a/source/client/factories_impl.h b/source/client/factories_impl.h index 3605739f5..928b63f99 100644 --- a/source/client/factories_impl.h +++ b/source/client/factories_impl.h @@ -30,7 +30,7 @@ class BenchmarkClientFactoryImpl : public OptionBasedFactoryImpl, public Benchma BenchmarkClientPtr create(Envoy::Api::Api& api, Envoy::Event::Dispatcher& dispatcher, Envoy::Stats::Scope& scope, Envoy::Upstream::ClusterManagerPtr& cluster_manager, - Envoy::Tracing::HttpTracerPtr& http_tracer, + Envoy::Tracing::HttpTracerSharedPtr& http_tracer, absl::string_view cluster_name, RequestSource& request_generator) const override; }; diff --git a/source/client/process_impl.h b/source/client/process_impl.h index 9d0420990..c5226ef99 100644 --- a/source/client/process_impl.h +++ b/source/client/process_impl.h @@ -127,7 +127,7 @@ class ProcessImpl : public Process, public Envoy::Logger::Loggable runtime_singleton_; Envoy::Init::WatcherImpl init_watcher_; - Tracing::HttpTracerPtr http_tracer_; + Tracing::HttpTracerSharedPtr http_tracer_; Envoy::Server::ValidationAdmin admin_; Envoy::ProtobufMessage::ProdValidationContextImpl validation_context_; bool shutdown_{true}; diff --git a/source/client/stream_decoder.h b/source/client/stream_decoder.h index e0a178227..9608bbc66 100644 --- a/source/client/stream_decoder.h +++ b/source/client/stream_decoder.h @@ -43,7 +43,7 @@ 
class StreamDecoder : public Envoy::Http::ResponseDecoder, Statistic& latency_statistic, Statistic& response_header_sizes_statistic, Statistic& response_body_sizes_statistic, HeaderMapPtr request_headers, bool measure_latencies, uint32_t request_body_size, std::string x_request_id, - Envoy::Tracing::HttpTracerPtr& http_tracer) + Envoy::Tracing::HttpTracerSharedPtr& http_tracer) : dispatcher_(dispatcher), time_source_(time_source), decoder_completion_callback_(decoder_completion_callback), caller_completion_callback_(std::move(caller_completion_callback)), @@ -110,7 +110,7 @@ class StreamDecoder : public Envoy::Http::ResponseDecoder, const uint32_t request_body_size_; Envoy::Tracing::EgressConfigImpl config_; Envoy::StreamInfo::StreamInfoImpl stream_info_; - Envoy::Tracing::HttpTracerPtr& http_tracer_; + Envoy::Tracing::HttpTracerSharedPtr& http_tracer_; Envoy::Tracing::SpanPtr active_span_; Envoy::StreamInfo::UpstreamTiming upstream_timing_; }; diff --git a/test/benchmark_http_client_test.cc b/test/benchmark_http_client_test.cc index d7b7073c7..ee70c66f2 100644 --- a/test/benchmark_http_client_test.cc +++ b/test/benchmark_http_client_test.cc @@ -154,7 +154,7 @@ class BenchmarkClientHttpTest : public Test { NiceMock stream_encoder_; Envoy::Upstream::MockThreadLocalCluster thread_local_cluster_; Envoy::Upstream::ClusterInfoConstSharedPtr cluster_info_; - Envoy::Tracing::HttpTracerPtr http_tracer_; + Envoy::Tracing::HttpTracerSharedPtr http_tracer_; std::string response_code_; RequestGenerator request_generator_; }; diff --git a/test/client_worker_test.cc b/test/client_worker_test.cc index 6867ceb82..21240f234 100644 --- a/test/client_worker_test.cc +++ b/test/client_worker_test.cc @@ -102,7 +102,7 @@ class ClientWorkerTest : public Test { Envoy::Init::MockManager init_manager_; NiceMock validation_visitor_; Envoy::Upstream::ClusterManagerPtr cluster_manager_ptr_; - Envoy::Tracing::HttpTracerPtr http_tracer_; + Envoy::Tracing::HttpTracerSharedPtr http_tracer_; }; 
TEST_F(ClientWorkerTest, BasicTest) { diff --git a/test/factories_test.cc b/test/factories_test.cc index 966e472f5..2f82f96e5 100644 --- a/test/factories_test.cc +++ b/test/factories_test.cc @@ -29,7 +29,7 @@ class FactoriesTest : public Test { Envoy::Stats::MockIsolatedStatsStore stats_store_; Envoy::Event::MockDispatcher dispatcher_; MockOptions options_; - Envoy::Tracing::HttpTracerPtr http_tracer_; + Envoy::Tracing::HttpTracerSharedPtr http_tracer_; }; TEST_F(FactoriesTest, CreateBenchmarkClient) { diff --git a/test/mocks/client/mock_benchmark_client_factory.h b/test/mocks/client/mock_benchmark_client_factory.h index b3b69b708..d67db595c 100644 --- a/test/mocks/client/mock_benchmark_client_factory.h +++ b/test/mocks/client/mock_benchmark_client_factory.h @@ -17,7 +17,7 @@ class MockBenchmarkClientFactory : public BenchmarkClientFactory { MOCK_CONST_METHOD7(create, BenchmarkClientPtr(Envoy::Api::Api&, Envoy::Event::Dispatcher&, Envoy::Stats::Scope&, Envoy::Upstream::ClusterManagerPtr&, - Envoy::Tracing::HttpTracerPtr&, absl::string_view, + Envoy::Tracing::HttpTracerSharedPtr&, absl::string_view, RequestSource& request_generator)); }; diff --git a/test/stream_decoder_test.cc b/test/stream_decoder_test.cc index 8a80e232b..b1703d01d 100644 --- a/test/stream_decoder_test.cc +++ b/test/stream_decoder_test.cc @@ -53,7 +53,7 @@ class StreamDecoderTest : public Test, public StreamDecoderCompletionCallback { HeaderMapPtr request_headers_; uint64_t stream_decoder_completion_callbacks_{0}; uint64_t pool_failures_{0}; - Envoy::Tracing::HttpTracerPtr http_tracer_; + Envoy::Tracing::HttpTracerSharedPtr http_tracer_; Envoy::Http::ResponseHeaderMapPtr test_header_; Envoy::Http::ResponseTrailerMapPtr test_trailer_; }; From adc3df56c8a4f2e93317fb42edb0b2631eb38d96 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Fri, 10 Apr 2020 15:19:32 +0200 Subject: [PATCH 05/16] Update Envoy to f84440dc4f95890f14e2e0686b07258f030b54b3 (#321) - Trace id generation broke. 
This was amended by directly using the new request id uid implementation. It would be nice to leverage the new request id extension capability later for this. - Amended calls to get counters by name - Synced .bazelrc - Histogram value formatting subtly changed in some cases, probably caused by an update to Envoy's dependency on `libfmt`. Introduce an explicit formatting specifier in the affected output formatting code to address that. - Tweaks for resolving new OOM issues with ASAN/TSAN in CI that started with the updated Envoy revision. This affected both the build process as well as the testing process itself. Notably, the python-based integration tests would get OOM-killed in asan runs. Signed-off-by: Otto van der Schaaf --- .bazelrc | 13 ++++++++++--- .bazelversion | 1 + bazel/repositories.bzl | 4 ++-- ci/do_ci.sh | 13 ++++++++----- source/client/BUILD | 2 +- source/client/benchmark_client_impl.cc | 4 +--- source/client/factories_impl.cc | 2 +- source/client/output_formatter_impl.cc | 8 +++++--- source/client/stream_decoder.cc | 9 +++++---- source/client/stream_decoder.h | 11 ++++++----- test/benchmark_http_client_test.cc | 2 +- test/integration/integration_test.py | 4 +++- test/stream_decoder_test.cc | 26 +++++++++----------------- test/utility_test.cc | 6 +++--- 14 files changed, 56 insertions(+), 49 deletions(-) create mode 100644 .bazelversion diff --git a/.bazelrc b/.bazelrc index 2b2fce664..7f83d8173 100644 --- a/.bazelrc +++ b/.bazelrc @@ -16,7 +16,7 @@ startup --host_jvm_args=-Xmx2g build --workspace_status_command=bazel/get_workspace_status build --experimental_local_memory_estimate build --experimental_strict_action_env=true -build --host_force_python=PY2 +build --host_force_python=PY3 build --action_env=BAZEL_LINKLIBS=-l%:libstdc++.a build --action_env=BAZEL_LINKOPTS=-lm build --host_javabase=@bazel_tools//tools/jdk:remote_jdk11 @@ -162,7 +162,7 @@ build:remote-msan --config=rbe-toolchain-msan # Docker sandbox # NOTE: Update this from 
https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L7 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu@sha256:3788a87461f2b3dc8048ad0ce5df40438a56e0a8f1a4ab0f61b4ef0d8c11ff1f +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu@sha256:ebf534b8aa505e8ff5663a31eed782942a742ae4d656b54f4236b00399f17911 build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker @@ -201,5 +201,12 @@ build:plain-fuzzer --define=FUZZING_ENGINE=libfuzzer build:plain-fuzzer --copt=-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION build:plain-fuzzer --copt=-fsanitize=fuzzer-no-link +# Compile database generation config +# We don't care about built binaries so always strip and use fastbuild. +build:compdb -c fastbuild +build:compdb --strip=always +build:compdb --build_tag_filters=-nocompdb +build:compdb --define=ENVOY_CONFIG_COMPILATION_DATABASE=1 + try-import %workspace%/clang.bazelrc -try-import %workspace%/user.bazelrc +try-import %workspace%/user.bazelrc \ No newline at end of file diff --git a/.bazelversion b/.bazelversion new file mode 100644 index 000000000..ccbccc3dc --- /dev/null +++ b/.bazelversion @@ -0,0 +1 @@ +2.2.0 diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index bc3952b06..320abc2b8 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "631a009406c15c778a5571cde99eb8a3039ba7b1" # March 26th, 2020 -ENVOY_SHA = "926fb5875b88a82b58653b6511129aaa5665ab04e1deb70d3afc9ad99866744d" +ENVOY_COMMIT = "f84440dc4f95890f14e2e0686b07258f030b54b3" # April 8th, 2020 +ENVOY_SHA = "200477ab552bcaf08415836a67b7e4de5727db42b5a967a3c4232f519fd03cf2" RULES_PYTHON_COMMIT = "dd7f9c5f01bafbfea08c44092b6b0c8fc8fcb77f" # Feb 22nd, 2020 RULES_PYTHON_SHA = 
"0aa9ec790a58053e3ab5af397879b267a625955f8297c239b2d8559c6773397b" diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 6670fb9f5..2d94cb1ae 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -73,18 +73,21 @@ function do_asan() { echo "bazel ASAN/UBSAN debug build with tests" echo "Building and testing envoy tests..." cd "${SRCDIR}" + [ -z "$CIRCLECI" ] || export BAZEL_BUILD_OPTIONS="${BAZEL_TEST_OPTIONS} --jobs=6 --local_ram_resources=12288" + # We build this in steps to avoid running out of memory in CI - run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan -- //source/exe/... - run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan -- //source/server/... - run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan -- //test/mocks/... - run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan -- //test/... - run_bazel test ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan //test/... + run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan -- //source/exe/... && \ + run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan -- //source/server/... && \ + run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan -- //test/mocks/... && \ + run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan -- //test/... && \ + run_bazel test ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan -- //test/... } function do_tsan() { echo "bazel TSAN debug build with tests" echo "Building and testing envoy tests..." cd "${SRCDIR}" + [ -z "$CIRCLECI" ] || export BAZEL_BUILD_OPTIONS="${BAZEL_TEST_OPTIONS} --local_ram_resources=12288" run_bazel test ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-tsan //test/... 
} diff --git a/source/client/BUILD b/source/client/BUILD index b3235b9e1..3d65f3640 100644 --- a/source/client/BUILD +++ b/source/client/BUILD @@ -66,7 +66,6 @@ envoy_cc_library( "@envoy//source/common/protobuf:message_validator_lib_with_external_headers", "@envoy//source/common/protobuf:utility_lib_with_external_headers", "@envoy//source/common/runtime:runtime_lib_with_external_headers", - "@envoy//source/common/runtime:uuid_util_lib_with_external_headers", "@envoy//source/common/secret:secret_manager_impl_lib_with_external_headers", "@envoy//source/common/singleton:manager_impl_lib_with_external_headers", "@envoy//source/common/stats:allocator_lib_with_external_headers", @@ -80,6 +79,7 @@ envoy_cc_library( "@envoy//source/exe:platform_header_lib_with_external_headers", "@envoy//source/exe:platform_impl_lib", "@envoy//source/exe:process_wide_lib_with_external_headers", + "@envoy//source/common/http:request_id_extension_lib_with_external_headers", "@envoy//source/extensions/tracers:well_known_names_with_external_headers", "@envoy//source/extensions/transport_sockets:well_known_names_with_external_headers", "@envoy//source/extensions/transport_sockets/tls:context_lib_with_external_headers", diff --git a/source/client/benchmark_client_impl.cc b/source/client/benchmark_client_impl.cc index 6b816bc00..99b5a3a65 100644 --- a/source/client/benchmark_client_impl.cc +++ b/source/client/benchmark_client_impl.cc @@ -9,7 +9,6 @@ #include "external/envoy/source/common/http/headers.h" #include "external/envoy/source/common/http/utility.h" #include "external/envoy/source/common/network/utility.h" -#include "external/envoy/source/common/runtime/uuid_util.h" #include "client/stream_decoder.h" @@ -161,12 +160,11 @@ bool BenchmarkClientHttpImpl::tryStartRequest(CompletionCallback caller_completi } } - std::string x_request_id = generator_.uuid(); auto stream_decoder = new StreamDecoder( dispatcher_, api_.timeSource(), *this, std::move(caller_completion_callback), 
*connect_statistic_, *response_statistic_, *response_header_size_statistic_, *response_body_size_statistic_, request->header(), shouldMeasureLatencies(), content_length, - x_request_id, http_tracer_); + generator_, http_tracer_); requests_initiated_++; pool_ptr->newStream(*stream_decoder, *stream_decoder); return true; diff --git a/source/client/factories_impl.cc b/source/client/factories_impl.cc index fe8516cc4..e65111f01 100644 --- a/source/client/factories_impl.cc +++ b/source/client/factories_impl.cc @@ -196,7 +196,7 @@ TerminationPredicate* TerminationPredicateFactoryImpl::linkConfiguredPredicates( predicate.first, predicate.second); current_predicate = ¤t_predicate->link( std::make_unique( - scope.counter(predicate.first), predicate.second, termination_status)); + scope.counterFromString(predicate.first), predicate.second, termination_status)); } return current_predicate; } diff --git a/source/client/output_formatter_impl.cc b/source/client/output_formatter_impl.cc index e5bc44cdc..c438179a3 100644 --- a/source/client/output_formatter_impl.cc +++ b/source/client/output_formatter_impl.cc @@ -91,8 +91,9 @@ std::string ConsoleOutputFormatterImpl::formatProto(const nighthawk::client::Out header_written = true; } ss << fmt::format(" {:<{}}{:<{}}{:<{}}", p, 12, percentile.count(), 12, - percentile.has_duration() ? formatProtoDuration(percentile.duration()) - : fmt::format("{}", percentile.raw_value()), + percentile.has_duration() + ? 
formatProtoDuration(percentile.duration()) + : fmt::format("{}", static_cast(percentile.raw_value())), 15) << std::endl; } @@ -189,7 +190,8 @@ DottedStringOutputFormatterImpl::formatProto(const nighthawk::client::Output& ou "{}.microseconds: {}", percentile_prefix, Envoy::Protobuf::util::TimeUtil::DurationToMicroseconds(percentile.duration())); } else { - ss << fmt::format("{}.value: {}", percentile_prefix, percentile.raw_value()); + ss << fmt::format("{}.value: {}", percentile_prefix, + static_cast(percentile.raw_value())); } ss << std::endl; }); diff --git a/source/client/stream_decoder.cc b/source/client/stream_decoder.cc index b9220a0c3..afdfd367d 100644 --- a/source/client/stream_decoder.cc +++ b/source/client/stream_decoder.cc @@ -3,6 +3,7 @@ #include #include "external/envoy/source/common/http/http1/codec_impl.h" +#include "external/envoy/source/common/http/request_id_extension_uuid_impl.h" #include "external/envoy/source/common/http/utility.h" #include "external/envoy/source/common/network/address_impl.h" #include "external/envoy/source/common/stream_info/stream_info_impl.h" @@ -134,13 +135,13 @@ void StreamDecoder::finalizeActiveSpan() { } } -void StreamDecoder::setupForTracing(std::string& x_request_id) { +void StreamDecoder::setupForTracing() { auto headers_copy = std::make_unique(); Envoy::Http::HeaderMapImpl::copyFrom(*headers_copy, *request_headers_); Envoy::Tracing::Decision tracing_decision = {Envoy::Tracing::Reason::ClientForced, true}; - RELEASE_ASSERT(Envoy::UuidUtils::setTraceableUuid(x_request_id, Envoy::UuidTraceStatus::Client), - "setTraceableUuid failed"); - headers_copy->setClientTraceId(x_request_id); + Envoy::Http::UUIDRequestIDExtension uuid_generator(random_generator_); + uuid_generator.set(*headers_copy, true); + uuid_generator.setTraceStatus(*headers_copy, Envoy::Http::TraceStatus::Client); active_span_ = http_tracer_->startSpan(config_, *headers_copy, stream_info_, tracing_decision); active_span_->injectContext(*headers_copy); 
request_headers_.reset(headers_copy.release()); diff --git a/source/client/stream_decoder.h b/source/client/stream_decoder.h index 9608bbc66..869a8d57e 100644 --- a/source/client/stream_decoder.h +++ b/source/client/stream_decoder.h @@ -13,7 +13,6 @@ #include "nighthawk/common/statistic.h" #include "external/envoy/source/common/http/header_map_impl.h" -#include "external/envoy/source/common/runtime/uuid_util.h" #include "external/envoy/source/common/stream_info/stream_info_impl.h" #include "external/envoy/source/common/tracing/http_tracer_impl.h" @@ -42,7 +41,8 @@ class StreamDecoder : public Envoy::Http::ResponseDecoder, OperationCallback caller_completion_callback, Statistic& connect_statistic, Statistic& latency_statistic, Statistic& response_header_sizes_statistic, Statistic& response_body_sizes_statistic, HeaderMapPtr request_headers, - bool measure_latencies, uint32_t request_body_size, std::string x_request_id, + bool measure_latencies, uint32_t request_body_size, + Envoy::Runtime::RandomGenerator& random_generator, Envoy::Tracing::HttpTracerSharedPtr& http_tracer) : dispatcher_(dispatcher), time_source_(time_source), decoder_completion_callback_(decoder_completion_callback), @@ -53,9 +53,9 @@ class StreamDecoder : public Envoy::Http::ResponseDecoder, request_headers_(std::move(request_headers)), connect_start_(time_source_.monotonicTime()), complete_(false), measure_latencies_(measure_latencies), request_body_size_(request_body_size), stream_info_(time_source_), - http_tracer_(http_tracer) { + random_generator_(random_generator), http_tracer_(http_tracer) { if (measure_latencies_ && http_tracer_ != nullptr) { - setupForTracing(x_request_id); + setupForTracing(); } } @@ -83,7 +83,7 @@ class StreamDecoder : public Envoy::Http::ResponseDecoder, static Envoy::StreamInfo::ResponseFlag streamResetReasonToResponseFlag(Envoy::Http::StreamResetReason reset_reason); void finalizeActiveSpan(); - void setupForTracing(std::string& x_request_id); + void 
setupForTracing(); private: void onComplete(bool success); @@ -110,6 +110,7 @@ class StreamDecoder : public Envoy::Http::ResponseDecoder, const uint32_t request_body_size_; Envoy::Tracing::EgressConfigImpl config_; Envoy::StreamInfo::StreamInfoImpl stream_info_; + Envoy::Runtime::RandomGenerator& random_generator_; Envoy::Tracing::HttpTracerSharedPtr& http_tracer_; Envoy::Tracing::SpanPtr active_span_; Envoy::StreamInfo::UpstreamTiming upstream_timing_; diff --git a/test/benchmark_http_client_test.cc b/test/benchmark_http_client_test.cc index ee70c66f2..8800588f6 100644 --- a/test/benchmark_http_client_test.cc +++ b/test/benchmark_http_client_test.cc @@ -128,7 +128,7 @@ class BenchmarkClientHttpTest : public Test { } uint64_t getCounter(absl::string_view name) { - return client_->scope().counter(std::string(name)).value(); + return client_->scope().counterFromString(std::string(name)).value(); } Envoy::Upstream::MockClusterManager& cluster_manager() { diff --git a/test/integration/integration_test.py b/test/integration/integration_test.py index 915b3cc6d..2309521a7 100644 --- a/test/integration/integration_test.py +++ b/test/integration/integration_test.py @@ -7,6 +7,8 @@ import sys import pytest +from utility import isSanitizerRun + if __name__ == '__main__': path = os.path.dirname(os.path.realpath(__file__)) test_selection_arg = sys.argv[1] if len(sys.argv) > 1 else "" @@ -22,7 +24,7 @@ "-x", path, "-n", - "20" # Number of tests to run in parallel + "2" if isSanitizerRun() else "20" # Number of tests to run in parallel ], plugins=["xdist"]) exit(r) diff --git a/test/stream_decoder_test.cc b/test/stream_decoder_test.cc index b1703d01d..dd33cd053 100644 --- a/test/stream_decoder_test.cc +++ b/test/stream_decoder_test.cc @@ -20,11 +20,6 @@ using namespace testing; namespace Nighthawk { namespace Client { -namespace { -static const std::string TEST_TRACER_UID = "f4dca0a9-12c7-4307-8002-969403baf480"; -static const std::string TEST_TRACER_UID_BIT_SET = 
"f4dca0a9-12c7-b307-8002-969403baf480"; -} // namespace - class StreamDecoderTest : public Test, public StreamDecoderCompletionCallback { public: StreamDecoderTest() @@ -53,6 +48,7 @@ class StreamDecoderTest : public Test, public StreamDecoderCompletionCallback { HeaderMapPtr request_headers_; uint64_t stream_decoder_completion_callbacks_{0}; uint64_t pool_failures_{0}; + Envoy::Runtime::RandomGeneratorImpl random_generator_; Envoy::Tracing::HttpTracerSharedPtr http_tracer_; Envoy::Http::ResponseHeaderMapPtr test_header_; Envoy::Http::ResponseTrailerMapPtr test_trailer_; @@ -63,7 +59,7 @@ TEST_F(StreamDecoderTest, HeaderOnlyTest) { auto decoder = new StreamDecoder( *dispatcher_, time_system_, *this, [&is_complete](bool, bool) { is_complete = true; }, connect_statistic_, latency_statistic_, response_header_size_statistic_, - response_body_size_statistic_, request_headers_, false, 0, TEST_TRACER_UID, http_tracer_); + response_body_size_statistic_, request_headers_, false, 0, random_generator_, http_tracer_); decoder->decodeHeaders(std::move(test_header_), true); EXPECT_TRUE(is_complete); EXPECT_EQ(1, stream_decoder_completion_callbacks_); @@ -74,7 +70,7 @@ TEST_F(StreamDecoderTest, HeaderWithBodyTest) { auto decoder = new StreamDecoder( *dispatcher_, time_system_, *this, [&is_complete](bool, bool) { is_complete = true; }, connect_statistic_, latency_statistic_, response_header_size_statistic_, - response_body_size_statistic_, request_headers_, false, 0, TEST_TRACER_UID, http_tracer_); + response_body_size_statistic_, request_headers_, false, 0, random_generator_, http_tracer_); decoder->decodeHeaders(std::move(test_header_), false); EXPECT_FALSE(is_complete); Envoy::Buffer::OwnedImpl buf(std::string(1, 'a')); @@ -90,7 +86,7 @@ TEST_F(StreamDecoderTest, TrailerTest) { auto decoder = new StreamDecoder( *dispatcher_, time_system_, *this, [&is_complete](bool, bool) { is_complete = true; }, connect_statistic_, latency_statistic_, response_header_size_statistic_, - 
response_body_size_statistic_, request_headers_, false, 0, TEST_TRACER_UID, http_tracer_); + response_body_size_statistic_, request_headers_, false, 0, random_generator_, http_tracer_); Envoy::Http::ResponseHeaderMapPtr headers{ new Envoy::Http::TestResponseHeaderMapImpl{{":status", "200"}}}; decoder->decodeHeaders(std::move(headers), false); @@ -104,7 +100,7 @@ TEST_F(StreamDecoderTest, LatencyIsNotMeasured) { auto decoder = new StreamDecoder( *dispatcher_, time_system_, *this, [](bool, bool) {}, connect_statistic_, latency_statistic_, response_header_size_statistic_, response_body_size_statistic_, request_headers_, false, 0, - TEST_TRACER_UID, http_tracer_); + random_generator_, http_tracer_); Envoy::Http::MockRequestEncoder stream_encoder; EXPECT_CALL(stream_encoder, getStream()); Envoy::Upstream::HostDescriptionConstSharedPtr ptr; @@ -136,20 +132,16 @@ TEST_F(StreamDecoderTest, LatencyIsMeasured) { auto request_header = std::make_shared( std::initializer_list>( {{":method", "GET"}, {":path", "/"}})); - auto expected_request_header = std::make_shared( - std::initializer_list>( - {{":method", "GET"}, {":path", "/"}, {"x-client-trace-id", TEST_TRACER_UID_BIT_SET}})); auto decoder = new StreamDecoder( *dispatcher_, time_system_, *this, [](bool, bool) {}, connect_statistic_, latency_statistic_, response_header_size_statistic_, response_body_size_statistic_, request_header, true, 0, - TEST_TRACER_UID, http_tracer_); + random_generator_, http_tracer_); Envoy::Http::MockRequestEncoder stream_encoder; EXPECT_CALL(stream_encoder, getStream()); Envoy::Upstream::HostDescriptionConstSharedPtr ptr; NiceMock stream_info; - EXPECT_CALL(stream_encoder, - encodeHeaders(Envoy::HeaderMapEqualRef(expected_request_header.get()), true)); + EXPECT_CALL(stream_encoder, encodeHeaders(_, true)); decoder->onPoolReady(stream_encoder, ptr, stream_info); EXPECT_EQ(1, connect_statistic_.count()); decoder->decodeHeaders(std::move(test_header_), false); @@ -163,7 +155,7 @@ 
TEST_F(StreamDecoderTest, StreamResetTest) { auto decoder = new StreamDecoder( *dispatcher_, time_system_, *this, [&is_complete](bool, bool) { is_complete = true; }, connect_statistic_, latency_statistic_, response_header_size_statistic_, - response_body_size_statistic_, request_headers_, false, 0, TEST_TRACER_UID, http_tracer_); + response_body_size_statistic_, request_headers_, false, 0, random_generator_, http_tracer_); decoder->decodeHeaders(std::move(test_header_), false); decoder->onResetStream(Envoy::Http::StreamResetReason::LocalReset, "fooreason"); EXPECT_TRUE(is_complete); // these do get reported. @@ -175,7 +167,7 @@ TEST_F(StreamDecoderTest, PoolFailureTest) { auto decoder = new StreamDecoder( *dispatcher_, time_system_, *this, [&is_complete](bool, bool) { is_complete = true; }, connect_statistic_, latency_statistic_, response_header_size_statistic_, - response_body_size_statistic_, request_headers_, false, 0, TEST_TRACER_UID, http_tracer_); + response_body_size_statistic_, request_headers_, false, 0, random_generator_, http_tracer_); Envoy::Upstream::HostDescriptionConstSharedPtr ptr; decoder->onPoolFailure(Envoy::Http::ConnectionPool::PoolFailureReason::Overflow, "fooreason", ptr); diff --git a/test/utility_test.cc b/test/utility_test.cc index 7246c6191..7ec2fb414 100644 --- a/test/utility_test.cc +++ b/test/utility_test.cc @@ -163,9 +163,9 @@ TEST_F(UtilityTest, TranslateAddressFamilyGoodValues) { TEST_F(UtilityTest, MapCountersFromStore) { Envoy::Stats::IsolatedStoreImpl store; - store.counter("foo").inc(); - store.counter("worker.2.bar").inc(); - store.counter("worker.1.bar").inc(); + store.counterFromString("foo").inc(); + store.counterFromString("worker.2.bar").inc(); + store.counterFromString("worker.1.bar").inc(); uint64_t filter_delegate_hit_count = 0; const auto& counters = Utility().mapCountersFromStore( store, [&filter_delegate_hit_count](absl::string_view name, uint64_t value) { From 59683b759eb8f8bd8cce282795c08f9e2b3313d4 Mon Sep 17 
00:00:00 2001 From: Otto van der Schaaf Date: Sat, 11 Apr 2020 14:14:02 +0200 Subject: [PATCH 06/16] Update Envoy to 8e8209fa75f87ab53d4c78a466c8f927df930e50 (#322) Make unit tests to use advanceTimeWait in favor of sleep (see https://github.com/envoyproxy/envoy/pull/10551) Signed-off-by: Otto van der Schaaf --- bazel/repositories.bzl | 4 ++-- ci/do_ci.sh | 2 +- test/rate_limiter_test.cc | 24 ++++++++++++------------ test/termination_predicate_test.cc | 4 ++-- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 320abc2b8..b198504e9 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "f84440dc4f95890f14e2e0686b07258f030b54b3" # April 8th, 2020 -ENVOY_SHA = "200477ab552bcaf08415836a67b7e4de5727db42b5a967a3c4232f519fd03cf2" +ENVOY_COMMIT = "8e8209fa75f87ab53d4c78a466c8f927df930e50" # April 10th, 2020 +ENVOY_SHA = "ef9661e7c0c446e4bf600adb9328f5e5e3088cb897380c7e991b7f743f811a10" RULES_PYTHON_COMMIT = "dd7f9c5f01bafbfea08c44092b6b0c8fc8fcb77f" # Feb 22nd, 2020 RULES_PYTHON_SHA = "0aa9ec790a58053e3ab5af397879b267a625955f8297c239b2d8559c6773397b" diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 2d94cb1ae..5a8fb038e 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -73,7 +73,7 @@ function do_asan() { echo "bazel ASAN/UBSAN debug build with tests" echo "Building and testing envoy tests..." cd "${SRCDIR}" - [ -z "$CIRCLECI" ] || export BAZEL_BUILD_OPTIONS="${BAZEL_TEST_OPTIONS} --jobs=6 --local_ram_resources=12288" + [ -z "$CIRCLECI" ] || export BAZEL_BUILD_OPTIONS="${BAZEL_TEST_OPTIONS} --jobs=4 --local_ram_resources=12288" # We build this in steps to avoid running out of memory in CI run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan -- //source/exe/... 
&& \ diff --git a/test/rate_limiter_test.cc b/test/rate_limiter_test.cc index 99f659da1..9da8424cc 100644 --- a/test/rate_limiter_test.cc +++ b/test/rate_limiter_test.cc @@ -25,11 +25,11 @@ TEST_F(RateLimiterTest, LinearRateLimiterTest) { EXPECT_FALSE(rate_limiter.tryAcquireOne()); - time_system.sleep(100ms); + time_system.advanceTimeWait(100ms); EXPECT_TRUE(rate_limiter.tryAcquireOne()); EXPECT_FALSE(rate_limiter.tryAcquireOne()); - time_system.sleep(1s); + time_system.advanceTimeWait(1s); for (int i = 0; i < 10; i++) { EXPECT_TRUE(rate_limiter.tryAcquireOne()); } @@ -91,13 +91,13 @@ TEST_F(RateLimiterTest, ScheduledStartingRateLimiterTest) { .WillRepeatedly(Return(true)); if (starting_late) { - time_system.sleep(schedule_delay); + time_system.advanceTimeWait(schedule_delay); } // We should expect zero releases until it is time to start. while (time_system.monotonicTime() < scheduled_starting_time) { EXPECT_FALSE(rate_limiter->tryAcquireOne()); - time_system.sleep(1ms); + time_system.advanceTimeWait(1ms); } // Now that is time to start, the rate limiter should propagate to the mock rate limiter. @@ -140,7 +140,7 @@ class BurstingRateLimiterIntegrationTest : public Test { EXPECT_EQ(burst_acquired, burst_size); EXPECT_EQ(i % (burst_interval_ms.count() - first_burst), 0); } - time_system.sleep(1ms); + time_system.advanceTimeWait(1ms); } } }; @@ -213,16 +213,16 @@ TEST_F(RateLimiterTest, DistributionSamplingRateLimiterImplSchedulingTest) { // The distribution first yields a 1 ns offset. So we don't expect to be green lighted. EXPECT_FALSE(rate_limiter->tryAcquireOne()); - time_system.sleep(1ns); + time_system.advanceTimeWait(1ns); EXPECT_TRUE(rate_limiter->tryAcquireOne()); // We expect releaseOne to be propagated. rate_limiter->releaseOne(); // The distribution will yield an offset of 0ns, we expect success. EXPECT_TRUE(rate_limiter->tryAcquireOne()); - // We don't sleep, and the distribution will yield a 1ns offset. No green light. 
+ // We don't advanceTimeWait, and the distribution will yield a 1ns offset. No green light. EXPECT_FALSE(rate_limiter->tryAcquireOne()); - time_system.sleep(1ns); + time_system.advanceTimeWait(1ns); EXPECT_TRUE(rate_limiter->tryAcquireOne()); } @@ -259,7 +259,7 @@ class LinearRampingRateLimiterImplTest : public Test { if (expected_count > control_timings.size()) { control_timings.push_back(total_us_elapsed.count()); } - time_system.sleep(clock_tick); + time_system.advanceTimeWait(clock_tick); total_us_elapsed += clock_tick; } while (total_us_elapsed <= duration); @@ -368,12 +368,12 @@ class GraduallyOpeningRateLimiterFilterTest : public Test { acquisition_timings.push_back(total_ms_elapsed.count()); EXPECT_FALSE(rate_limiter->tryAcquireOne()); } - time_system.sleep(clock_tick); + time_system.advanceTimeWait(clock_tick); total_ms_elapsed += clock_tick; } while (total_ms_elapsed <= duration); EXPECT_FALSE(rate_limiter->tryAcquireOne()); - time_system.sleep(1s); + time_system.advanceTimeWait(1s); // Verify that after the rampup the expected constant pacing is maintained. // Calls should be forwarded to the regular linear rate limiter algorithm with its // corrective behavior so we can expect to acquire a series with that. 
@@ -410,7 +410,7 @@ TEST_F(ZipfRateLimiterImplTest, TimingVerificationTest) { if (rate_limiter->tryAcquireOne()) { aquisition_timings.push_back(total_ms_elapsed.count()); } - time_system.sleep(clock_tick); + time_system.advanceTimeWait(clock_tick); total_ms_elapsed += clock_tick; } while (total_ms_elapsed <= duration); EXPECT_EQ(aquisition_timings, diff --git a/test/termination_predicate_test.cc b/test/termination_predicate_test.cc index 687e62f76..79bff05b0 100644 --- a/test/termination_predicate_test.cc +++ b/test/termination_predicate_test.cc @@ -28,10 +28,10 @@ TEST_F(TerminationPredicateTest, DurationTerminationPredicateImplTest) { DurationTerminationPredicateImpl pred(time_system, duration, time_system.monotonicTime()); EXPECT_EQ(pred.evaluate(), TerminationPredicate::Status::PROCEED); // move to the edge. - time_system.sleep(duration); + time_system.advanceTimeWait(duration); EXPECT_EQ(pred.evaluate(), TerminationPredicate::Status::PROCEED); // move past the edge, we expect the predicate to return TERMINATE. 
- time_system.sleep(1us); + time_system.advanceTimeWait(1us); EXPECT_EQ(pred.evaluate(), TerminationPredicate::Status::TERMINATE); } From f780e2dfbf8a9e7a7b7fcb850100baa6632c1e2d Mon Sep 17 00:00:00 2001 From: "Adi (Suissa) Peleg" Date: Thu, 16 Apr 2020 23:23:11 -0400 Subject: [PATCH 07/16] Removing unused loader envoy_cc_mock (#323) Signed-off-by: Adi Suissa-Peleg --- test/BUILD | 1 - 1 file changed, 1 deletion(-) diff --git a/test/BUILD b/test/BUILD index 652198bab..2d3294744 100644 --- a/test/BUILD +++ b/test/BUILD @@ -2,7 +2,6 @@ licenses(["notice"]) # Apache 2 load( "@envoy//bazel:envoy_build_system.bzl", - "envoy_cc_mock", "envoy_cc_test", "envoy_package", ) From 79f5873d087f8230889237e36d1fdc35742e6852 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Mon, 20 Apr 2020 17:06:03 +0200 Subject: [PATCH 08/16] CircleCI: reduce ASAN parallelism (#324) Signed-off-by: Otto van der Schaaf --- ci/do_ci.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 5a8fb038e..7be180f31 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -73,7 +73,6 @@ function do_asan() { echo "bazel ASAN/UBSAN debug build with tests" echo "Building and testing envoy tests..." cd "${SRCDIR}" - [ -z "$CIRCLECI" ] || export BAZEL_BUILD_OPTIONS="${BAZEL_TEST_OPTIONS} --jobs=4 --local_ram_resources=12288" # We build this in steps to avoid running out of memory in CI run_bazel build ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan -- //source/exe/... && \ @@ -123,6 +122,9 @@ if [ -n "$CIRCLECI" ]; then fi # We constrain parallelism in CI to avoid running out of memory. 
NUM_CPUS=8 + if [ "$1" == "asan" ]; then + NUM_CPUS=5 + fi fi if grep 'docker\|lxc' /proc/1/cgroup; then From d9a06b8af8fcd0917206d6f5f272a4ce4756acfc Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Mon, 20 Apr 2020 17:07:04 +0200 Subject: [PATCH 09/16] release prep: push docker image upon tagging (#325) Signed-off-by: Otto van der Schaaf --- ci/docker/docker_push.sh | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/ci/docker/docker_push.sh b/ci/docker/docker_push.sh index 4116590af..9fd065a87 100755 --- a/ci/docker/docker_push.sh +++ b/ci/docker/docker_push.sh @@ -13,11 +13,19 @@ fi DOCKER_IMAGE_PREFIX="${DOCKER_IMAGE_PREFIX:-envoyproxy/nighthawk}" # push the nighthawk image on tags or merge to master -if [[ -n "$CIRCLE_TAG" ]] || [[ "$CIRCLE_BRANCH" = 'master' ]]; then +if [[ "$CIRCLE_BRANCH" = 'master' ]]; then docker login -u "$DOCKERHUB_USERNAME" -p "$DOCKERHUB_PASSWORD" docker push "${DOCKER_IMAGE_PREFIX}-dev:latest" docker tag "${DOCKER_IMAGE_PREFIX}-dev:latest" "${DOCKER_IMAGE_PREFIX}-dev:${CIRCLE_SHA1}" docker push "${DOCKER_IMAGE_PREFIX}-dev:${CIRCLE_SHA1}" else - echo 'Ignoring non-master branch for docker push.' + if [[ -n "$CIRCLE_TAG" ]]; then + TAG="$CIRCLE_TAG" + docker login -u "$DOCKERHUB_USERNAME" -p "$DOCKERHUB_PASSWORD" + docker push "${DOCKER_IMAGE_PREFIX}:${TAG}" + docker tag "${DOCKER_IMAGE_PREFIX}:${TAG}" "${DOCKER_IMAGE_PREFIX}:${TAG}" + docker push "${DOCKER_IMAGE_PREFIX}:${TAG}" + else + echo 'Ignoring non-master branch for docker push.' 
+ fi fi From afd6046417f661897ff59b55d0addd23fb1cc073 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Mon, 20 Apr 2020 20:26:18 +0200 Subject: [PATCH 10/16] Update Envoy to 582ab4a353ac2c19f4326115b471cebeabe7ae8a (#326) Signed-off-by: Otto van der Schaaf --- .bazelrc | 4 ++-- bazel/repositories.bzl | 4 ++-- source/client/process_impl.cc | 2 +- source/common/worker_impl.cc | 4 ++-- test/benchmark_http_client_test.cc | 3 ++- test/server/http_test_server_filter_integration_test.cc | 2 +- test/stream_decoder_test.cc | 3 ++- test/utility_test.cc | 4 ++-- 8 files changed, 14 insertions(+), 12 deletions(-) diff --git a/.bazelrc b/.bazelrc index 7f83d8173..88b939ce4 100644 --- a/.bazelrc +++ b/.bazelrc @@ -161,8 +161,8 @@ build:remote-msan --config=rbe-toolchain-clang-libc++ build:remote-msan --config=rbe-toolchain-msan # Docker sandbox -# NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L7 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu@sha256:ebf534b8aa505e8ff5663a31eed782942a742ae4d656b54f4236b00399f17911 +# NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:09a5a914c904faa39dbc641181cb43b68cabf626 build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index b198504e9..21dff258f 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "8e8209fa75f87ab53d4c78a466c8f927df930e50" # April 10th, 2020 -ENVOY_SHA = "ef9661e7c0c446e4bf600adb9328f5e5e3088cb897380c7e991b7f743f811a10" +ENVOY_COMMIT = "582ab4a353ac2c19f4326115b471cebeabe7ae8a" # April 17th, 2020 +ENVOY_SHA = 
"fd019d30f45bbc781e5e95324e3b16abcd23beca19baf0aa762bd39c602ddac1" RULES_PYTHON_COMMIT = "dd7f9c5f01bafbfea08c44092b6b0c8fc8fcb77f" # Feb 22nd, 2020 RULES_PYTHON_SHA = "0aa9ec790a58053e3ab5af397879b267a625955f8297c239b2d8559c6773397b" diff --git a/source/client/process_impl.cc b/source/client/process_impl.cc index 32adb91af..e25119a9a 100644 --- a/source/client/process_impl.cc +++ b/source/client/process_impl.cc @@ -101,7 +101,7 @@ ProcessImpl::ProcessImpl(const Options& options, Envoy::Event::TimeSystem& time_ : time_system_(time_system), stats_allocator_(symbol_table_), store_root_(stats_allocator_), api_(std::make_unique(platform_impl_.threadFactory(), store_root_, time_system_, platform_impl_.fileSystem())), - dispatcher_(api_->allocateDispatcher()), benchmark_client_factory_(options), + dispatcher_(api_->allocateDispatcher("main_thread")), benchmark_client_factory_(options), termination_predicate_factory_(options), sequencer_factory_(options), request_generator_factory_(options), options_(options), init_manager_("nh_init_manager"), local_info_(new Envoy::LocalInfo::LocalInfoImpl( diff --git a/source/common/worker_impl.cc b/source/common/worker_impl.cc index 5e59280c0..021a65f35 100644 --- a/source/common/worker_impl.cc +++ b/source/common/worker_impl.cc @@ -7,8 +7,8 @@ namespace Nighthawk { WorkerImpl::WorkerImpl(Envoy::Api::Api& api, Envoy::ThreadLocal::Instance& tls, Envoy::Stats::Store& store) - : thread_factory_(api.threadFactory()), dispatcher_(api.allocateDispatcher()), tls_(tls), - store_(store), time_source_(api.timeSource()) { + : thread_factory_(api.threadFactory()), dispatcher_(api.allocateDispatcher("worker_thread")), + tls_(tls), store_(store), time_source_(api.timeSource()) { tls.registerThread(*dispatcher_, false); } diff --git a/test/benchmark_http_client_test.cc b/test/benchmark_http_client_test.cc index 8800588f6..62a65cb60 100644 --- a/test/benchmark_http_client_test.cc +++ b/test/benchmark_http_client_test.cc @@ -28,7 +28,8 @@ namespace 
Nighthawk { class BenchmarkClientHttpTest : public Test { public: BenchmarkClientHttpTest() - : api_(Envoy::Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher()), + : api_(Envoy::Api::createApiForTest(time_system_)), + dispatcher_(api_->allocateDispatcher("test_thread")), cluster_manager_(std::make_unique()), cluster_info_(std::make_unique()), http_tracer_(std::make_unique()), response_code_("200") { diff --git a/test/server/http_test_server_filter_integration_test.cc b/test/server/http_test_server_filter_integration_test.cc index 768ccf9ae..d63977b76 100644 --- a/test/server/http_test_server_filter_integration_test.cc +++ b/test/server/http_test_server_filter_integration_test.cc @@ -40,7 +40,7 @@ class HttpTestServerIntegrationTestBase : public Envoy::HttpIntegrationTest, absl::string_view host, absl::string_view content_type, const std::function& request_header_delegate) { Envoy::Api::ApiPtr api = Envoy::Api::createApiForTest(); - Envoy::Event::DispatcherPtr dispatcher(api->allocateDispatcher()); + Envoy::Event::DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); std::shared_ptr cluster{ new NiceMock()}; Envoy::Upstream::HostDescriptionConstSharedPtr host_description{ diff --git a/test/stream_decoder_test.cc b/test/stream_decoder_test.cc index dd33cd053..3cba8b2c7 100644 --- a/test/stream_decoder_test.cc +++ b/test/stream_decoder_test.cc @@ -23,7 +23,8 @@ namespace Client { class StreamDecoderTest : public Test, public StreamDecoderCompletionCallback { public: StreamDecoderTest() - : api_(Envoy::Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher()), + : api_(Envoy::Api::createApiForTest(time_system_)), + dispatcher_(api_->allocateDispatcher("test_thread")), request_headers_(std::make_shared( std::initializer_list>({{":method", "GET"}}))), http_tracer_(std::make_unique()), diff --git a/test/utility_test.cc b/test/utility_test.cc index 7ec2fb414..4d292d136 100644 --- a/test/utility_test.cc +++ 
b/test/utility_test.cc @@ -93,7 +93,7 @@ class UtilityAddressResolutionTest : public TestWithParamallocateDispatcher(); + auto dispatcher = api->allocateDispatcher("uri_resolution_thread"); auto u = UriImpl(uri); return u.resolve(*dispatcher, address_family); } @@ -142,7 +142,7 @@ TEST_P(UtilityAddressResolutionTest, ResolveTwiceReturnsCached) { : Envoy::Network::DnsLookupFamily::V4Only; Envoy::Api::ApiPtr api = Envoy::Api::createApiForTest(); - auto dispatcher = api->allocateDispatcher(); + auto dispatcher = api->allocateDispatcher("test_thread"); auto u = UriImpl("localhost"); EXPECT_EQ(u.resolve(*dispatcher, address_family).get(), From c9ffaf601f8fd9a3559c5793582d2d045a3012d7 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Wed, 22 Apr 2020 20:33:13 +0200 Subject: [PATCH 11/16] Log exception in ProcessImpl::run() (#327) Related to https://github.com/envoyproxy/nighthawk/issues/317 Log any exceptions and rethrow. This is to assist with diagnosis once it happens again in the CI env. Signed-off-by: Otto van der Schaaf --- source/client/process_impl.cc | 93 ++++++++++++--------- source/client/process_impl.h | 3 + test/BUILD | 1 + test/client_worker_test.cc | 14 +--- test/integration/integration_test.py | 2 +- test/integration/test_integration_basics.py | 9 +- 6 files changed, 64 insertions(+), 58 deletions(-) diff --git a/source/client/process_impl.cc b/source/client/process_impl.cc index e25119a9a..816d063f3 100644 --- a/source/client/process_impl.cc +++ b/source/client/process_impl.cc @@ -377,41 +377,8 @@ void ProcessImpl::addRequestSourceCluster( socket_address->set_port_value(uri.port()); } -bool ProcessImpl::run(OutputCollector& collector) { - std::vector uris; - UriPtr request_source_uri; - UriPtr tracing_uri; - - try { - // TODO(oschaaf): See if we can rid of resolving here. - // We now only do it to validate. 
- if (options_.uri().has_value()) { - uris.push_back(std::make_unique(options_.uri().value())); - } else { - for (const nighthawk::client::MultiTarget::Endpoint& endpoint : - options_.multiTargetEndpoints()) { - uris.push_back(std::make_unique(fmt::format( - "{}://{}:{}{}", options_.multiTargetUseHttps() ? "https" : "http", - endpoint.address().value(), endpoint.port().value(), options_.multiTargetPath()))); - } - } - for (const UriPtr& uri : uris) { - uri->resolve(*dispatcher_, Utility::translateFamilyOptionString(options_.addressFamily())); - } - if (options_.requestSource() != "") { - request_source_uri = std::make_unique(options_.requestSource()); - request_source_uri->resolve(*dispatcher_, - Utility::translateFamilyOptionString(options_.addressFamily())); - } - if (options_.trace() != "") { - tracing_uri = std::make_unique(options_.trace()); - tracing_uri->resolve(*dispatcher_, - Utility::translateFamilyOptionString(options_.addressFamily())); - } - } catch (const UriException&) { - return false; - } - +bool ProcessImpl::runInternal(OutputCollector& collector, const std::vector& uris, + const UriPtr& request_source_uri, const UriPtr& tracing_uri) { int number_of_workers = determineConcurrency(); shutdown_ = false; const std::vector& workers = createWorkers(number_of_workers); @@ -461,8 +428,8 @@ bool ProcessImpl::run(OutputCollector& collector) { std::chrono::nanoseconds total_execution_duration = 0ns; for (auto& worker : workers_) { auto sequencer_execution_duration = worker->phase().sequencer().executionDuration(); - // We don't write per-worker results if we only have a single worker, because the global results - // will be precisely the same. + // We don't write per-worker results if we only have a single worker, because the global + // results will be precisely the same. 
if (workers_.size() > 1) { StatisticFactoryImpl statistic_factory(options_); collector.addResult(fmt::format("worker_{}", i), @@ -473,10 +440,11 @@ bool ProcessImpl::run(OutputCollector& collector) { i++; } - // Note that above we use use counter values snapshotted by the workers right after its execution - // completes. Here we query the live counters to get to the global numbers. To make sure the - // global aggregated numbers line up, we must take care not to shut down the benchmark client - // before we do this, as that will increment certain counters like connections closed, etc. + // Note that above we use use counter values snapshotted by the workers right after its + // execution completes. Here we query the live counters to get to the global numbers. To make + // sure the global aggregated numbers line up, we must take care not to shut down the benchmark + // client before we do this, as that will increment certain counters like connections closed, + // etc. const auto& counters = Utility().mapCountersFromStore( store_root_, [](absl::string_view, uint64_t value) { return value > 0; }); StatisticFactoryImpl statistic_factory(options_); @@ -485,6 +453,49 @@ bool ProcessImpl::run(OutputCollector& collector) { return counters.find("sequencer.failed_terminations") == counters.end(); } +bool ProcessImpl::run(OutputCollector& collector) { + std::vector uris; + UriPtr request_source_uri; + UriPtr tracing_uri; + + try { + // TODO(oschaaf): See if we can rid of resolving here. + // We now only do it to validate. + if (options_.uri().has_value()) { + uris.push_back(std::make_unique(options_.uri().value())); + } else { + for (const nighthawk::client::MultiTarget::Endpoint& endpoint : + options_.multiTargetEndpoints()) { + uris.push_back(std::make_unique(fmt::format( + "{}://{}:{}{}", options_.multiTargetUseHttps() ? 
"https" : "http", + endpoint.address().value(), endpoint.port().value(), options_.multiTargetPath()))); + } + } + for (const UriPtr& uri : uris) { + uri->resolve(*dispatcher_, Utility::translateFamilyOptionString(options_.addressFamily())); + } + if (options_.requestSource() != "") { + request_source_uri = std::make_unique(options_.requestSource()); + request_source_uri->resolve(*dispatcher_, + Utility::translateFamilyOptionString(options_.addressFamily())); + } + if (options_.trace() != "") { + tracing_uri = std::make_unique(options_.trace()); + tracing_uri->resolve(*dispatcher_, + Utility::translateFamilyOptionString(options_.addressFamily())); + } + } catch (const UriException&) { + return false; + } + + try { + return runInternal(collector, uris, request_source_uri, tracing_uri); + } catch (Envoy::EnvoyException& ex) { + ENVOY_LOG(error, "Fatal exception: {}", ex.what()); + throw; + } +} + void ProcessImpl::setupForHRTimers() { // We override the local environment to indicate to libevent that we favor precision over // efficiency. Note that it is also possible to do this at setup time via libevent's api's. 
diff --git a/source/client/process_impl.h b/source/client/process_impl.h index c5226ef99..967ee7c59 100644 --- a/source/client/process_impl.h +++ b/source/client/process_impl.h @@ -93,6 +93,9 @@ class ProcessImpl : public Process, public Envoy::Logger::Loggable mergeWorkerStatistics(const std::vector& workers) const; void setupForHRTimers(); + bool runInternal(OutputCollector& collector, const std::vector& uris, + const UriPtr& request_source_uri, const UriPtr& tracing_uri); + Envoy::ProcessWide process_wide_; Envoy::PlatformImpl platform_impl_; Envoy::Event::TimeSystem& time_system_; diff --git a/test/BUILD b/test/BUILD index 2d3294744..2c9db7a40 100644 --- a/test/BUILD +++ b/test/BUILD @@ -59,6 +59,7 @@ envoy_cc_test( "@envoy//test/mocks/local_info:local_info_mocks", "@envoy//test/mocks/protobuf:protobuf_mocks", "@envoy//test/mocks/thread_local:thread_local_mocks", + "@envoy//test/test_common:simulated_time_system_lib", ], ) diff --git a/test/client_worker_test.cc b/test/client_worker_test.cc index 21240f234..5da4167bb 100644 --- a/test/client_worker_test.cc +++ b/test/client_worker_test.cc @@ -9,6 +9,7 @@ #include "external/envoy/test/mocks/local_info/mocks.h" #include "external/envoy/test/mocks/protobuf/mocks.h" #include "external/envoy/test/mocks/thread_local/mocks.h" +#include "external/envoy/test/test_common/simulated_time_system.h" #include "common/statistic_impl.h" @@ -74,11 +75,6 @@ class ClientWorkerTest : public Test { TerminationPredicatePtr createMockTerminationPredicate() { auto predicate = std::make_unique>(); ON_CALL(*predicate, appendToChain(_)).WillByDefault(ReturnRef(*predicate)); - EXPECT_CALL(*predicate, evaluateChain()) - .Times(AtLeast(0)) - .WillOnce(Return(TerminationPredicate::Status::PROCEED)) - .WillOnce(Return(TerminationPredicate::Status::TERMINATE)); - return predicate; } @@ -91,7 +87,7 @@ class ClientWorkerTest : public Test { MockRequestSourceFactory request_generator_factory_; Envoy::Stats::IsolatedStoreImpl store_; NiceMock tls_; 
- Envoy::Event::TestRealTimeSystem time_system_; + Envoy::Event::SimulatedTimeSystem time_system_; MockBenchmarkClient* benchmark_client_; MockSequencer* sequencer_; MockRequestSource* request_generator_; @@ -112,8 +108,7 @@ TEST_F(ClientWorkerTest, BasicTest) { InSequence dummy; EXPECT_CALL(*benchmark_client_, setShouldMeasureLatencies(false)).Times(1); EXPECT_CALL(*benchmark_client_, tryStartRequest(_)) - .Times(1) - .WillRepeatedly(Invoke(this, &ClientWorkerTest::CheckThreadChanged)); + .WillOnce(Invoke(this, &ClientWorkerTest::CheckThreadChanged)); EXPECT_CALL(*benchmark_client_, setShouldMeasureLatencies(true)).Times(1); EXPECT_CALL(*sequencer_, start).Times(1); EXPECT_CALL(*sequencer_, waitForCompletion).Times(1); @@ -124,8 +119,7 @@ TEST_F(ClientWorkerTest, BasicTest) { auto worker = std::make_unique( *api_, tls_, cluster_manager_ptr_, benchmark_client_factory_, termination_predicate_factory_, sequencer_factory_, request_generator_factory_, store_, worker_number, - time_system_.monotonicTime() + 10ms, http_tracer_, - ClientWorkerImpl::HardCodedWarmupStyle::ON); + time_system_.monotonicTime(), http_tracer_, ClientWorkerImpl::HardCodedWarmupStyle::ON); worker->start(); worker->waitForCompletion(); diff --git a/test/integration/integration_test.py b/test/integration/integration_test.py index 2309521a7..951d56b32 100644 --- a/test/integration/integration_test.py +++ b/test/integration/integration_test.py @@ -24,7 +24,7 @@ "-x", path, "-n", - "2" if isSanitizerRun() else "20" # Number of tests to run in parallel + "4" if isSanitizerRun() else "20" # Number of tests to run in parallel ], plugins=["xdist"]) exit(r) diff --git a/test/integration/test_integration_basics.py b/test/integration/test_integration_basics.py index 8a0153714..95619bfbf 100644 --- a/test/integration/test_integration_basics.py +++ b/test/integration/test_integration_basics.py @@ -554,19 +554,16 @@ def test_https_h1_sni(sni_test_server_fixture): # Verify failure when we set no host (will get 
plain http) parsed_json, _ = sni_test_server_fixture.runNighthawkClient( - [sni_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100"], + [sni_test_server_fixture.getTestServerRootUri(), "--rps", "20", "--duration", "100"], expect_failure=True) # Verify success when we use plain http and don't request the sni host - parsed_json, _ = sni_test_server_fixture.runNighthawkClient( - [sni_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100"], - expect_failure=True) - parsed_json, _ = sni_test_server_fixture.runNighthawkClient([ sni_test_server_fixture.getTestServerRootUri().replace("https://", "http://"), "--rps", "100", - "--duration", "100", "--termination-predicate", "benchmark.http_2xx:2" + "--duration", "20", "--termination-predicate", "benchmark.http_2xx:2" ], expect_failure=False) + counters = sni_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1) assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1) From e0ee2d210115c0a5aeb5dec76bbfcc960fca0868 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Mon, 27 Apr 2020 15:36:09 +0200 Subject: [PATCH 12/16] Coverage: HttpTestServerDecoderFilter::sendReply() (#329) We don't reliably cover HttpTestServerDecoderFilter::sendReply() with bad request-header configuration. Add a c++ unit test to reliably cover that. 
Fixes https://github.com/envoyproxy/nighthawk/issues/296 Signed-off-by: Otto van der Schaaf --- .../http_test_server_filter_integration_test.cc | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/test/server/http_test_server_filter_integration_test.cc b/test/server/http_test_server_filter_integration_test.cc index d63977b76..8d73636f5 100644 --- a/test/server/http_test_server_filter_integration_test.cc +++ b/test/server/http_test_server_filter_integration_test.cc @@ -14,6 +14,7 @@ namespace Nighthawk { using namespace testing; +constexpr absl::string_view kBadJson = "bad_json"; class HttpTestServerIntegrationTestBase : public Envoy::HttpIntegrationTest, public TestWithParam { @@ -258,6 +259,20 @@ TEST_P(HttpTestServerIntegrationNoConfigTest, TestHeaderConfig) { EXPECT_EQ("", response->body()); } +TEST_P(HttpTestServerIntegrationNoConfigTest, BadTestHeaderConfig) { + Envoy::BufferingStreamDecoderPtr response = makeSingleRequest( + lookupPort("http"), "GET", "/", "", downstream_protocol_, version_, "foo.com", "", + [](Envoy::Http::RequestHeaderMapImpl& request_headers) { + request_headers.addCopy(Nighthawk::Server::TestServer::HeaderNames::get().TestServerConfig, + kBadJson); + }); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("500", response->headers().Status()->value().getStringView()); + EXPECT_EQ("test-server didn't understand the request: Error merging json config: Unable to parse " + "JSON as proto (INVALID_ARGUMENT:Unexpected token.\nbad_json\n^): bad_json", + response->body()); +} + class HttpTestServerDecoderFilterTest : public Test {}; // Here we test config-level merging as well as its application at the response-header level. 
@@ -314,7 +329,7 @@ TEST_F(HttpTestServerDecoderFilterTest, HeaderMerge) { header_map, Envoy::Http::TestResponseHeaderMapImpl{ {":status", "200"}, {"foo", "bar2"}, {"foo2", "bar3"}})); - EXPECT_FALSE(f.mergeJsonConfig(R"(bad_json)", options, error_message)); + EXPECT_FALSE(f.mergeJsonConfig(kBadJson, options, error_message)); EXPECT_EQ("Error merging json config: Unable to parse JSON as proto (INVALID_ARGUMENT:Unexpected " "token.\nbad_json\n^): bad_json", error_message); From c09fdcf66706c18c41c01d3f608ff4aa52dde1b2 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Tue, 28 Apr 2020 14:50:21 +0200 Subject: [PATCH 13/16] Supplement doc comments in interface headers. (#330) Fixes https://github.com/envoyproxy/nighthawk/issues/268 Signed-off-by: Otto van der Schaaf --- include/nighthawk/client/factories.h | 29 +++++++++++++++++++-- include/nighthawk/client/output_collector.h | 13 +++++++++ include/nighthawk/client/output_formatter.h | 5 ++++ include/nighthawk/common/request.h | 4 +++ 4 files changed, 49 insertions(+), 2 deletions(-) diff --git a/include/nighthawk/client/factories.h b/include/nighthawk/client/factories.h index 56d619029..dc393d8a3 100644 --- a/include/nighthawk/client/factories.h +++ b/include/nighthawk/client/factories.h @@ -24,19 +24,44 @@ namespace Client { class BenchmarkClientFactory { public: virtual ~BenchmarkClientFactory() = default; + + /** + * Constructs a BenchmarkClient + * + * @param api reference to the Api object. + * @param dispatcher supplies the owning thread's dispatcher. + * @param scope stats scope for any stats tracked by the benchmark client. + * @param cluster_manager Cluster manager preconfigured with our target cluster. + * @param http_tracer Shared pointer to an http tracer implementation (e.g. Zipkin). + * @param cluster_name Name of the cluster that this benchmark client will use. In conjunction + * with cluster_manager this will allow the this BenchmarkClient to access the target connection + * pool. 
+ * @param request_source Source of request-specifiers. Will be queried every time the + * BenchmarkClient is asked to issue a request. + * + * @return BenchmarkClientPtr pointer to a BenchmarkClient instance. + */ virtual BenchmarkClientPtr create(Envoy::Api::Api& api, Envoy::Event::Dispatcher& dispatcher, Envoy::Stats::Scope& scope, Envoy::Upstream::ClusterManagerPtr& cluster_manager, Envoy::Tracing::HttpTracerSharedPtr& http_tracer, absl::string_view cluster_name, - RequestSource& request_generator) const PURE; + RequestSource& request_source) const PURE; }; class OutputFormatterFactory { public: virtual ~OutputFormatterFactory() = default; + + /** + * Constructs an OutputFormatter instance according to the requested output format. + * + * @param options Proto configuration object indicating the desired output format. + * + * @return OutputFormatterPtr pointer to an OutputFormatter instance. + */ virtual OutputFormatterPtr - create(const nighthawk::client::OutputFormat_OutputFormatOptions) const PURE; + create(const nighthawk::client::OutputFormat_OutputFormatOptions options) const PURE; }; } // namespace Client diff --git a/include/nighthawk/client/output_collector.h b/include/nighthawk/client/output_collector.h index d55f2eb8f..1ff274821 100644 --- a/include/nighthawk/client/output_collector.h +++ b/include/nighthawk/client/output_collector.h @@ -15,6 +15,15 @@ namespace Client { class OutputCollector { public: virtual ~OutputCollector() = default; + + /** + * Adds a result to the structured output. + * + * @param name unique name of the result. E.g. worker_1. + * @param statistics Reference to a vector of statistics to add to the output. + * @param counters Reference to a map of counter values, keyed by name, to add to the output. + * @param execution_duration Execution duration associated with the to-be-added result.
+ */ virtual void addResult(absl::string_view name, const std::vector& statistics, const std::map& counters, const std::chrono::nanoseconds execution_duration) PURE; @@ -24,6 +33,10 @@ class OutputCollector { * @param output the output value to set. */ virtual void setOutput(const nighthawk::client::Output& output) PURE; + + /** + * @return nighthawk::client::Output proto output object. + */ virtual nighthawk::client::Output toProto() const PURE; }; diff --git a/include/nighthawk/client/output_formatter.h b/include/nighthawk/client/output_formatter.h index e3e15e44b..d28fffb0b 100644 --- a/include/nighthawk/client/output_formatter.h +++ b/include/nighthawk/client/output_formatter.h @@ -17,6 +17,11 @@ namespace Client { class OutputFormatter { public: virtual ~OutputFormatter() = default; + + /** + * @return std::string serialized representation of output. The specific format depends + * on the derived class, for example human-readable or json. + */ virtual std::string formatProto(const nighthawk::client::Output& output) const PURE; }; diff --git a/include/nighthawk/common/request.h b/include/nighthawk/common/request.h index d24fedeb3..d123b8736 100644 --- a/include/nighthawk/common/request.h +++ b/include/nighthawk/common/request.h @@ -15,6 +15,10 @@ using HeaderMapPtr = std::shared_ptr; class Request { public: virtual ~Request() = default; + + /** + * @return HeaderMapPtr shared pointer to a request header specification. + */ virtual HeaderMapPtr header() const PURE; // TODO(oschaaf): expectations }; From 0e681b88aaa54f4f23253bda5ffe876980105e4d Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Mon, 11 May 2020 17:51:35 +0200 Subject: [PATCH 14/16] Update Envoy to 888e0e28900a470df448c65d7b99d8065fd60251 (#331) This Envoy update is a bit more involved, because it forced moving away from the legacy connection pools it had, which got eliminated from the code base. So we migrate to Envoy's new pool models. 
* The h1 pool migrates seamlessly to the new model with minor changes. * The experimental h2 connection pool that supports multiple connections gets dropped. We now offer multiple connection support based on the new H2 pool. Behavior has changed, but there was a warning about that in the description of the --experimental flag (see changes to the test associated to our earlier experimental h2 pool) It should now be possible to add new options to add better support for h2 & multiple connections, and deprecate or remove the --experimental flag for that. Signed-off-by: Otto van der Schaaf --- bazel/repositories.bzl | 4 +- source/client/BUILD | 2 +- source/client/benchmark_client_impl.cc | 60 ++++----------------- source/client/benchmark_client_impl.h | 49 ++--------------- source/client/process_impl.cc | 13 ++--- source/server/README.md | 29 ++++++---- test/BUILD | 2 - test/benchmark_http_client_test.cc | 6 ++- test/client_worker_test.cc | 8 ++- test/integration/test_integration_basics.py | 14 +++-- test/worker_test.cc | 8 ++- 11 files changed, 60 insertions(+), 135 deletions(-) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 21dff258f..dc2e006f6 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "582ab4a353ac2c19f4326115b471cebeabe7ae8a" # April 17th, 2020 -ENVOY_SHA = "fd019d30f45bbc781e5e95324e3b16abcd23beca19baf0aa762bd39c602ddac1" +ENVOY_COMMIT = "888e0e28900a470df448c65d7b99d8065fd60251" # May 9th, 2020 +ENVOY_SHA = "9a4e2342d1ddaf2c9ff6f819f2010d35871f8c19a93b58ee63f2e0fc57fc9fe6" RULES_PYTHON_COMMIT = "dd7f9c5f01bafbfea08c44092b6b0c8fc8fcb77f" # Feb 22nd, 2020 RULES_PYTHON_SHA = "0aa9ec790a58053e3ab5af397879b267a625955f8297c239b2d8559c6773397b" diff --git a/source/client/BUILD b/source/client/BUILD index 3d65f3640..7d4ede8f4 100644 --- a/source/client/BUILD +++ b/source/client/BUILD @@ -57,7 +57,7 @@ envoy_cc_library( 
"@envoy//source/common/http:header_map_lib_with_external_headers", "@envoy//source/common/http:headers_lib_with_external_headers", "@envoy//source/common/http/http1:codec_lib_with_external_headers", - "@envoy//source/common/http/http1:conn_pool_legacy_lib_with_external_headers", + "@envoy//source/common/http/http1:conn_pool_lib_with_external_headers", "@envoy//source/common/http/http2:conn_pool_lib_with_external_headers", "@envoy//source/common/init:manager_lib_with_external_headers", "@envoy//source/common/local_info:local_info_lib_with_external_headers", diff --git a/source/client/benchmark_client_impl.cc b/source/client/benchmark_client_impl.cc index 99b5a3a65..37902e58f 100644 --- a/source/client/benchmark_client_impl.cc +++ b/source/client/benchmark_client_impl.cc @@ -26,7 +26,11 @@ Http1PoolImpl::newStream(Envoy::Http::ResponseDecoder& response_decoder, // In prefetch mode we try to keep the amount of connections at the configured limit. if (prefetch_connections_) { while (host_->cluster().resourceManager(priority_).connections().canCreate()) { - createNewConnection(); + // We cannot rely on ::tryCreateConnection here, because that might decline without + // updating connections().canCreate() above. We would risk an infinite loop. + ActiveClientPtr client = instantiateActiveClient(); + connecting_request_capacity_ += client->effectiveConcurrentRequestLimit(); + client->moveIntoList(std::move(client), owningList(client->state_)); } } @@ -34,8 +38,7 @@ Http1PoolImpl::newStream(Envoy::Http::ResponseDecoder& response_decoder, // of ready_clients_, which will pick the oldest one instead. This makes us cycle through // all the available connections. 
if (!ready_clients_.empty() && connection_reuse_strategy_ == ConnectionReuseStrategy::LRU) { - ready_clients_.back()->moveBetweenLists(ready_clients_, busy_clients_); - attachRequestToClient(*busy_clients_.front(), response_decoder, callbacks); + attachRequestToClient(*ready_clients_.back(), response_decoder, callbacks); return nullptr; } @@ -43,52 +46,6 @@ Http1PoolImpl::newStream(Envoy::Http::ResponseDecoder& response_decoder, return ConnPoolImpl::newStream(response_decoder, callbacks); } -Http2PoolImpl::Http2PoolImpl( - Envoy::Event::Dispatcher& dispatcher, Envoy::Upstream::HostConstSharedPtr host, - Envoy::Upstream::ResourcePriority priority, - const Envoy::Network::ConnectionSocket::OptionsSharedPtr& options, // NOLINT - const Envoy::Network::TransportSocketOptionsSharedPtr& transport_socket_options) // NOLINT - : Envoy::Http::Legacy::ConnPoolImplBase(std::move(host), priority), dispatcher_(dispatcher), - socket_options_(options), transport_socket_options_(transport_socket_options) { - for (uint32_t i = 0; i < host_->cluster().resourceManager(priority_).connections().max(); i++) { - pools_.push_back(std::make_unique( - dispatcher_, host_, priority_, socket_options_, transport_socket_options_)); - } -} - -void Http2PoolImpl::addDrainedCallback(Envoy::Http::ConnectionPool::Instance::DrainedCb cb) { - for (auto& pool : pools_) { - pool->addDrainedCallback(cb); - } -} - -void Http2PoolImpl::drainConnections() { - for (auto& pool : pools_) { - pool->drainConnections(); - } -} - -bool Http2PoolImpl::hasActiveConnections() const { - for (auto& pool : pools_) { - if (pool->hasActiveConnections()) { - return true; - } - } - return false; -} - -Envoy::Http::ConnectionPool::Cancellable* -Http2PoolImpl::newStream(Envoy::Http::ResponseDecoder& response_decoder, - Envoy::Http::ConnectionPool::Callbacks& callbacks) { - // Use the simplest but probably naive approach of rotating over the available pool instances - // / connections to distribute requests accross them. 
- return pools_[pool_round_robin_index_++ % pools_.size()]->newStream(response_decoder, callbacks); -}; - -void Http2PoolImpl::checkForDrained() { - // TODO(oschaaf): this one is protected, can't forward it. -} - BenchmarkClientHttpImpl::BenchmarkClientHttpImpl( Envoy::Api::Api& api, Envoy::Event::Dispatcher& dispatcher, Envoy::Stats::Scope& scope, StatisticPtr&& connect_statistic, StatisticPtr&& response_statistic, @@ -200,9 +157,12 @@ void BenchmarkClientHttpImpl::onPoolFailure(Envoy::Http::ConnectionPool::PoolFai case Envoy::Http::ConnectionPool::PoolFailureReason::Overflow: benchmark_client_stats_.pool_overflow_.inc(); break; - case Envoy::Http::ConnectionPool::PoolFailureReason::ConnectionFailure: + case Envoy::Http::ConnectionPool::PoolFailureReason::LocalConnectionFailure: + case Envoy::Http::ConnectionPool::PoolFailureReason::RemoteConnectionFailure: benchmark_client_stats_.pool_connection_failure_.inc(); break; + case Envoy::Http::ConnectionPool::PoolFailureReason::Timeout: + break; default: NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/source/client/benchmark_client_impl.h b/source/client/benchmark_client_impl.h index 2a42aebdf..a40e90a19 100644 --- a/source/client/benchmark_client_impl.h +++ b/source/client/benchmark_client_impl.h @@ -15,7 +15,7 @@ #include "nighthawk/common/statistic.h" #include "external/envoy/source/common/common/logger.h" -#include "external/envoy/source/common/http/http1/conn_pool_legacy.h" +#include "external/envoy/source/common/http/http1/conn_pool.h" #include "external/envoy/source/common/http/http2/conn_pool.h" #include "external/envoy/source/common/runtime/runtime_impl.h" @@ -45,13 +45,13 @@ struct BenchmarkClientStats { ALL_BENCHMARK_CLIENT_STATS(GENERATE_COUNTER_STRUCT) }; -class Http1PoolImpl : public Envoy::Http::Legacy::Http1::ProdConnPoolImpl { +class Http1PoolImpl : public Envoy::Http::Http1::ProdConnPoolImpl { public: enum class ConnectionReuseStrategy { MRU, LRU, }; - using 
Envoy::Http::Legacy::Http1::ProdConnPoolImpl::ProdConnPoolImpl; + using Envoy::Http::Http1::ProdConnPoolImpl::ProdConnPoolImpl; Envoy::Http::ConnectionPool::Cancellable* newStream(Envoy::Http::ResponseDecoder& response_decoder, Envoy::Http::ConnectionPool::Callbacks& callbacks) override; @@ -67,49 +67,6 @@ class Http1PoolImpl : public Envoy::Http::Legacy::Http1::ProdConnPoolImpl { bool prefetch_connections_{}; }; -// Vanilla Envoy's HTTP/2 pool is single connection only (or actually sometimes dual in connection -// drainage scenarios). Http2PoolImpl is an experimental pool, which uses multiple vanilla Envoy -// HTTP/2 pools under the hood. Using multiple connections is useful when testing backends that need -// multiple connections to distribute load internally. Combining multiple connections with -// --max-requests-per-connection may help as well, as doing periodically initiating new connections -// may help the benchmark target by giving it an opportunity to rebalance. -class Http2PoolImpl : public Envoy::Http::ConnectionPool::Instance, - public Envoy::Http::Legacy::ConnPoolImplBase { -public: - // For doc comments, see Envoy::Http::ConnectionPool::Instance & Envoy::Http::ConnPoolImplBase - Http2PoolImpl( - Envoy::Event::Dispatcher& dispatcher, Envoy::Upstream::HostConstSharedPtr host, - Envoy::Upstream::ResourcePriority priority, - const Envoy::Network::ConnectionSocket::OptionsSharedPtr& options, // NOLINT - const Envoy::Network::TransportSocketOptionsSharedPtr& transport_socket_options); // NOLINT - - // Envoy::Http::ConnectionPool::Instance - Envoy::Http::Protocol protocol() const override { return Envoy::Http::Protocol::Http2; } - - void addDrainedCallback(Envoy::Http::ConnectionPool::Instance::DrainedCb cb) override; - - void drainConnections() override; - - bool hasActiveConnections() const override; - - Envoy::Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }; - - Envoy::Http::ConnectionPool::Cancellable* - 
newStream(Envoy::Http::ResponseDecoder& response_decoder, - Envoy::Http::ConnectionPool::Callbacks& callbacks) override; - -protected: - // Envoy::Http::ConnPoolImplBase - void checkForDrained() override; - -private: - Envoy::Event::Dispatcher& dispatcher_; - const Envoy::Network::ConnectionSocket::OptionsSharedPtr socket_options_; - const Envoy::Network::TransportSocketOptionsSharedPtr transport_socket_options_; - std::vector> pools_; - uint64_t pool_round_robin_index_{0}; -}; - class BenchmarkClientHttpImpl : public BenchmarkClient, public StreamDecoderCompletionCallback, public Envoy::Logger::Loggable { diff --git a/source/client/process_impl.cc b/source/client/process_impl.cc index 816d063f3..df5845bed 100644 --- a/source/client/process_impl.cc +++ b/source/client/process_impl.cc @@ -74,9 +74,6 @@ class ClusterManagerFactory : public Envoy::Upstream::ProdClusterManagerFactory h1_pool->setConnectionReuseStrategy(connection_reuse_strategy_); h1_pool->setPrefetchConnections(prefetch_connections_); return Envoy::Http::ConnectionPool::InstancePtr{h1_pool}; - } else if (use_multi_conn_h2_pool_ && protocol == Envoy::Http::Protocol::Http2) { - return Envoy::Http::ConnectionPool::InstancePtr{ - new Http2PoolImpl(dispatcher, host, priority, options, transport_socket_options)}; } return Envoy::Upstream::ProdClusterManagerFactory::allocateConnPool( dispatcher, host, priority, protocol, options, transport_socket_options); @@ -89,12 +86,10 @@ class ClusterManagerFactory : public Envoy::Upstream::ProdClusterManagerFactory void setPrefetchConnections(const bool prefetch_connections) { prefetch_connections_ = prefetch_connections; } - void enableMultiConnectionH2Pool() { use_multi_conn_h2_pool_ = true; } private: Http1PoolImpl::ConnectionReuseStrategy connection_reuse_strategy_{}; bool prefetch_connections_{}; - bool use_multi_conn_h2_pool_{}; }; ProcessImpl::ProcessImpl(const Options& options, Envoy::Event::TimeSystem& time_system) @@ -278,6 +273,9 @@ void 
ProcessImpl::createBootstrapConfiguration(envoy::config::bootstrap::v3::Boo cluster->set_name(fmt::format("{}", i)); cluster->mutable_connect_timeout()->set_seconds(options_.timeout().count()); cluster->mutable_max_requests_per_connection()->set_value(options_.maxRequestsPerConnection()); + if (options_.h2() && options_.h2UseMultipleConnections()) { + cluster->mutable_http2_protocol_options()->mutable_max_concurrent_streams()->set_value(1); + } auto thresholds = cluster->mutable_circuit_breakers()->add_thresholds(); // We do not support any retrying. @@ -386,7 +384,7 @@ bool ProcessImpl::runInternal(OutputCollector& collector, const std::vector( Envoy::Runtime::LoaderPtr{new Envoy::Runtime::LoaderImpl( - *dispatcher_, tls_, {}, *local_info_, init_manager_, store_root_, generator_, + *dispatcher_, tls_, {}, *local_info_, store_root_, generator_, Envoy::ProtobufMessage::getStrictValidationVisitor(), *api_)}); ssl_context_manager_ = std::make_unique(time_system_); @@ -400,9 +398,6 @@ bool ProcessImpl::runInternal(OutputCollector& collector, const std::vectorsetPrefetchConnections(options_.prefetchConnections()); - if (options_.h2UseMultipleConnections()) { - cluster_manager_factory_->enableMultiConnectionH2Pool(); - } envoy::config::bootstrap::v3::Bootstrap bootstrap; createBootstrapConfiguration(bootstrap, uris, request_source_uri, number_of_workers); if (tracing_uri != nullptr) { diff --git a/source/server/README.md b/source/server/README.md index 010023690..e3dda42f4 100644 --- a/source/server/README.md +++ b/source/server/README.md @@ -100,17 +100,20 @@ bazel-bin/nighthawk_test_server [--disable-extensions ] [--hot-restart-version] [--restart-epoch ] [--log-path ] -[--log-format-escaped] [--log-format -] [--component-log-level -] [-l ] -[--local-address-ip-version ] -[--admin-address-path ] +[--log-format-prefix-with-location +] [--log-format-escaped] +[--log-format ] +[--component-log-level ] [-l +] [--local-address-ip-version +] [--admin-address-path +] 
[--reject-unknown-dynamic-fields] [--allow-unknown-static-fields] -[--allow-unknown-fields] [--config-yaml -] [-c ] [--concurrency -] [--base-id ] [--] -[--version] [-h] +[--allow-unknown-fields] +[--bootstrap-version ] +[--config-yaml ] [-c ] +[--concurrency ] [--base-id +] [--] [--version] [-h] Where: @@ -167,6 +170,10 @@ hot restart epoch # --log-path Path to logfile +--log-format-prefix-with-location +Prefix all occurrences of '%v' in log format with with '[%g:%#] ' +('[path/to/file.cc:99] '). + --log-format-escaped Escape c-style escape sequences in the application logs @@ -201,6 +208,10 @@ allow unknown fields in static configuration --allow-unknown-fields allow unknown fields in static configuration (DEPRECATED) +--bootstrap-version +API version to parse the bootstrap config as (e.g. 3). If unset, all +known versions will be attempted + --config-yaml Inline YAML configuration, merges with the contents of --config-path diff --git a/test/BUILD b/test/BUILD index 2c9db7a40..29cce7730 100644 --- a/test/BUILD +++ b/test/BUILD @@ -55,7 +55,6 @@ envoy_cc_test( "//test/mocks/common:mock_termination_predicate_factory", "@envoy//source/common/api:api_lib", "@envoy//source/common/stats:isolated_store_lib_with_external_headers", - "@envoy//test/mocks/init:init_mocks", "@envoy//test/mocks/local_info:local_info_mocks", "@envoy//test/mocks/protobuf:protobuf_mocks", "@envoy//test/mocks/thread_local:thread_local_mocks", @@ -238,7 +237,6 @@ envoy_cc_test( deps = [ "//source/common:nighthawk_common_lib", "@envoy//source/common/stats:isolated_store_lib_with_external_headers", - "@envoy//test/mocks/init:init_mocks", "@envoy//test/mocks/local_info:local_info_mocks", "@envoy//test/mocks/protobuf:protobuf_mocks", "@envoy//test/mocks/thread_local:thread_local_mocks", diff --git a/test/benchmark_http_client_test.cc b/test/benchmark_http_client_test.cc index 62a65cb60..983995df5 100644 --- a/test/benchmark_http_client_test.cc +++ b/test/benchmark_http_client_test.cc @@ -223,10 +223,12 
@@ TEST_F(BenchmarkClientHttpTest, StatusTrackingInOnComplete) { TEST_F(BenchmarkClientHttpTest, PoolFailures) { setupBenchmarkClient(); - client_->onPoolFailure(Envoy::Http::ConnectionPool::PoolFailureReason::ConnectionFailure); + client_->onPoolFailure(Envoy::Http::ConnectionPool::PoolFailureReason::LocalConnectionFailure); + client_->onPoolFailure(Envoy::Http::ConnectionPool::PoolFailureReason::RemoteConnectionFailure); client_->onPoolFailure(Envoy::Http::ConnectionPool::PoolFailureReason::Overflow); + client_->onPoolFailure(Envoy::Http::ConnectionPool::PoolFailureReason::Timeout); EXPECT_EQ(1, getCounter("pool_overflow")); - EXPECT_EQ(1, getCounter("pool_connection_failure")); + EXPECT_EQ(2, getCounter("pool_connection_failure")); } TEST_F(BenchmarkClientHttpTest, RequestMethodPost) { diff --git a/test/client_worker_test.cc b/test/client_worker_test.cc index 5da4167bb..4a7b59517 100644 --- a/test/client_worker_test.cc +++ b/test/client_worker_test.cc @@ -5,7 +5,6 @@ #include "external/envoy/source/common/runtime/runtime_impl.h" #include "external/envoy/source/common/stats/isolated_store_impl.h" -#include "external/envoy/test/mocks/init/mocks.h" #include "external/envoy/test/mocks/local_info/mocks.h" #include "external/envoy/test/mocks/protobuf/mocks.h" #include "external/envoy/test/mocks/thread_local/mocks.h" @@ -36,9 +35,9 @@ class ClientWorkerTest : public Test { public: ClientWorkerTest() : api_(Envoy::Api::createApiForTest()), thread_id_(std::this_thread::get_id()) { - loader_ = std::make_unique(Envoy::Runtime::LoaderPtr{ - new Envoy::Runtime::LoaderImpl(dispatcher_, tls_, {}, local_info_, init_manager_, store_, - rand_, validation_visitor_, *api_)}); + loader_ = std::make_unique( + Envoy::Runtime::LoaderPtr{new Envoy::Runtime::LoaderImpl( + dispatcher_, tls_, {}, local_info_, store_, rand_, validation_visitor_, *api_)}); benchmark_client_ = new MockBenchmarkClient(); sequencer_ = new MockSequencer(); request_generator_ = new MockRequestSource(); @@ -95,7 
+94,6 @@ class ClientWorkerTest : public Test { NiceMock dispatcher_; std::unique_ptr loader_; NiceMock local_info_; - Envoy::Init::MockManager init_manager_; NiceMock validation_visitor_; Envoy::Upstream::ClusterManagerPtr cluster_manager_ptr_; Envoy::Tracing::HttpTracerSharedPtr http_tracer_; diff --git a/test/integration/test_integration_basics.py b/test/integration/test_integration_basics.py index 95619bfbf..a34ef52fb 100644 --- a/test/integration/test_integration_basics.py +++ b/test/integration/test_integration_basics.py @@ -264,16 +264,22 @@ def test_https_h2(https_test_server_fixture): def test_https_h2_multiple_connections(https_test_server_fixture): """ Test that the experimental h2 pool uses multiple connections. + The burst we send ensures we will need 10 connections right away, as we + limit max active streams per connection to 1 by setting the experimental + flag to use multiple h2 connections. """ parsed_json, _ = https_test_server_fixture.runNighthawkClient([ "--h2", https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", - "--termination-predicate", "benchmark.http_2xx:9", "--max-active-requests", "1", - "--experimental-h2-use-multiple-connections" + "--termination-predicate", "benchmark.http_2xx:99", "--max-active-requests", "10", + "--max-pending-requests", "10", "--experimental-h2-use-multiple-connections", "--burst-size", + "10" ]) counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - assertCounterEqual(counters, "benchmark.http_2xx", 10) - assertCounterEqual(counters, "upstream_cx_http2_total", 10) + assertCounterEqual(counters, "benchmark.http_2xx", 100) + # Empirical observation shows we may end up creating more than 10 connections. + # This is stock Envoy h/2 pool behavior.
+ assertCounterGreaterEqual(counters, "upstream_cx_http2_total", 10) def _do_tls_configuration_test(https_test_server_fixture, cli_parameter, use_h2): diff --git a/test/worker_test.cc b/test/worker_test.cc index 8cdad7aff..30263f0af 100644 --- a/test/worker_test.cc +++ b/test/worker_test.cc @@ -2,7 +2,6 @@ #include "external/envoy/source/common/runtime/runtime_impl.h" #include "external/envoy/source/common/stats/isolated_store_impl.h" -#include "external/envoy/test/mocks/init/mocks.h" #include "external/envoy/test/mocks/local_info/mocks.h" #include "external/envoy/test/mocks/protobuf/mocks.h" #include "external/envoy/test/mocks/thread_local/mocks.h" @@ -39,7 +38,6 @@ class WorkerTest : public Test { Envoy::Stats::IsolatedStoreImpl test_store_; Envoy::Runtime::RandomGeneratorImpl rand_; NiceMock local_info_; - Envoy::Init::MockManager init_manager_; NiceMock validation_visitor_; }; @@ -52,9 +50,9 @@ TEST_F(WorkerTest, WorkerExecutesOnThread) { TestWorker worker(*api_, tls_); NiceMock dispatcher; std::unique_ptr loader = - std::make_unique(Envoy::Runtime::LoaderPtr{ - new Envoy::Runtime::LoaderImpl(dispatcher, tls_, {}, local_info_, init_manager_, - test_store_, rand_, validation_visitor_, *api_)}); + std::make_unique( + Envoy::Runtime::LoaderPtr{new Envoy::Runtime::LoaderImpl( + dispatcher, tls_, {}, local_info_, test_store_, rand_, validation_visitor_, *api_)}); worker.start(); worker.waitForCompletion(); From db22145462a43721204bd0b28dba96511f8b5aff Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Wed, 20 May 2020 01:34:20 +0200 Subject: [PATCH 15/16] Update Envoy to 31128e7dc22355876020188bc8feb99304663041 (#335) - As of this change we needed to reduce parallelism in CI to avoid OOM during coverage runs. This probably can be traced back to a new way of running coverage being used in Envoy, which trickled down to Nighthawk. - Removed our own `io_bazel_rules_python` as we can now piggy-back on Envoy for that. 
- Removes a file that accidentally crept in (lorem_ipsum.txt) Signed-off-by: Otto van der Schaaf --- .bazelrc | 68 +++++++++++++++++- .bazelversion | 2 +- .circleci/config.yml | 2 +- WORKSPACE | 4 ++ bazel/repositories.bzl | 14 +--- ci/do_ci.sh | 5 +- lorem_ipsum.txt | 1 - source/client/process_impl.cc | 2 +- source/server/README.md | 4 ++ test/coverage/gen_build.sh | 76 --------------------- test/run_nighthawk_bazel_coverage.sh | 26 ++----- test/test_data/output_formatter.dotted.gold | 6 +- test/test_data/output_formatter.txt.gold | 2 +- tools/shell_utils.sh | 18 ++++- 14 files changed, 108 insertions(+), 122 deletions(-) delete mode 100644 lorem_ipsum.txt delete mode 100755 test/coverage/gen_build.sh diff --git a/.bazelrc b/.bazelrc index 88b939ce4..0a920b70d 100644 --- a/.bazelrc +++ b/.bazelrc @@ -13,7 +13,7 @@ # Startup options cannot be selected via config. startup --host_jvm_args=-Xmx2g -build --workspace_status_command=bazel/get_workspace_status +build --workspace_status_command="bash bazel/get_workspace_status" build --experimental_local_memory_estimate build --experimental_strict_action_env=true build --host_force_python=PY3 @@ -113,6 +113,28 @@ build:sizeopt -c opt --copt -Os # Test options build --test_env=HEAPCHECK=normal --test_env=PPROF_PATH +# Coverage options +coverage --config=coverage +build:coverage --action_env=BAZEL_USE_LLVM_NATIVE_COVERAGE=1 +build:coverage --action_env=GCOV=llvm-profdata +build:coverage --copt=-DNDEBUG +build:coverage --test_timeout=900 +build:coverage --define=ENVOY_CONFIG_COVERAGE=1 +build:coverage --cxxopt="-DENVOY_CONFIG_COVERAGE=1" +build:coverage --coverage_support=@envoy//bazel/coverage:coverage_support +build:coverage --test_env=CC_CODE_COVERAGE_SCRIPT=external/envoy/bazel/coverage/collect_cc_coverage.sh +build:coverage --test_env=HEAPCHECK= +build:coverage --combined_report=lcov +build:coverage --strategy=TestRunner=sandboxed,local +build:coverage --strategy=CoverageReport=sandboxed,local +build:coverage 
--experimental_use_llvm_covmap +build:coverage --collect_code_coverage +build:coverage --instrumentation_filter="//source(?!/common/chromium_url|/extensions/quic_listeners/quiche/platform)[/:],//include[/:]" +coverage:test-coverage --test_arg="--log-path /dev/null" +coverage:test-coverage --test_arg="-l trace" +coverage:fuzz-coverage --config=asan-fuzzer +coverage:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh + # Remote execution: https://docs.bazel.build/versions/master/remote-execution.html build:rbe-toolchain --host_platform=@envoy_build_tools//toolchains:rbe_ubuntu_clang_platform build:rbe-toolchain --platforms=@envoy_build_tools//toolchains:rbe_ubuntu_clang_platform @@ -162,7 +184,7 @@ build:remote-msan --config=rbe-toolchain-msan # Docker sandbox # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:09a5a914c904faa39dbc641181cb43b68cabf626 +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:04f06115b6ee7cfea74930353fb47a41149cbec3 build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker @@ -193,6 +215,7 @@ build:asan-fuzzer --config=clang-asan build:asan-fuzzer --define=FUZZING_ENGINE=libfuzzer build:asan-fuzzer --copt=-fsanitize=fuzzer-no-link build:asan-fuzzer --copt=-fno-omit-frame-pointer +build:asan-fuzzer --copt=-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION # Remove UBSAN halt_on_error to avoid crashing on protobuf errors. 
build:asan-fuzzer --test_env=UBSAN_OPTIONS=print_stacktrace=1 @@ -208,5 +231,46 @@ build:compdb --strip=always build:compdb --build_tag_filters=-nocompdb build:compdb --define=ENVOY_CONFIG_COMPILATION_DATABASE=1 +# Windows build quirks +build:windows --action_env=TMPDIR +build:windows --define signal_trace=disabled +build:windows --define hot_restart=disabled +build:windows --define tcmalloc=disabled +build:windows --define manual_stamp=manual_stamp + +# Should not be required after upstream fix to bazel, +# and already a no-op to linux/macos builds +# see issue https://github.com/bazelbuild/rules_foreign_cc/issues/301 +build:windows --copt="-DCARES_STATICLIB" +build:windows --copt="-DNGHTTP2_STATICLIB" +build:windows --copt="-DCURL_STATICLIB" + +# Required to work around build defects on Windows MSVC cl +# Unguarded gcc pragmas in quiche are not recognized by MSVC +build:msvc-cl --copt="/wd4068" +# Allows 'nodiscard' function return values to be discarded +build:msvc-cl --copt="/wd4834" +# Allows inline functions to be undefined +build:msvc-cl --copt="/wd4506" +build:msvc-cl --copt="-D_SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING" + +# Required to work around Windows clang-cl build defects +# Ignore conflicting definitions of _WIN32_WINNT +# Overriding __TIME__ etc is problematic (and is actually an invalid no-op) +build:clang-cl --copt="-Wno-macro-redefined" +build:clang-cl --copt="-Wno-builtin-macro-redefined" +build:clang-cl --action_env=USE_CLANG_CL=1 + +# Defaults to 'auto' - Off for windows, so override to linux behavior +build:windows --enable_runfiles=yes + +# This should become adopted by bazel as the default +build:windows --features=compiler_param_file + +# These options attempt to force a monolithic binary including the CRT +build:windows --features=fully_static_link +build:windows --features=static_link_msvcrt +build:windows --dynamic_mode=off + try-import %workspace%/clang.bazelrc try-import %workspace%/user.bazelrc \ No newline at end of file 
diff --git a/.bazelversion b/.bazelversion index ccbccc3dc..fd2a01863 100644 --- a/.bazelversion +++ b/.bazelversion @@ -1 +1 @@ -2.2.0 +3.1.0 diff --git a/.circleci/config.yml b/.circleci/config.yml index ff9710745..fffd93077 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,7 +1,7 @@ references: envoy-build-image: &envoy-build-image # Jan 9th, 2020 - envoyproxy/envoy-build-ubuntu@sha256:3788a87461f2b3dc8048ad0ce5df40438a56e0a8f1a4ab0f61b4ef0d8c11ff1f + envoyproxy/envoy-build-ubuntu:04f06115b6ee7cfea74930353fb47a41149cbec3 version: 2 jobs: build: diff --git a/WORKSPACE b/WORKSPACE index 95a53aeb7..a13e6dad8 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -16,6 +16,10 @@ load("@envoy//bazel:repositories.bzl", "envoy_dependencies") envoy_dependencies() +load("@envoy//bazel:repositories_extra.bzl", "envoy_dependencies_extra") + +envoy_dependencies_extra() + load("@envoy//bazel:dependency_imports.bzl", "envoy_dependency_imports") envoy_dependency_imports() diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index dc2e006f6..63ea43818 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,10 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "888e0e28900a470df448c65d7b99d8065fd60251" # May 9th, 2020 -ENVOY_SHA = "9a4e2342d1ddaf2c9ff6f819f2010d35871f8c19a93b58ee63f2e0fc57fc9fe6" - -RULES_PYTHON_COMMIT = "dd7f9c5f01bafbfea08c44092b6b0c8fc8fcb77f" # Feb 22nd, 2020 -RULES_PYTHON_SHA = "0aa9ec790a58053e3ab5af397879b267a625955f8297c239b2d8559c6773397b" +ENVOY_COMMIT = "31128e7dc22355876020188bc8feb99304663041" # May 17th, 2020 +ENVOY_SHA = "30e150207239a0b08beb6944120c1c29b2fa9e9b47c900dfa020d00a36e1eedb" HDR_HISTOGRAM_C_VERSION = "0.9.13" # Feb 22nd, 2020 HDR_HISTOGRAM_C_SHA = "2bd4a4631b64f2f8cf968ef49dd03ff3c51b487c3c98a01217ae4cf4a35b8310" @@ -55,10 +52,3 @@ cc_library( strip_prefix = "HdrHistogram_c-%s" % HDR_HISTOGRAM_C_VERSION, url = 
"https://github.com/HdrHistogram/HdrHistogram_c/archive/%s.tar.gz" % HDR_HISTOGRAM_C_VERSION, ) - - http_archive( - name = "io_bazel_rules_python", - sha256 = RULES_PYTHON_SHA, - strip_prefix = "rules_python-%s" % RULES_PYTHON_COMMIT, - url = "https://github.com/bazelbuild/rules_python/archive/%s.tar.gz" % RULES_PYTHON_COMMIT, - ) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 7be180f31..878a6efee 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -122,9 +122,12 @@ if [ -n "$CIRCLECI" ]; then fi # We constrain parallelism in CI to avoid running out of memory. NUM_CPUS=8 - if [ "$1" == "asan" ]; then + if [[ "$1" == "asan" ]]; then NUM_CPUS=5 fi + if [[ "$1" == "coverage" ]]; then + NUM_CPUS=3 + fi fi if grep 'docker\|lxc' /proc/1/cgroup; then diff --git a/lorem_ipsum.txt b/lorem_ipsum.txt deleted file mode 100644 index 19c215558..000000000 --- a/lorem_ipsum.txt +++ /dev/null @@ -1 +0,0 @@ -hay diff --git a/source/client/process_impl.cc b/source/client/process_impl.cc index df5845bed..135e40706 100644 --- a/source/client/process_impl.cc +++ b/source/client/process_impl.cc @@ -107,7 +107,7 @@ ProcessImpl::ProcessImpl(const Options& options, Envoy::Event::TimeSystem& time_ singleton_manager_(std::make_unique(api_->threadFactory())), access_log_manager_(std::chrono::milliseconds(1000), *api_, *dispatcher_, access_log_lock_, store_root_), - init_watcher_("Nighthawk", []() {}), validation_context_(false, false) { + init_watcher_("Nighthawk", []() {}), validation_context_(false, false, false) { // Any dispatchers created after the following call will use hr timers. 
setupForHRTimers(); std::string lower = absl::AsciiStrToLower( diff --git a/source/server/README.md b/source/server/README.md index e3dda42f4..bdc72cb26 100644 --- a/source/server/README.md +++ b/source/server/README.md @@ -107,6 +107,7 @@ bazel-bin/nighthawk_test_server [--disable-extensions ] ] [--local-address-ip-version ] [--admin-address-path ] +[--ignore-unknown-dynamic-fields] [--reject-unknown-dynamic-fields] [--allow-unknown-static-fields] [--allow-unknown-fields] @@ -199,6 +200,9 @@ The local IP address version (v4 or v6). --admin-address-path Admin address path +--ignore-unknown-dynamic-fields +ignore unknown fields in dynamic configuration + --reject-unknown-dynamic-fields reject unknown fields in dynamic configuration diff --git a/test/coverage/gen_build.sh b/test/coverage/gen_build.sh deleted file mode 100755 index 80d29af08..000000000 --- a/test/coverage/gen_build.sh +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/bash - -# Generate test/coverage/BUILD, which contains a single envoy_cc_test target -# that contains all C++ based tests suitable for performing the coverage run. A -# single binary (as opposed to multiple test targets) is require to work around -# the crazy in https://github.com/bazelbuild/bazel/issues/1118. This is used by -# the coverage runner script. - -set -e -set -x - -[ -z "${BAZEL_BIN}" ] && BAZEL_BIN=bazel -[ -z "${BUILDIFIER_BIN}" ] && BUILDIFIER_BIN=buildifier - -# Path to the generated BUILD file for the coverage target. -[ -z "${BUILD_PATH}" ] && BUILD_PATH="$(dirname "$0")"/BUILD - -# Extra repository information to include when generating coverage targets. This is useful for -# consuming projects. E.g., "@envoy". -[ -z "${REPOSITORY}" ] && REPOSITORY="" - -# This is an extra bazel path to query for additional targets. This is useful for consuming projects -# that want to run coverage over the public envoy code as well as private extensions. -# E.g., "//envoy-lyft/test/..." 
-[ -z "${EXTRA_QUERY_PATHS}" ] && EXTRA_QUERY_PATHS="" - -rm -f "${BUILD_PATH}" - -if [[ $# -gt 0 ]]; then - COVERAGE_TARGETS=$* -else - COVERAGE_TARGETS=//test/... -fi - -for target in ${COVERAGE_TARGETS}; do - TARGETS="$TARGETS $("${BAZEL_BIN}" query ${BAZEL_QUERY_OPTIONS} "attr('tags', 'coverage_test_lib', ${REPOSITORY}${target})" | grep "^//")" -done - -( - cat << EOF -# This file is generated by test/coverage/gen_build.sh automatically prior to -# coverage runs. It is under .gitignore. DO NOT EDIT, DO NOT CHECK IN. -load( - "@envoy//bazel:envoy_build_system.bzl", - "envoy_cc_test", - "envoy_package", -) - -envoy_package() - -envoy_cc_test( - name = "coverage_tests", - repository = "@envoy", - deps = [ -EOF - for t in ${TARGETS} - do - echo " \"$t\"," - done - cat << EOF - ], - # no-remote due to https://github.com/bazelbuild/bazel/issues/4685 - tags = ["manual", "no-remote"], - coverage = False, - # Due to the nature of coverage_tests, the shard of coverage_tests are very uneven, some of - # shard can take 100s and some takes only 10s, so we use the maximum sharding to here to let - # Bazel scheduling them across CPU cores. - # Sharding can be disabled by --test_sharding_strategy=disabled. - shard_count = 50, -) -EOF - -) > "${BUILD_PATH}" - -echo "Generated coverage BUILD file at: ${BUILD_PATH}" -"${BUILDIFIER_BIN}" "${BUILD_PATH}" \ No newline at end of file diff --git a/test/run_nighthawk_bazel_coverage.sh b/test/run_nighthawk_bazel_coverage.sh index fada02da0..227294bc9 100755 --- a/test/run_nighthawk_bazel_coverage.sh +++ b/test/run_nighthawk_bazel_coverage.sh @@ -24,39 +24,23 @@ else COVERAGE_TARGETS=//test/... fi -# Make sure //test/coverage:coverage_tests is up-to-date. -SCRIPT_DIR="$(realpath "$(dirname "$0")")" -"${SCRIPT_DIR}"/coverage/gen_build.sh ${COVERAGE_TARGETS} - -BAZEL_USE_LLVM_NATIVE_COVERAGE=1 GCOV=llvm-profdata bazel coverage ${BAZEL_BUILD_OPTIONS} \ - -c fastbuild --copt=-DNDEBUG --instrumentation_filter=//source/...,//include/... 
\ - --test_timeout=2000 --cxxopt="-DENVOY_CONFIG_COVERAGE=1" --test_output=errors \ - --test_arg="--log-path /dev/null" --test_arg="-l trace" --test_env=HEAPCHECK= \ - --test_env=ENVOY_IP_TEST_VERSIONS=v4only //test/coverage:coverage_tests +BAZEL_BUILD_OPTIONS+=" --config=test-coverage --test_tag_filters=-nocoverage --test_env=ENVOY_IP_TEST_VERSIONS=v4only" +bazel coverage ${BAZEL_BUILD_OPTIONS} --test_output=all ${COVERAGE_TARGETS} COVERAGE_DIR="${SRCDIR}"/generated/coverage mkdir -p "${COVERAGE_DIR}" -COVERAGE_IGNORE_REGEX="(/external/|pb\.(validate\.)?(h|cc)|/chromium_url/|/test/|/tmp)" -COVERAGE_BINARY="bazel-bin/test/coverage/coverage_tests" COVERAGE_DATA="${COVERAGE_DIR}/coverage.dat" -echo "Merging coverage data..." -llvm-profdata merge -sparse -o ${COVERAGE_DATA} $(find -L bazel-out/k8-fastbuild/testlogs/test/coverage/coverage_tests/ -name coverage.dat) +cp bazel-out/_coverage/_coverage_report.dat "${COVERAGE_DATA}" -echo "Generating report..." -llvm-cov show "${COVERAGE_BINARY}" -instr-profile="${COVERAGE_DATA}" -Xdemangler=c++filt \ - -ignore-filename-regex="${COVERAGE_IGNORE_REGEX}" -output-dir=${COVERAGE_DIR} -format=html -sed -i -e 's|>proc/self/cwd/|>|g' "${COVERAGE_DIR}/index.html" -sed -i -e 's|>bazel-out/[^/]*/bin/\([^/]*\)/[^<]*/_virtual_includes/[^/]*|>\1|g' "${COVERAGE_DIR}/index.html" +COVERAGE_VALUE=$(genhtml --prefix ${PWD} --output "${COVERAGE_DIR}" "${COVERAGE_DATA}" | tee /dev/stderr | grep lines... 
| cut -d ' ' -f 4) +COVERAGE_VALUE=${COVERAGE_VALUE%?} [[ -z "${ENVOY_COVERAGE_DIR}" ]] || rsync -av "${COVERAGE_DIR}"/ "${ENVOY_COVERAGE_DIR}" if [ "$VALIDATE_COVERAGE" == "true" ] then - COVERAGE_VALUE=$(llvm-cov export "${COVERAGE_BINARY}" -instr-profile="${COVERAGE_DATA}" \ - -ignore-filename-regex="${COVERAGE_IGNORE_REGEX}" -summary-only | \ - python3 -c "import sys, json; print(json.load(sys.stdin)['data'][0]['totals']['lines']['percent'])") COVERAGE_THRESHOLD=98.2 COVERAGE_FAILED=$(echo "${COVERAGE_VALUE}<${COVERAGE_THRESHOLD}" | bc) if test ${COVERAGE_FAILED} -eq 1; then diff --git a/test/test_data/output_formatter.dotted.gold b/test/test_data/output_formatter.dotted.gold index 43152655a..f3beb91f5 100644 --- a/test/test_data/output_formatter.dotted.gold +++ b/test/test_data/output_formatter.dotted.gold @@ -10,7 +10,7 @@ worker_0..min: 0 worker_0..max: 0 worker_0.foo_size.samples: 4 worker_0.foo_size.mean: 15.5 -worker_0.foo_size.pstdev: 1.11803 +worker_0.foo_size.pstdev: 1.118033988749895 worker_0.foo_size.min: 14 worker_0.foo_size.max: 17 worker_0.foo_size.permilles-0.count: 1 @@ -48,7 +48,7 @@ worker_1..min: 0 worker_1..max: 0 worker_1.foo_size.samples: 4 worker_1.foo_size.mean: 15.5 -worker_1.foo_size.pstdev: 1.11803 +worker_1.foo_size.pstdev: 1.118033988749895 worker_1.foo_size.min: 14 worker_1.foo_size.max: 17 worker_1.foo_size.permilles-0.count: 1 @@ -86,7 +86,7 @@ global..min: 0 global..max: 0 global.foo_size.samples: 4 global.foo_size.mean: 15.5 -global.foo_size.pstdev: 1.11803 +global.foo_size.pstdev: 1.118033988749895 global.foo_size.min: 14 global.foo_size.max: 17 global.foo_size.permilles-0.count: 1 diff --git a/test/test_data/output_formatter.txt.gold b/test/test_data/output_formatter.txt.gold index b9b99f8f9..2929fd19b 100644 --- a/test/test_data/output_formatter.txt.gold +++ b/test/test_data/output_formatter.txt.gold @@ -4,7 +4,7 @@ stat_id (3 samples) min: 0s 001ms 000us | mean: 0s 002ms 000us | max: 0s 003ms 000us | pstdev: 0s 000ms 816us 
foo_size (4 samples) - min: 14 | mean: 15.5 | max: 17 | pstdev: 1.11803 + min: 14 | mean: 15.5 | max: 17 | pstdev: 1.118033988749895 Percentile Count Value 0.5 2 15 diff --git a/tools/shell_utils.sh b/tools/shell_utils.sh index 8aa65eed8..ab18006a6 100755 --- a/tools/shell_utils.sh +++ b/tools/shell_utils.sh @@ -1,11 +1,25 @@ source_venv() { VENV_DIR=$1 - if [[ "$VIRTUAL_ENV" == "" ]]; then + if [[ "${VIRTUAL_ENV}" == "" ]]; then if [[ ! -d "${VENV_DIR}"/venv ]]; then - virtualenv "${VENV_DIR}"/venv --no-site-packages --python=python2.7 + virtualenv "${VENV_DIR}"/venv --python=python3 fi source "${VENV_DIR}"/venv/bin/activate else echo "Found existing virtualenv" fi } + +python_venv() { + SCRIPT_DIR=$(realpath "$(dirname "$0")") + + BUILD_DIR=build_tools + PY_NAME="$1" + VENV_DIR="${BUILD_DIR}/${PY_NAME}" + + source_venv "${VENV_DIR}" + pip install -r "${SCRIPT_DIR}"/requirements.txt + + shift + python3 "${SCRIPT_DIR}/${PY_NAME}.py" $* +} \ No newline at end of file From 1b69aafd9a7b8b3ef6dce348dc18810aa1f45650 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Tue, 26 May 2020 16:34:47 +0200 Subject: [PATCH 16/16] Update Envoy to 74290ef76a76fbbf50f072dc33438791f93f68c7 (#336) Signed-off-by: Otto van der Schaaf --- bazel/repositories.bzl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 63ea43818..3f9cfcfd7 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "31128e7dc22355876020188bc8feb99304663041" # May 17th, 2020 -ENVOY_SHA = "30e150207239a0b08beb6944120c1c29b2fa9e9b47c900dfa020d00a36e1eedb" +ENVOY_COMMIT = "74290ef76a76fbbf50f072dc33438791f93f68c7" # May 25th, 2020 +ENVOY_SHA = "123ad63c7081f2575aa08a5038d2302d7def3ea5d86cc4dd274757b597212446" HDR_HISTOGRAM_C_VERSION = "0.9.13" # Feb 22nd, 2020 HDR_HISTOGRAM_C_SHA = 
"2bd4a4631b64f2f8cf968ef49dd03ff3c51b487c3c98a01217ae4cf4a35b8310"